Diffstat (limited to 'nixpkgs/nixos/tests')
-rw-r--r--  nixpkgs/nixos/tests/3proxy.nix | 185
-rw-r--r--  nixpkgs/nixos/tests/acme.nix | 210
-rw-r--r--  nixpkgs/nixos/tests/all-tests.nix | 340
-rw-r--r--  nixpkgs/nixos/tests/ammonite.nix | 20
-rw-r--r--  nixpkgs/nixos/tests/atd.nix | 31
-rw-r--r--  nixpkgs/nixos/tests/automysqlbackup.nix | 38
-rw-r--r--  nixpkgs/nixos/tests/avahi.nix | 67
-rw-r--r--  nixpkgs/nixos/tests/babeld.nix | 148
-rw-r--r--  nixpkgs/nixos/tests/bcachefs.nix | 34
-rw-r--r--  nixpkgs/nixos/tests/beanstalkd.nix | 49
-rw-r--r--  nixpkgs/nixos/tests/bees.nix | 62
-rw-r--r--  nixpkgs/nixos/tests/bind.nix | 27
-rw-r--r--  nixpkgs/nixos/tests/bittorrent.nix | 164
-rw-r--r--  nixpkgs/nixos/tests/boot-stage1.nix | 162
-rw-r--r--  nixpkgs/nixos/tests/boot.nix | 105
-rw-r--r--  nixpkgs/nixos/tests/borgbackup.nix | 175
-rw-r--r--  nixpkgs/nixos/tests/buildbot.nix | 113
-rw-r--r--  nixpkgs/nixos/tests/buildkite-agents.nix | 31
-rw-r--r--  nixpkgs/nixos/tests/caddy.nix | 85
-rw-r--r--  nixpkgs/nixos/tests/cadvisor.nix | 34
-rw-r--r--  nixpkgs/nixos/tests/cage.nix | 43
-rw-r--r--  nixpkgs/nixos/tests/cassandra.nix | 131
-rw-r--r--  nixpkgs/nixos/tests/ceph-multi-node.nix | 233
-rw-r--r--  nixpkgs/nixos/tests/ceph-single-node.nix | 196
-rw-r--r--  nixpkgs/nixos/tests/certmgr.nix | 151
-rw-r--r--  nixpkgs/nixos/tests/cfssl.nix | 67
-rw-r--r--  nixpkgs/nixos/tests/chromium.nix | 266
-rw-r--r--  nixpkgs/nixos/tests/cjdns.nix | 121
-rw-r--r--  nixpkgs/nixos/tests/clickhouse.nix | 31
-rw-r--r--  nixpkgs/nixos/tests/cloud-init.nix | 52
-rw-r--r--  nixpkgs/nixos/tests/cockroachdb.nix | 124
-rw-r--r--  nixpkgs/nixos/tests/codimd.nix | 52
-rw-r--r--  nixpkgs/nixos/tests/common/acme/client/default.nix | 15
-rw-r--r--  nixpkgs/nixos/tests/common/acme/server/default.nix | 135
-rw-r--r--  nixpkgs/nixos/tests/common/acme/server/mkcerts.nix | 68
-rwxr-xr-x  nixpkgs/nixos/tests/common/acme/server/mkcerts.sh | 6
-rw-r--r--  nixpkgs/nixos/tests/common/acme/server/snakeoil-certs.nix | 171
-rw-r--r--  nixpkgs/nixos/tests/common/auto.nix | 68
-rw-r--r--  nixpkgs/nixos/tests/common/ec2.nix | 49
-rw-r--r--  nixpkgs/nixos/tests/common/resolver.nix | 141
-rw-r--r--  nixpkgs/nixos/tests/common/user-account.nix | 15
-rw-r--r--  nixpkgs/nixos/tests/common/webroot/news-rss.xml | 27
-rw-r--r--  nixpkgs/nixos/tests/common/x11.nix | 17
-rw-r--r--  nixpkgs/nixos/tests/consul.nix | 143
-rw-r--r--  nixpkgs/nixos/tests/containers-bridge.nix | 102
-rw-r--r--  nixpkgs/nixos/tests/containers-custom-pkgs.nix | 42
-rw-r--r--  nixpkgs/nixos/tests/containers-ephemeral.nix | 54
-rw-r--r--  nixpkgs/nixos/tests/containers-extra_veth.nix | 94
-rw-r--r--  nixpkgs/nixos/tests/containers-hosts.nix | 52
-rw-r--r--  nixpkgs/nixos/tests/containers-imperative.nix | 149
-rw-r--r--  nixpkgs/nixos/tests/containers-ip.nix | 77
-rw-r--r--  nixpkgs/nixos/tests/containers-macvlans.nix | 86
-rw-r--r--  nixpkgs/nixos/tests/containers-physical_interfaces.nix | 136
-rw-r--r--  nixpkgs/nixos/tests/containers-portforward.nix | 62
-rw-r--r--  nixpkgs/nixos/tests/containers-reloadable.nix | 72
-rw-r--r--  nixpkgs/nixos/tests/containers-restart_networking.nix | 115
-rw-r--r--  nixpkgs/nixos/tests/containers-tmpfs.nix | 93
-rw-r--r--  nixpkgs/nixos/tests/corerad.nix | 70
-rw-r--r--  nixpkgs/nixos/tests/couchdb.nix | 76
-rw-r--r--  nixpkgs/nixos/tests/deluge.nix | 107
-rw-r--r--  nixpkgs/nixos/tests/dhparams.nix | 142
-rw-r--r--  nixpkgs/nixos/tests/dnscrypt-proxy2.nix | 36
-rw-r--r--  nixpkgs/nixos/tests/docker-containers.nix | 27
-rw-r--r--  nixpkgs/nixos/tests/docker-edge.nix | 49
-rw-r--r--  nixpkgs/nixos/tests/docker-preloader.nix | 27
-rw-r--r--  nixpkgs/nixos/tests/docker-registry.nix | 61
-rw-r--r--  nixpkgs/nixos/tests/docker-tools-overlay.nix | 33
-rw-r--r--  nixpkgs/nixos/tests/docker-tools.nix | 158
-rw-r--r--  nixpkgs/nixos/tests/docker.nix | 49
-rw-r--r--  nixpkgs/nixos/tests/documize.nix | 62
-rw-r--r--  nixpkgs/nixos/tests/dokuwiki.nix | 74
-rw-r--r--  nixpkgs/nixos/tests/dovecot.nix | 77
-rw-r--r--  nixpkgs/nixos/tests/ec2.nix | 149
-rw-r--r--  nixpkgs/nixos/tests/ecryptfs.nix | 85
-rw-r--r--  nixpkgs/nixos/tests/elk.nix | 212
-rw-r--r--  nixpkgs/nixos/tests/emacs-daemon.nix | 48
-rw-r--r--  nixpkgs/nixos/tests/env.nix | 36
-rw-r--r--  nixpkgs/nixos/tests/etcd-cluster.nix | 154
-rw-r--r--  nixpkgs/nixos/tests/etcd.nix | 25
-rw-r--r--  nixpkgs/nixos/tests/fancontrol.nix | 28
-rw-r--r--  nixpkgs/nixos/tests/fenics.nix | 50
-rw-r--r--  nixpkgs/nixos/tests/ferm.nix | 74
-rw-r--r--  nixpkgs/nixos/tests/firefox.nix | 39
-rw-r--r--  nixpkgs/nixos/tests/firewall.nix | 65
-rw-r--r--  nixpkgs/nixos/tests/fish.nix | 24
-rw-r--r--  nixpkgs/nixos/tests/flannel.nix | 57
-rw-r--r--  nixpkgs/nixos/tests/fluentd.nix | 49
-rw-r--r--  nixpkgs/nixos/tests/fontconfig-default-fonts.nix | 33
-rw-r--r--  nixpkgs/nixos/tests/freeswitch.nix | 29
-rw-r--r--  nixpkgs/nixos/tests/fsck.nix | 31
-rw-r--r--  nixpkgs/nixos/tests/gerrit.nix | 55
-rw-r--r--  nixpkgs/nixos/tests/gitdaemon.nix | 64
-rw-r--r--  nixpkgs/nixos/tests/gitea.nix | 109
-rw-r--r--  nixpkgs/nixos/tests/gitlab.nix | 97
-rw-r--r--  nixpkgs/nixos/tests/gitolite-fcgiwrap.nix | 93
-rw-r--r--  nixpkgs/nixos/tests/gitolite.nix | 138
-rw-r--r--  nixpkgs/nixos/tests/glusterfs.nix | 68
-rw-r--r--  nixpkgs/nixos/tests/gnome3-xorg.nix | 79
-rw-r--r--  nixpkgs/nixos/tests/gnome3.nix | 76
-rw-r--r--  nixpkgs/nixos/tests/gocd-agent.nix | 48
-rw-r--r--  nixpkgs/nixos/tests/gocd-server.nix | 28
-rw-r--r--  nixpkgs/nixos/tests/google-oslogin/default.nix | 70
-rw-r--r--  nixpkgs/nixos/tests/google-oslogin/server.nix | 29
-rw-r--r--  nixpkgs/nixos/tests/google-oslogin/server.py | 96
-rw-r--r--  nixpkgs/nixos/tests/gotify-server.nix | 45
-rw-r--r--  nixpkgs/nixos/tests/grafana.nix | 97
-rw-r--r--  nixpkgs/nixos/tests/graphite.nix | 50
-rw-r--r--  nixpkgs/nixos/tests/graylog.nix | 115
-rw-r--r--  nixpkgs/nixos/tests/grocy.nix | 47
-rw-r--r--  nixpkgs/nixos/tests/gvisor.nix | 49
-rw-r--r--  nixpkgs/nixos/tests/hadoop/hdfs.nix | 54
-rw-r--r--  nixpkgs/nixos/tests/hadoop/yarn.nix | 46
-rw-r--r--  nixpkgs/nixos/tests/haka.nix | 24
-rw-r--r--  nixpkgs/nixos/tests/handbrake.nix | 31
-rw-r--r--  nixpkgs/nixos/tests/haproxy.nix | 47
-rw-r--r--  nixpkgs/nixos/tests/hardened.nix | 130
-rw-r--r--  nixpkgs/nixos/tests/hibernate.nix | 44
-rw-r--r--  nixpkgs/nixos/tests/hitch/default.nix | 33
-rw-r--r--  nixpkgs/nixos/tests/hitch/example.pem | 53
-rw-r--r--  nixpkgs/nixos/tests/hitch/example/index.txt | 1
-rw-r--r--  nixpkgs/nixos/tests/hocker-fetchdocker/default.nix | 15
-rw-r--r--  nixpkgs/nixos/tests/hocker-fetchdocker/hello-world-container.nix | 19
-rw-r--r--  nixpkgs/nixos/tests/hocker-fetchdocker/machine.nix | 26
-rw-r--r--  nixpkgs/nixos/tests/home-assistant.nix | 111
-rw-r--r--  nixpkgs/nixos/tests/hound.nix | 59
-rw-r--r--  nixpkgs/nixos/tests/hydra/common.nix | 47
-rwxr-xr-x  nixpkgs/nixos/tests/hydra/create-trivial-project.sh | 59
-rw-r--r--  nixpkgs/nixos/tests/hydra/db-migration.nix | 92
-rw-r--r--  nixpkgs/nixos/tests/hydra/default.nix | 59
-rw-r--r--  nixpkgs/nixos/tests/i3wm.nix | 46
-rw-r--r--  nixpkgs/nixos/tests/icingaweb2.nix | 71
-rw-r--r--  nixpkgs/nixos/tests/iftop.nix | 33
-rw-r--r--  nixpkgs/nixos/tests/ihatemoney.nix | 55
-rw-r--r--  nixpkgs/nixos/tests/incron.nix | 52
-rw-r--r--  nixpkgs/nixos/tests/influxdb.nix | 40
-rw-r--r--  nixpkgs/nixos/tests/initrd-network-ssh/default.nix | 75
-rw-r--r--  nixpkgs/nixos/tests/initrd-network-ssh/generate-keys.nix | 10
-rw-r--r--  nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519 | 7
-rw-r--r--  nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519.pub | 1
-rw-r--r--  nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key | 7
-rw-r--r--  nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key.pub | 1
-rw-r--r--  nixpkgs/nixos/tests/initrd-network.nix | 33
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/colord.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/default.nix | 99
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/flatpak-builder.nix | 14
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/flatpak.nix | 19
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/fwupd.nix | 12
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/gcab.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/gdk-pixbuf.nix | 13
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/gjs.nix | 6
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/glib-networking.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/glib-testing.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/gnome-photos.nix | 35
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/graphene.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/ibus.nix | 20
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/libgdata.nix | 11
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/libjcat.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/libxmlb.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/malcontent.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/ostree.nix | 23
-rw-r--r--  nixpkgs/nixos/tests/installed-tests/xdg-desktop-portal.nix | 9
-rw-r--r--  nixpkgs/nixos/tests/installer.nix | 773
-rw-r--r--  nixpkgs/nixos/tests/iodine.nix | 64
-rw-r--r--  nixpkgs/nixos/tests/ipfs.nix | 55
-rw-r--r--  nixpkgs/nixos/tests/ipv6.nix | 91
-rw-r--r--  nixpkgs/nixos/tests/jackett.nix | 19
-rw-r--r--  nixpkgs/nixos/tests/jellyfin.nix | 16
-rw-r--r--  nixpkgs/nixos/tests/jenkins.nix | 49
-rw-r--r--  nixpkgs/nixos/tests/jirafeau.nix | 22
-rw-r--r--  nixpkgs/nixos/tests/k3s.nix | 78
-rw-r--r--  nixpkgs/nixos/tests/kafka.nix | 93
-rw-r--r--  nixpkgs/nixos/tests/keepalived.nix | 42
-rw-r--r--  nixpkgs/nixos/tests/kerberos/default.nix | 7
-rw-r--r--  nixpkgs/nixos/tests/kerberos/heimdal.nix | 42
-rw-r--r--  nixpkgs/nixos/tests/kerberos/mit.nix | 41
-rw-r--r--  nixpkgs/nixos/tests/kernel-latest.nix | 17
-rw-r--r--  nixpkgs/nixos/tests/kernel-lts.nix | 17
-rw-r--r--  nixpkgs/nixos/tests/kernel-testing.nix | 17
-rw-r--r--  nixpkgs/nixos/tests/kexec.nix | 25
-rw-r--r--  nixpkgs/nixos/tests/keymap.nix | 175
-rw-r--r--  nixpkgs/nixos/tests/knot.nix | 210
-rw-r--r--  nixpkgs/nixos/tests/krb5/default.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/krb5/deprecated-config.nix | 50
-rw-r--r--  nixpkgs/nixos/tests/krb5/example-config.nix | 108
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/base.nix | 111
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/default.nix | 7
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/dns.nix | 128
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/e2e.nix | 40
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/rbac.nix | 140
-rw-r--r--  nixpkgs/nixos/tests/ldap.nix | 405
-rw-r--r--  nixpkgs/nixos/tests/leaps.nix | 32
-rw-r--r--  nixpkgs/nixos/tests/lidarr.nix | 20
-rw-r--r--  nixpkgs/nixos/tests/lightdm.nix | 28
-rw-r--r--  nixpkgs/nixos/tests/limesurvey.nix | 26
-rw-r--r--  nixpkgs/nixos/tests/login.nix | 59
-rw-r--r--  nixpkgs/nixos/tests/loki.nix | 39
-rw-r--r--  nixpkgs/nixos/tests/lorri/builder.sh | 3
-rw-r--r--  nixpkgs/nixos/tests/lorri/default.nix | 26
-rw-r--r--  nixpkgs/nixos/tests/lorri/fake-shell.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/magic-wormhole-mailbox-server.nix | 38
-rw-r--r--  nixpkgs/nixos/tests/magnetico.nix | 40
-rw-r--r--  nixpkgs/nixos/tests/mailcatcher.nix | 30
-rw-r--r--  nixpkgs/nixos/tests/make-test-python.nix | 9
-rw-r--r--  nixpkgs/nixos/tests/make-test.nix | 9
-rw-r--r--  nixpkgs/nixos/tests/mathics.nix | 20
-rw-r--r--  nixpkgs/nixos/tests/matomo.nix | 48
-rw-r--r--  nixpkgs/nixos/tests/matrix-synapse.nix | 90
-rw-r--r--  nixpkgs/nixos/tests/mediawiki.nix | 21
-rw-r--r--  nixpkgs/nixos/tests/memcached.nix | 24
-rw-r--r--  nixpkgs/nixos/tests/mesos.nix | 92
-rw-r--r--  nixpkgs/nixos/tests/mesos_test.py | 72
-rw-r--r--  nixpkgs/nixos/tests/metabase.nix | 20
-rw-r--r--  nixpkgs/nixos/tests/minidlna.nix | 39
-rw-r--r--  nixpkgs/nixos/tests/miniflux.nix | 56
-rw-r--r--  nixpkgs/nixos/tests/minio.nix | 55
-rw-r--r--  nixpkgs/nixos/tests/misc.nix | 130
-rw-r--r--  nixpkgs/nixos/tests/moinmoin.nix | 28
-rw-r--r--  nixpkgs/nixos/tests/mongodb.nix | 52
-rw-r--r--  nixpkgs/nixos/tests/moodle.nix | 22
-rw-r--r--  nixpkgs/nixos/tests/morty.nix | 30
-rw-r--r--  nixpkgs/nixos/tests/mosquitto.nix | 90
-rw-r--r--  nixpkgs/nixos/tests/mpd.nix | 132
-rw-r--r--  nixpkgs/nixos/tests/mpich-example.c | 21
-rw-r--r--  nixpkgs/nixos/tests/mumble.nix | 73
-rw-r--r--  nixpkgs/nixos/tests/munin.nix | 44
-rw-r--r--  nixpkgs/nixos/tests/mutable-users.nix | 45
-rw-r--r--  nixpkgs/nixos/tests/mxisd.nix | 30
-rw-r--r--  nixpkgs/nixos/tests/mysql-backup.nix | 56
-rw-r--r--  nixpkgs/nixos/tests/mysql-replication.nix | 89
-rw-r--r--  nixpkgs/nixos/tests/mysql.nix | 143
-rw-r--r--  nixpkgs/nixos/tests/nagios.nix | 116
-rw-r--r--  nixpkgs/nixos/tests/nat.nix | 120
-rw-r--r--  nixpkgs/nixos/tests/ndppd.nix | 60
-rw-r--r--  nixpkgs/nixos/tests/neo4j.nix | 20
-rw-r--r--  nixpkgs/nixos/tests/netdata.nix | 38
-rw-r--r--  nixpkgs/nixos/tests/networking-proxy.nix | 135
-rw-r--r--  nixpkgs/nixos/tests/networking.nix | 699
-rw-r--r--  nixpkgs/nixos/tests/nextcloud/basic.nix | 64
-rw-r--r--  nixpkgs/nixos/tests/nextcloud/default.nix | 9
-rw-r--r--  nixpkgs/nixos/tests/nextcloud/with-mysql-and-memcached.nix | 100
-rw-r--r--  nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix | 100
-rw-r--r--  nixpkgs/nixos/tests/nexus.nix | 32
-rw-r--r--  nixpkgs/nixos/tests/nfs/default.nix | 9
-rw-r--r--  nixpkgs/nixos/tests/nfs/kerberos.nix | 133
-rw-r--r--  nixpkgs/nixos/tests/nfs/simple.nix | 94
-rw-r--r--  nixpkgs/nixos/tests/nghttpx.nix | 61
-rw-r--r--  nixpkgs/nixos/tests/nginx-etag.nix | 89
-rw-r--r--  nixpkgs/nixos/tests/nginx-pubhtml.nix | 20
-rw-r--r--  nixpkgs/nixos/tests/nginx-sso.nix | 48
-rw-r--r--  nixpkgs/nixos/tests/nginx.nix | 129
-rw-r--r--  nixpkgs/nixos/tests/nix-ssh-serve.nix | 45
-rw-r--r--  nixpkgs/nixos/tests/nixos-generate-config.nix | 26
-rw-r--r--  nixpkgs/nixos/tests/novacomd.nix | 28
-rw-r--r--  nixpkgs/nixos/tests/nsd.nix | 99
-rw-r--r--  nixpkgs/nixos/tests/nzbget.nix | 30
-rw-r--r--  nixpkgs/nixos/tests/openarena.nix | 71
-rw-r--r--  nixpkgs/nixos/tests/openldap.nix | 33
-rw-r--r--  nixpkgs/nixos/tests/opensmtpd.nix | 125
-rw-r--r--  nixpkgs/nixos/tests/openssh.nix | 112
-rw-r--r--  nixpkgs/nixos/tests/openstack-image.nix | 94
-rw-r--r--  nixpkgs/nixos/tests/orangefs.nix | 82
-rw-r--r--  nixpkgs/nixos/tests/os-prober.nix | 120
-rw-r--r--  nixpkgs/nixos/tests/osrm-backend.nix | 57
-rw-r--r--  nixpkgs/nixos/tests/overlayfs.nix | 50
-rw-r--r--  nixpkgs/nixos/tests/packagekit.nix | 26
-rw-r--r--  nixpkgs/nixos/tests/pam-oath-login.nix | 108
-rw-r--r--  nixpkgs/nixos/tests/pam-u2f.nix | 25
-rw-r--r--  nixpkgs/nixos/tests/pantheon.nix | 59
-rw-r--r--  nixpkgs/nixos/tests/paperless.nix | 36
-rw-r--r--  nixpkgs/nixos/tests/partition.nix | 247
-rw-r--r--  nixpkgs/nixos/tests/pdns-recursor.nix | 12
-rw-r--r--  nixpkgs/nixos/tests/peerflix.nix | 23
-rw-r--r--  nixpkgs/nixos/tests/pgjwt.nix | 34
-rw-r--r--  nixpkgs/nixos/tests/pgmanage.nix | 41
-rw-r--r--  nixpkgs/nixos/tests/php/default.nix | 7
-rw-r--r--  nixpkgs/nixos/tests/php/fpm.nix | 55
-rw-r--r--  nixpkgs/nixos/tests/php/pcre.nix | 37
-rw-r--r--  nixpkgs/nixos/tests/plasma5.nix | 59
-rw-r--r--  nixpkgs/nixos/tests/plotinus.nix | 28
-rw-r--r--  nixpkgs/nixos/tests/postgis.nix | 29
-rw-r--r--  nixpkgs/nixos/tests/postgresql-wal-receiver.nix | 99
-rw-r--r--  nixpkgs/nixos/tests/postgresql.nix | 90
-rw-r--r--  nixpkgs/nixos/tests/powerdns.nix | 13
-rw-r--r--  nixpkgs/nixos/tests/pppd.nix | 62
-rw-r--r--  nixpkgs/nixos/tests/predictable-interface-names.nix | 33
-rw-r--r--  nixpkgs/nixos/tests/printing.nix | 137
-rw-r--r--  nixpkgs/nixos/tests/prometheus-exporters.nix | 552
-rw-r--r--  nixpkgs/nixos/tests/prometheus.nix | 245
-rw-r--r--  nixpkgs/nixos/tests/proxy.nix | 90
-rw-r--r--  nixpkgs/nixos/tests/quagga.nix | 96
-rw-r--r--  nixpkgs/nixos/tests/quorum.nix | 79
-rw-r--r--  nixpkgs/nixos/tests/rabbitmq.nix | 21
-rw-r--r--  nixpkgs/nixos/tests/radarr.nix | 18
-rw-r--r--  nixpkgs/nixos/tests/radicale.nix | 122
-rw-r--r--  nixpkgs/nixos/tests/redis.nix | 24
-rw-r--r--  nixpkgs/nixos/tests/redmine.nix | 76
-rw-r--r--  nixpkgs/nixos/tests/resolv.nix | 46
-rw-r--r--  nixpkgs/nixos/tests/restic.nix | 63
-rw-r--r--  nixpkgs/nixos/tests/riak.nix | 18
-rw-r--r--  nixpkgs/nixos/tests/roundcube.nix | 31
-rw-r--r--  nixpkgs/nixos/tests/rspamd.nix | 304
-rw-r--r--  nixpkgs/nixos/tests/rss2email.nix | 66
-rw-r--r--  nixpkgs/nixos/tests/rsyslogd.nix | 40
-rw-r--r--  nixpkgs/nixos/tests/run-in-machine.nix | 23
-rw-r--r--  nixpkgs/nixos/tests/rxe.nix | 47
-rw-r--r--  nixpkgs/nixos/tests/samba.nix | 47
-rw-r--r--  nixpkgs/nixos/tests/sanoid.nix | 90
-rw-r--r--  nixpkgs/nixos/tests/sddm.nix | 69
-rw-r--r--  nixpkgs/nixos/tests/service-runner.nix | 36
-rw-r--r--  nixpkgs/nixos/tests/shiori.nix | 81
-rw-r--r--  nixpkgs/nixos/tests/signal-desktop.nix | 38
-rw-r--r--  nixpkgs/nixos/tests/simple.nix | 17
-rw-r--r--  nixpkgs/nixos/tests/slurm.nix | 141
-rw-r--r--  nixpkgs/nixos/tests/smokeping.nix | 33
-rw-r--r--  nixpkgs/nixos/tests/snapper.nix | 35
-rw-r--r--  nixpkgs/nixos/tests/solr.nix | 56
-rw-r--r--  nixpkgs/nixos/tests/sonarr.nix | 18
-rw-r--r--  nixpkgs/nixos/tests/spacecookie.nix | 51
-rw-r--r--  nixpkgs/nixos/tests/specialisation.nix | 43
-rw-r--r--  nixpkgs/nixos/tests/spike.nix | 22
-rw-r--r--  nixpkgs/nixos/tests/ssh-keys.nix | 15
-rw-r--r--  nixpkgs/nixos/tests/strongswan-swanctl.nix | 148
-rw-r--r--  nixpkgs/nixos/tests/sudo.nix | 83
-rw-r--r--  nixpkgs/nixos/tests/switch-test.nix | 38
-rw-r--r--  nixpkgs/nixos/tests/sympa.nix | 36
-rw-r--r--  nixpkgs/nixos/tests/syncthing-init.nix | 32
-rw-r--r--  nixpkgs/nixos/tests/syncthing-relay.nix | 26
-rw-r--r--  nixpkgs/nixos/tests/systemd-analyze.nix | 46
-rw-r--r--  nixpkgs/nixos/tests/systemd-confinement.nix | 168
-rw-r--r--  nixpkgs/nixos/tests/systemd-networkd-vrf.nix | 221
-rw-r--r--  nixpkgs/nixos/tests/systemd-networkd.nix | 113
-rw-r--r--  nixpkgs/nixos/tests/systemd-nspawn.nix | 60
-rw-r--r--  nixpkgs/nixos/tests/systemd-timesyncd.nix | 52
-rw-r--r--  nixpkgs/nixos/tests/systemd.nix | 121
-rw-r--r--  nixpkgs/nixos/tests/taskserver.nix | 290
-rw-r--r--  nixpkgs/nixos/tests/telegraf.nix | 30
-rw-r--r--  nixpkgs/nixos/tests/testdb.sql | 11
-rw-r--r--  nixpkgs/nixos/tests/tiddlywiki.nix | 69
-rw-r--r--  nixpkgs/nixos/tests/timezone.nix | 50
-rw-r--r--  nixpkgs/nixos/tests/tinydns.nix | 26
-rw-r--r--  nixpkgs/nixos/tests/tor.nix | 30
-rw-r--r--  nixpkgs/nixos/tests/trac.nix | 19
-rw-r--r--  nixpkgs/nixos/tests/traefik.nix | 87
-rw-r--r--  nixpkgs/nixos/tests/transmission.nix | 21
-rw-r--r--  nixpkgs/nixos/tests/trezord.nix | 20
-rw-r--r--  nixpkgs/nixos/tests/trickster.nix | 37
-rw-r--r--  nixpkgs/nixos/tests/trilium-server.nix | 53
-rw-r--r--  nixpkgs/nixos/tests/tuptime.nix | 29
-rw-r--r--  nixpkgs/nixos/tests/udisks2.nix | 69
-rw-r--r--  nixpkgs/nixos/tests/upnp.nix | 96
-rw-r--r--  nixpkgs/nixos/tests/uwsgi.nix | 38
-rw-r--r--  nixpkgs/nixos/tests/vault.nix | 23
-rw-r--r--  nixpkgs/nixos/tests/victoriametrics.nix | 31
-rw-r--r--  nixpkgs/nixos/tests/virtualbox.nix | 526
-rw-r--r--  nixpkgs/nixos/tests/wireguard/default.nix | 71
-rw-r--r--  nixpkgs/nixos/tests/wireguard/generated.nix | 61
-rw-r--r--  nixpkgs/nixos/tests/wireguard/make-peer.nix | 23
-rw-r--r--  nixpkgs/nixos/tests/wireguard/namespaces.nix | 78
-rw-r--r--  nixpkgs/nixos/tests/wireguard/snakeoil-keys.nix | 11
-rw-r--r--  nixpkgs/nixos/tests/wireguard/wg-quick.nix | 63
-rw-r--r--  nixpkgs/nixos/tests/wordpress.nix | 57
-rw-r--r--  nixpkgs/nixos/tests/xandikos.nix | 70
-rw-r--r--  nixpkgs/nixos/tests/xautolock.nix | 24
-rw-r--r--  nixpkgs/nixos/tests/xfce.nix | 46
-rw-r--r--  nixpkgs/nixos/tests/xmonad.nix | 41
-rw-r--r--  nixpkgs/nixos/tests/xmpp/ejabberd.nix | 268
-rw-r--r--  nixpkgs/nixos/tests/xmpp/prosody-mysql.nix | 77
-rw-r--r--  nixpkgs/nixos/tests/xmpp/prosody.nix | 46
-rw-r--r--  nixpkgs/nixos/tests/xmpp/xmpp-sendmessage.nix | 46
-rw-r--r--  nixpkgs/nixos/tests/xrdp.nix | 47
-rw-r--r--  nixpkgs/nixos/tests/xss-lock.nix | 44
-rw-r--r--  nixpkgs/nixos/tests/yabar.nix | 33
-rw-r--r--  nixpkgs/nixos/tests/yggdrasil.nix | 125
-rw-r--r--  nixpkgs/nixos/tests/zfs.nix | 75
-rw-r--r--  nixpkgs/nixos/tests/zookeeper.nix | 34
-rw-r--r--  nixpkgs/nixos/tests/zsh-history.nix | 35
376 files changed, 26775 insertions(+), 0 deletions(-)
diff --git a/nixpkgs/nixos/tests/3proxy.nix b/nixpkgs/nixos/tests/3proxy.nix
new file mode 100644
index 000000000000..3e2061d7e42f
--- /dev/null
+++ b/nixpkgs/nixos/tests/3proxy.nix
@@ -0,0 +1,185 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "3proxy";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ misuzu ];
+  };
+
+  nodes = {
+    peer0 = { lib, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.1";
+            prefixLength = 24;
+          }
+          {
+            address = "216.58.211.111";
+            prefixLength = 24;
+          }
+        ];
+      };
+    };
+
+    peer1 = { lib, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.2";
+            prefixLength = 24;
+          }
+          {
+            address = "216.58.211.112";
+            prefixLength = 24;
+          }
+        ];
+      };
+      # Test that binding to [::] works when IPv6 is disabled.
+      networking.enableIPv6 = false;
+      services._3proxy = {
+        enable = true;
+        services = [
+          {
+            type = "admin";
+            bindPort = 9999;
+            auth = [ "none" ];
+          }
+          {
+            type = "proxy";
+            bindPort = 3128;
+            auth = [ "none" ];
+          }
+        ];
+      };
+      networking.firewall.allowedTCPPorts = [ 3128 9999 ];
+    };
+
+    peer2 = { lib, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.3";
+            prefixLength = 24;
+          }
+          {
+            address = "216.58.211.113";
+            prefixLength = 24;
+          }
+        ];
+      };
+      services._3proxy = {
+        enable = true;
+        services = [
+          {
+            type = "admin";
+            bindPort = 9999;
+            auth = [ "none" ];
+          }
+          {
+            type = "proxy";
+            bindPort = 3128;
+            auth = [ "iponly" ];
+            acl = [
+              {
+                rule = "allow";
+              }
+            ];
+          }
+        ];
+      };
+      networking.firewall.allowedTCPPorts = [ 3128 9999 ];
+    };
+
+    peer3 = { lib, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.4";
+            prefixLength = 24;
+          }
+          {
+            address = "216.58.211.114";
+            prefixLength = 24;
+          }
+        ];
+      };
+      services._3proxy = {
+        enable = true;
+        usersFile = pkgs.writeText "3proxy.passwd" ''
+          admin:CR:$1$.GUV4Wvk$WnEVQtaqutD9.beO5ar1W/
+        '';
+        services = [
+          {
+            type = "admin";
+            bindPort = 9999;
+            auth = [ "none" ];
+          }
+          {
+            type = "proxy";
+            bindPort = 3128;
+            auth = [ "strong" ];
+            acl = [
+              {
+                rule = "allow";
+              }
+            ];
+          }
+        ];
+      };
+      networking.firewall.allowedTCPPorts = [ 3128 9999 ];
+    };
+  };
+
+  testScript = ''
+    peer1.wait_for_unit("3proxy.service")
+    peer1.wait_for_open_port(9999)
+
+    # test none auth
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://216.58.211.112:9999"
+    )
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://192.168.0.2:9999"
+    )
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://127.0.0.1:9999"
+    )
+
+    peer2.wait_for_unit("3proxy.service")
+    peer2.wait_for_open_port(9999)
+
+    # test iponly auth
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://216.58.211.113:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://192.168.0.3:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://127.0.0.1:9999"
+    )
+
+    peer3.wait_for_unit("3proxy.service")
+    peer3.wait_for_open_port(9999)
+
+    # test strong auth
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://admin:bigsecret\@192.168.0.4:3128 -S -O /dev/null http://216.58.211.114:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://admin:bigsecret\@192.168.0.4:3128 -S -O /dev/null http://192.168.0.4:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://216.58.211.114:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://192.168.0.4:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://127.0.0.1:9999"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/acme.nix b/nixpkgs/nixos/tests/acme.nix
new file mode 100644
index 000000000000..693f02962f45
--- /dev/null
+++ b/nixpkgs/nixos/tests/acme.nix
@@ -0,0 +1,210 @@
+let
+  commonConfig = ./common/acme/client;
+
+  dnsScript = {writeScript, dnsAddress, bash, curl}: writeScript "dns-hook.sh" ''
+    #!${bash}/bin/bash
+    set -euo pipefail
+    echo '[INFO]' "[$2]" 'dns-hook.sh' $*
+    if [ "$1" = "present" ]; then
+      ${curl}/bin/curl --data '{"host": "'"$2"'", "value": "'"$3"'"}' http://${dnsAddress}:8055/set-txt
+    else
+      ${curl}/bin/curl --data '{"host": "'"$2"'"}' http://${dnsAddress}:8055/clear-txt
+    fi
+  '';
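+  # A sketch of the hook's calling convention, as implied by the branches
+  # above: the ACME client's "exec" DNS provider is expected to run
+  #   dns-hook.sh present <fqdn> <txt-value>   # publish the challenge record
+  #   dns-hook.sh cleanup <fqdn>               # remove it again afterwards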
+
+in import ./make-test-python.nix {
+  name = "acme";
+
+  nodes = rec {
+    acme = { nodes, lib, ... }: {
+      imports = [ ./common/acme/server ];
+      networking.nameservers = lib.mkForce [
+        nodes.dnsserver.config.networking.primaryIPAddress
+      ];
+    };
+
+    dnsserver = { nodes, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 8055 53 ];
+      networking.firewall.allowedUDPPorts = [ 53 ];
+      systemd.services.pebble-challtestsrv = {
+        enable = true;
+        description = "Pebble ACME challenge test server";
+        wantedBy = [ "network.target" ];
+        serviceConfig = {
+          ExecStart = "${pkgs.pebble}/bin/pebble-challtestsrv -dns01 ':53' -defaultIPv6 '' -defaultIPv4 '${nodes.webserver.config.networking.primaryIPAddress}'";
+          # Required to bind on privileged ports.
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        };
+      };
+    };
+
+    acmeStandalone = { nodes, lib, config, pkgs, ... }: {
+      imports = [ commonConfig ];
+      networking.nameservers = lib.mkForce [
+        nodes.dnsserver.config.networking.primaryIPAddress
+      ];
+      networking.firewall.allowedTCPPorts = [ 80 ];
+      security.acme.certs."standalone.test" = {
+        webroot = "/var/lib/acme/acme-challenges";
+      };
+      systemd.targets."acme-finished-standalone.test" = {};
+      systemd.services."acme-standalone.test" = {
+        wants = [ "acme-finished-standalone.test.target" ];
+        before = [ "acme-finished-standalone.test.target" ];
+      };
+      services.nginx.enable = true;
+      services.nginx.virtualHosts."standalone.test" = {
+        locations."/.well-known/acme-challenge".root = "/var/lib/acme/acme-challenges";
+      };
+    };
+
+    webserver = { nodes, config, pkgs, lib, ... }: {
+      imports = [ commonConfig ];
+      networking.firewall.allowedTCPPorts = [ 80 443 ];
+      networking.nameservers = lib.mkForce [
+        nodes.dnsserver.config.networking.primaryIPAddress
+      ];
+
+      # A target remains active after being reached. Use this to probe that
+      # a service fired, even though it is not RemainAfterExit.
+      systemd.targets."acme-finished-a.example.test" = {};
+      systemd.services."acme-a.example.test" = {
+        wants = [ "acme-finished-a.example.test.target" ];
+        before = [ "acme-finished-a.example.test.target" ];
+        after = [ "nginx.service" ];
+      };
+
+      services.nginx.enable = true;
+
+      services.nginx.virtualHosts."a.example.test" = {
+        enableACME = true;
+        forceSSL = true;
+        locations."/".root = pkgs.runCommand "docroot" {} ''
+          mkdir -p "$out"
+          echo hello world > "$out/index.html"
+        '';
+      };
+
+      security.acme.server = "https://acme.test/dir";
+
+      specialisation.second-cert.configuration = {pkgs, ...}: {
+        systemd.targets."acme-finished-b.example.test" = {};
+        systemd.services."acme-b.example.test" = {
+          wants = [ "acme-finished-b.example.test.target" ];
+          before = [ "acme-finished-b.example.test.target" ];
+          after = [ "nginx.service" ];
+        };
+        services.nginx.virtualHosts."b.example.test" = {
+          enableACME = true;
+          forceSSL = true;
+          locations."/".root = pkgs.runCommand "docroot" {} ''
+            mkdir -p "$out"
+            echo hello world > "$out/index.html"
+          '';
+        };
+      };
+      specialisation.dns-01.configuration = {pkgs, config, nodes, lib, ...}: {
+        security.acme.certs."example.test" = {
+          domain = "*.example.test";
+          dnsProvider = "exec";
+          dnsPropagationCheck = false;
+          credentialsFile = with pkgs; writeText "wildcard.env" ''
+            EXEC_PATH=${dnsScript { inherit writeScript bash curl; dnsAddress = nodes.dnsserver.config.networking.primaryIPAddress; }}
+          '';
+          user = config.services.nginx.user;
+          group = config.services.nginx.group;
+        };
+        systemd.targets."acme-finished-example.test" = {};
+        systemd.services."acme-example.test" = {
+          wants = [ "acme-finished-example.test.target" ];
+          before = [ "acme-finished-example.test.target" "nginx.service" ];
+          wantedBy = [ "nginx.service" ];
+        };
+        services.nginx.virtualHosts."c.example.test" = {
+          forceSSL = true;
+          sslCertificate = config.security.acme.certs."example.test".directory + "/cert.pem";
+          sslTrustedCertificate = config.security.acme.certs."example.test".directory + "/full.pem";
+          sslCertificateKey = config.security.acme.certs."example.test".directory + "/key.pem";
+          locations."/".root = pkgs.runCommand "docroot" {} ''
+            mkdir -p "$out"
+            echo hello world > "$out/index.html"
+          '';
+        };
+      };
+    };
+
+    client = {nodes, lib, ...}: {
+      imports = [ commonConfig ];
+      networking.nameservers = lib.mkForce [
+        nodes.dnsserver.config.networking.primaryIPAddress
+      ];
+    };
+  };
+
+  testScript = {nodes, ...}:
+    let
+      newServerSystem = nodes.webserver.config.system.build.toplevel;
+      switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test";
+    in
+    # Note: wait_for_unit does not work for oneshot services that do not have RemainAfterExit=true,
+    # because a oneshot goes from inactive => activating => inactive and never
+    # reaches the active state. To work around this, we create some mock target units which
+    # get pulled in by the oneshot units. The target units linger after activation, and hence we
+    # can use them to probe that a oneshot fired. It is a bit ugly, but it is the best we can do.
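+    # The pattern, as used in the node configs above (sketch):
+    #   systemd.targets."acme-finished-a.example.test" = {};
+    #   systemd.services."acme-a.example.test" = {
+    #     wants = [ "acme-finished-a.example.test.target" ];
+    #     before = [ "acme-finished-a.example.test.target" ];
+    #   };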
+    ''
+      client.start()
+      dnsserver.start()
+
+      acme.wait_for_unit("default.target")
+      dnsserver.wait_for_unit("pebble-challtestsrv.service")
+      client.succeed(
+          'curl --data \'{"host": "acme.test", "addresses": ["${nodes.acme.config.networking.primaryIPAddress}"]}\' http://${nodes.dnsserver.config.networking.primaryIPAddress}:8055/add-a'
+      )
+      client.succeed(
+          'curl --data \'{"host": "standalone.test", "addresses": ["${nodes.acmeStandalone.config.networking.primaryIPAddress}"]}\' http://${nodes.dnsserver.config.networking.primaryIPAddress}:8055/add-a'
+      )
+
+      acme.start()
+      acmeStandalone.start()
+
+      acme.wait_for_unit("default.target")
+      acme.wait_for_unit("pebble.service")
+
+      with subtest("can request certificate with HTTPS-01 challenge"):
+          acmeStandalone.wait_for_unit("default.target")
+          acmeStandalone.succeed("systemctl start acme-standalone.test.service")
+          acmeStandalone.wait_for_unit("acme-finished-standalone.test.target")
+
+      client.wait_for_unit("default.target")
+
+      client.succeed("curl https://acme.test:15000/roots/0 > /tmp/ca.crt")
+      client.succeed("curl https://acme.test:15000/intermediate-keys/0 >> /tmp/ca.crt")
+
+      with subtest("Can request certificate for nginx service"):
+          webserver.wait_for_unit("acme-finished-a.example.test.target")
+          client.succeed(
+              "curl --cacert /tmp/ca.crt https://a.example.test/ | grep -qF 'hello world'"
+          )
+
+      with subtest("Can add another certificate for nginx service"):
+          webserver.succeed(
+              "/run/current-system/specialisation/second-cert/bin/switch-to-configuration test"
+          )
+          webserver.wait_for_unit("acme-finished-b.example.test.target")
+          client.succeed(
+              "curl --cacert /tmp/ca.crt https://b.example.test/ | grep -qF 'hello world'"
+          )
+
+      with subtest("Can request wildcard certificates using DNS-01 challenge"):
+          webserver.succeed(
+              "${switchToNewServer}"
+          )
+          webserver.succeed(
+              "/run/current-system/specialisation/dns-01/bin/switch-to-configuration test"
+          )
+          webserver.wait_for_unit("acme-finished-example.test.target")
+          client.succeed(
+              "curl --cacert /tmp/ca.crt https://c.example.test/ | grep -qF 'hello world'"
+          )
+    '';
+}
diff --git a/nixpkgs/nixos/tests/all-tests.nix b/nixpkgs/nixos/tests/all-tests.nix
new file mode 100644
index 000000000000..30229a3a5b2f
--- /dev/null
+++ b/nixpkgs/nixos/tests/all-tests.nix
@@ -0,0 +1,340 @@
+{ system, pkgs, callTest }:
+# The return value of this function will be an attrset with arbitrary depth and
+# the `anything` returned by callTest at its test leaves.
+# The tests not supported by `system` will be replaced with `{}`, so that
+# `passthru.tests` can contain links to those without breaking on architectures
+# where said tests are unsupported.
+# Example callTest that just extracts the derivation from the test:
+#   callTest = t: t.test;
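+# With that callTest, the result is an attrset of test derivations; a sketch
+# of how a consumer might use this file:
+#   tests = import ./all-tests.nix { inherit system pkgs; callTest = t: t.test; };
+#   tests.login        # a single test derivation
+#   tests.hadoop.hdfs  # a nested test, one attrset level deep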
+
+with pkgs.lib;
+
+let
+  discoverTests = val:
+    if !isAttrs val then val
+    else if hasAttr "test" val then callTest val
+    else mapAttrs (n: s: discoverTests s) val;
+  handleTest = path: args:
+    discoverTests (import path ({ inherit system pkgs; } // args));
+  handleTestOn = systems: path: args:
+    if elem system systems then handleTest path args
+    else {};
+in
+{
+  _3proxy = handleTest ./3proxy.nix {};
+  acme = handleTest ./acme.nix {};
+  atd = handleTest ./atd.nix {};
+  automysqlbackup = handleTest ./automysqlbackup.nix {};
+  avahi = handleTest ./avahi.nix {};
+  babeld = handleTest ./babeld.nix {};
+  bcachefs = handleTestOn ["x86_64-linux"] ./bcachefs.nix {}; # linux-4.18.2018.10.12 is unsupported on aarch64
+  beanstalkd = handleTest ./beanstalkd.nix {};
+  bees = handleTest ./bees.nix {};
+  bind = handleTest ./bind.nix {};
+  bittorrent = handleTest ./bittorrent.nix {};
+  buildkite-agents = handleTest ./buildkite-agents.nix {};
+  boot = handleTestOn ["x86_64-linux"] ./boot.nix {}; # syslinux is unsupported on aarch64
+  boot-stage1 = handleTest ./boot-stage1.nix {};
+  borgbackup = handleTest ./borgbackup.nix {};
+  buildbot = handleTest ./buildbot.nix {};
+  caddy = handleTest ./caddy.nix {};
+  cadvisor = handleTestOn ["x86_64-linux"] ./cadvisor.nix {};
+  cage = handleTest ./cage.nix {};
+  cassandra = handleTest ./cassandra.nix {};
+  ceph-single-node = handleTestOn ["x86_64-linux"] ./ceph-single-node.nix {};
+  ceph-multi-node = handleTestOn ["x86_64-linux"] ./ceph-multi-node.nix {};
+  certmgr = handleTest ./certmgr.nix {};
+  cfssl = handleTestOn ["x86_64-linux"] ./cfssl.nix {};
+  chromium = (handleTestOn ["x86_64-linux"] ./chromium.nix {}).stable or {};
+  cjdns = handleTest ./cjdns.nix {};
+  clickhouse = handleTest ./clickhouse.nix {};
+  cloud-init = handleTest ./cloud-init.nix {};
+  codimd = handleTest ./codimd.nix {};
+  consul = handleTest ./consul.nix {};
+  cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
+  containers-bridge = handleTest ./containers-bridge.nix {};
+  containers-custom-pkgs = handleTest ./containers-custom-pkgs.nix {};
+  containers-ephemeral = handleTest ./containers-ephemeral.nix {};
+  containers-extra_veth = handleTest ./containers-extra_veth.nix {};
+  containers-hosts = handleTest ./containers-hosts.nix {};
+  containers-imperative = handleTest ./containers-imperative.nix {};
+  containers-ip = handleTest ./containers-ip.nix {};
+  containers-macvlans = handleTest ./containers-macvlans.nix {};
+  containers-physical_interfaces = handleTest ./containers-physical_interfaces.nix {};
+  containers-portforward = handleTest ./containers-portforward.nix {};
+  containers-restart_networking = handleTest ./containers-restart_networking.nix {};
+  containers-tmpfs = handleTest ./containers-tmpfs.nix {};
+  corerad = handleTest ./corerad.nix {};
+  couchdb = handleTest ./couchdb.nix {};
+  deluge = handleTest ./deluge.nix {};
+  dhparams = handleTest ./dhparams.nix {};
+  dnscrypt-proxy2 = handleTestOn ["x86_64-linux"] ./dnscrypt-proxy2.nix {};
+  docker = handleTestOn ["x86_64-linux"] ./docker.nix {};
+  docker-containers = handleTestOn ["x86_64-linux"] ./docker-containers.nix {};
+  docker-edge = handleTestOn ["x86_64-linux"] ./docker-edge.nix {};
+  docker-preloader = handleTestOn ["x86_64-linux"] ./docker-preloader.nix {};
+  docker-registry = handleTest ./docker-registry.nix {};
+  docker-tools = handleTestOn ["x86_64-linux"] ./docker-tools.nix {};
+  docker-tools-overlay = handleTestOn ["x86_64-linux"] ./docker-tools-overlay.nix {};
+  documize = handleTest ./documize.nix {};
+  dokuwiki = handleTest ./dokuwiki.nix {};
+  dovecot = handleTest ./dovecot.nix {};
+  # ec2-config doesn't work in a sandbox as the simulated ec2 instance needs network access
+  #ec2-config = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-config or {};
+  ec2-nixops = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-nixops or {};
+  ecryptfs = handleTest ./ecryptfs.nix {};
+  ejabberd = handleTest ./xmpp/ejabberd.nix {};
+  elk = handleTestOn ["x86_64-linux"] ./elk.nix {};
+  env = handleTest ./env.nix {};
+  etcd = handleTestOn ["x86_64-linux"] ./etcd.nix {};
+  etcd-cluster = handleTestOn ["x86_64-linux"] ./etcd-cluster.nix {};
+  fancontrol = handleTest ./fancontrol.nix {};
+  ferm = handleTest ./ferm.nix {};
+  firefox = handleTest ./firefox.nix {};
+  firefox-esr = handleTest ./firefox.nix { esr = true; };
+  firewall = handleTest ./firewall.nix {};
+  fish = handleTest ./fish.nix {};
+  flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
+  fluentd = handleTest ./fluentd.nix {};
+  fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {};
+  freeswitch = handleTest ./freeswitch.nix {};
+  fsck = handleTest ./fsck.nix {};
+  gerrit = handleTest ./gerrit.nix {};
+  gotify-server = handleTest ./gotify-server.nix {};
+  grocy = handleTest ./grocy.nix {};
+  gitdaemon = handleTest ./gitdaemon.nix {};
+  gitea = handleTest ./gitea.nix {};
+  gitlab = handleTest ./gitlab.nix {};
+  gitolite = handleTest ./gitolite.nix {};
+  gitolite-fcgiwrap = handleTest ./gitolite-fcgiwrap.nix {};
+  glusterfs = handleTest ./glusterfs.nix {};
+  gnome3-xorg = handleTest ./gnome3-xorg.nix {};
+  gnome3 = handleTest ./gnome3.nix {};
+  installed-tests = pkgs.recurseIntoAttrs (handleTest ./installed-tests {});
+  gocd-agent = handleTest ./gocd-agent.nix {};
+  gocd-server = handleTest ./gocd-server.nix {};
+  google-oslogin = handleTest ./google-oslogin {};
+  grafana = handleTest ./grafana.nix {};
+  graphite = handleTest ./graphite.nix {};
+  graylog = handleTest ./graylog.nix {};
+  gvisor = handleTest ./gvisor.nix {};
+  hadoop.hdfs = handleTestOn [ "x86_64-linux" ] ./hadoop/hdfs.nix {};
+  hadoop.yarn = handleTestOn [ "x86_64-linux" ] ./hadoop/yarn.nix {};
+  handbrake = handleTestOn ["x86_64-linux"] ./handbrake.nix {};
+  haproxy = handleTest ./haproxy.nix {};
+  hardened = handleTest ./hardened.nix {};
+  # The 9pnet_virtio module used to mount the /nix partition doesn't support
+  # hibernation. This test happens to work on x86_64-linux but
+  # not on other platforms.
+  hibernate = handleTestOn ["x86_64-linux"] ./hibernate.nix {};
+  hitch = handleTest ./hitch {};
+  hocker-fetchdocker = handleTest ./hocker-fetchdocker {};
+  home-assistant = handleTest ./home-assistant.nix {};
+  hound = handleTest ./hound.nix {};
+  hydra = handleTest ./hydra {};
+  hydra-db-migration = handleTest ./hydra/db-migration.nix {};
+  i3wm = handleTest ./i3wm.nix {};
+  icingaweb2 = handleTest ./icingaweb2.nix {};
+  iftop = handleTest ./iftop.nix {};
+  ihatemoney = handleTest ./ihatemoney.nix {};
+  incron = handleTest ./incron.nix {};
+  influxdb = handleTest ./influxdb.nix {};
+  initrd-network-ssh = handleTest ./initrd-network-ssh {};
+  initrdNetwork = handleTest ./initrd-network.nix {};
+  installer = handleTest ./installer.nix {};
+  iodine = handleTest ./iodine.nix {};
+  ipv6 = handleTest ./ipv6.nix {};
+  jackett = handleTest ./jackett.nix {};
+  jellyfin = handleTest ./jellyfin.nix {};
+  jenkins = handleTest ./jenkins.nix {};
+  jirafeau = handleTest ./jirafeau.nix {};
+  k3s = handleTest ./k3s.nix {};
+  kafka = handleTest ./kafka.nix {};
+  keepalived = handleTest ./keepalived.nix {};
+  kerberos = handleTest ./kerberos/default.nix {};
+  kernel-latest = handleTest ./kernel-latest.nix {};
+  kernel-lts = handleTest ./kernel-lts.nix {};
+  kernel-testing = handleTest ./kernel-testing.nix {};
+  keymap = handleTest ./keymap.nix {};
+  knot = handleTest ./knot.nix {};
+  krb5 = discoverTests (import ./krb5 {});
+  kubernetes.dns = handleTestOn ["x86_64-linux"] ./kubernetes/dns.nix {};
+  # kubernetes.e2e should eventually replace kubernetes.rbac when it works
+  #kubernetes.e2e = handleTestOn ["x86_64-linux"] ./kubernetes/e2e.nix {};
+  kubernetes.rbac = handleTestOn ["x86_64-linux"] ./kubernetes/rbac.nix {};
+  latestKernel.hardened = handleTest ./hardened.nix { latestKernel = true; };
+  latestKernel.login = handleTest ./login.nix { latestKernel = true; };
+  ldap = handleTest ./ldap.nix {};
+  leaps = handleTest ./leaps.nix {};
+  lidarr = handleTest ./lidarr.nix {};
+  lightdm = handleTest ./lightdm.nix {};
+  limesurvey = handleTest ./limesurvey.nix {};
+  login = handleTest ./login.nix {};
+  loki = handleTest ./loki.nix {};
+  #logstash = handleTest ./logstash.nix {};
+  lorri = handleTest ./lorri/default.nix {};
+  magnetico = handleTest ./magnetico.nix {};
+  magic-wormhole-mailbox-server = handleTest ./magic-wormhole-mailbox-server.nix {};
+  mailcatcher = handleTest ./mailcatcher.nix {};
+  mathics = handleTest ./mathics.nix {};
+  matomo = handleTest ./matomo.nix {};
+  matrix-synapse = handleTest ./matrix-synapse.nix {};
+  mediawiki = handleTest ./mediawiki.nix {};
+  memcached = handleTest ./memcached.nix {};
+  mesos = handleTest ./mesos.nix {};
+  metabase = handleTest ./metabase.nix {};
+  miniflux = handleTest ./miniflux.nix {};
+  minio = handleTest ./minio.nix {};
+  minidlna = handleTest ./minidlna.nix {};
+  misc = handleTest ./misc.nix {};
+  moinmoin = handleTest ./moinmoin.nix {};
+  mongodb = handleTest ./mongodb.nix {};
+  moodle = handleTest ./moodle.nix {};
+  morty = handleTest ./morty.nix {};
+  mosquitto = handleTest ./mosquitto.nix {};
+  mpd = handleTest ./mpd.nix {};
+  mumble = handleTest ./mumble.nix {};
+  munin = handleTest ./munin.nix {};
+  mutableUsers = handleTest ./mutable-users.nix {};
+  mxisd = handleTest ./mxisd.nix {};
+  mysql = handleTest ./mysql.nix {};
+  mysqlBackup = handleTest ./mysql-backup.nix {};
+  mysqlReplication = handleTest ./mysql-replication.nix {};
+  nagios = handleTest ./nagios.nix {};
+  nat.firewall = handleTest ./nat.nix { withFirewall = true; };
+  nat.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; };
+  nat.standalone = handleTest ./nat.nix { withFirewall = false; };
+  ndppd = handleTest ./ndppd.nix {};
+  neo4j = handleTest ./neo4j.nix {};
+  specialisation = handleTest ./specialisation.nix {};
+  netdata = handleTest ./netdata.nix {};
+  networking.networkd = handleTest ./networking.nix { networkd = true; };
+  networking.scripted = handleTest ./networking.nix { networkd = false; };
+  # TODO: put in networking.nix after the test becomes more complete
+  networkingProxy = handleTest ./networking-proxy.nix {};
+  nextcloud = handleTest ./nextcloud {};
+  nexus = handleTest ./nexus.nix {};
+  # TODO: Test nfsv3 + Kerberos
+  nfs3 = handleTest ./nfs { version = 3; };
+  nfs4 = handleTest ./nfs { version = 4; };
+  nghttpx = handleTest ./nghttpx.nix {};
+  nginx = handleTest ./nginx.nix {};
+  nginx-etag = handleTest ./nginx-etag.nix {};
+  nginx-pubhtml = handleTest ./nginx-pubhtml.nix {};
+  nginx-sso = handleTest ./nginx-sso.nix {};
+  nix-ssh-serve = handleTest ./nix-ssh-serve.nix {};
+  nixos-generate-config = handleTest ./nixos-generate-config.nix {};
+  novacomd = handleTestOn ["x86_64-linux"] ./novacomd.nix {};
+  nsd = handleTest ./nsd.nix {};
+  nzbget = handleTest ./nzbget.nix {};
+  openarena = handleTest ./openarena.nix {};
+  openldap = handleTest ./openldap.nix {};
+  opensmtpd = handleTest ./opensmtpd.nix {};
+  openssh = handleTest ./openssh.nix {};
+  openstack-image-userdata = (handleTestOn ["x86_64-linux"] ./openstack-image.nix {}).userdata or {};
+  openstack-image-metadata = (handleTestOn ["x86_64-linux"] ./openstack-image.nix {}).metadata or {};
+  orangefs = handleTest ./orangefs.nix {};
+  os-prober = handleTestOn ["x86_64-linux"] ./os-prober.nix {};
+  osrm-backend = handleTest ./osrm-backend.nix {};
+  overlayfs = handleTest ./overlayfs.nix {};
+  packagekit = handleTest ./packagekit.nix {};
+  pam-oath-login = handleTest ./pam-oath-login.nix {};
+  pam-u2f = handleTest ./pam-u2f.nix {};
+  pantheon = handleTest ./pantheon.nix {};
+  paperless = handleTest ./paperless.nix {};
+  peerflix = handleTest ./peerflix.nix {};
+  pgjwt = handleTest ./pgjwt.nix {};
+  pgmanage = handleTest ./pgmanage.nix {};
+  php = handleTest ./php {};
+  plasma5 = handleTest ./plasma5.nix {};
+  plotinus = handleTest ./plotinus.nix {};
+  postgis = handleTest ./postgis.nix {};
+  postgresql = handleTest ./postgresql.nix {};
+  postgresql-wal-receiver = handleTest ./postgresql-wal-receiver.nix {};
+  powerdns = handleTest ./powerdns.nix {};
+  pppd = handleTest ./pppd.nix {};
+  predictable-interface-names = handleTest ./predictable-interface-names.nix {};
+  printing = handleTest ./printing.nix {};
+  prometheus = handleTest ./prometheus.nix {};
+  prometheus-exporters = handleTest ./prometheus-exporters.nix {};
+  prosody = handleTest ./xmpp/prosody.nix {};
+  prosodyMysql = handleTest ./xmpp/prosody-mysql.nix {};
+  proxy = handleTest ./proxy.nix {};
+  quagga = handleTest ./quagga.nix {};
+  quorum = handleTest ./quorum.nix {};
+  rabbitmq = handleTest ./rabbitmq.nix {};
+  radarr = handleTest ./radarr.nix {};
+  radicale = handleTest ./radicale.nix {};
+  redis = handleTest ./redis.nix {};
+  redmine = handleTest ./redmine.nix {};
+  restic = handleTest ./restic.nix {};
+  roundcube = handleTest ./roundcube.nix {};
+  rspamd = handleTest ./rspamd.nix {};
+  rss2email = handleTest ./rss2email.nix {};
+  rsyslogd = handleTest ./rsyslogd.nix {};
+  runInMachine = handleTest ./run-in-machine.nix {};
+  rxe = handleTest ./rxe.nix {};
+  samba = handleTest ./samba.nix {};
+  sanoid = handleTest ./sanoid.nix {};
+  sddm = handleTest ./sddm.nix {};
+  service-runner = handleTest ./service-runner.nix {};
+  shiori = handleTest ./shiori.nix {};
+  signal-desktop = handleTest ./signal-desktop.nix {};
+  simple = handleTest ./simple.nix {};
+  slurm = handleTest ./slurm.nix {};
+  smokeping = handleTest ./smokeping.nix {};
+  snapper = handleTest ./snapper.nix {};
+  solr = handleTest ./solr.nix {};
+  spacecookie = handleTest ./spacecookie.nix {};
+  sonarr = handleTest ./sonarr.nix {};
+  strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
+  sudo = handleTest ./sudo.nix {};
+  switchTest = handleTest ./switch-test.nix {};
+  sympa = handleTest ./sympa.nix {};
+  syncthing-init = handleTest ./syncthing-init.nix {};
+  syncthing-relay = handleTest ./syncthing-relay.nix {};
+  systemd = handleTest ./systemd.nix {};
+  systemd-analyze = handleTest ./systemd-analyze.nix {};
+  systemd-confinement = handleTest ./systemd-confinement.nix {};
+  systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
+  systemd-networkd-vrf = handleTest ./systemd-networkd-vrf.nix {};
+  systemd-networkd = handleTest ./systemd-networkd.nix {};
+  systemd-nspawn = handleTest ./systemd-nspawn.nix {};
+  pdns-recursor = handleTest ./pdns-recursor.nix {};
+  taskserver = handleTest ./taskserver.nix {};
+  telegraf = handleTest ./telegraf.nix {};
+  tiddlywiki = handleTest ./tiddlywiki.nix {};
+  timezone = handleTest ./timezone.nix {};
+  tinydns = handleTest ./tinydns.nix {};
+  tor = handleTest ./tor.nix {};
+  # traefik test relies on docker-containers
+  traefik = handleTestOn ["x86_64-linux"] ./traefik.nix {};
+  transmission = handleTest ./transmission.nix {};
+  trac = handleTest ./trac.nix {};
+  trilium-server = handleTestOn ["x86_64-linux"] ./trilium-server.nix {};
+  trezord = handleTest ./trezord.nix {};
+  trickster = handleTest ./trickster.nix {};
+  tuptime = handleTest ./tuptime.nix {};
+  udisks2 = handleTest ./udisks2.nix {};
+  upnp = handleTest ./upnp.nix {};
+  uwsgi = handleTest ./uwsgi.nix {};
+  vault = handleTest ./vault.nix {};
+  victoriametrics = handleTest ./victoriametrics.nix {};
+  virtualbox = handleTestOn ["x86_64-linux"] ./virtualbox.nix {};
+  wg-quick = handleTest ./wireguard/wg-quick.nix {};
+  wireguard = handleTest ./wireguard {};
+  wireguard-generated = handleTest ./wireguard/generated.nix {};
+  wireguard-namespaces = handleTest ./wireguard/namespaces.nix {};
+  wordpress = handleTest ./wordpress.nix {};
+  xandikos = handleTest ./xandikos.nix {};
+  xautolock = handleTest ./xautolock.nix {};
+  xfce = handleTest ./xfce.nix {};
+  xmonad = handleTest ./xmonad.nix {};
+  xrdp = handleTest ./xrdp.nix {};
+  xss-lock = handleTest ./xss-lock.nix {};
+  yabar = handleTest ./yabar.nix {};
+  yggdrasil = handleTest ./yggdrasil.nix {};
+  zfs = handleTest ./zfs.nix {};
+  zsh-history = handleTest ./zsh-history.nix {};
+  zookeeper = handleTest ./zookeeper.nix {};
+}
diff --git a/nixpkgs/nixos/tests/ammonite.nix b/nixpkgs/nixos/tests/ammonite.nix
new file mode 100644
index 000000000000..1955e42be5f0
--- /dev/null
+++ b/nixpkgs/nixos/tests/ammonite.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "ammonite";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  nodes = {
+    amm =
+      { pkgs, ... }:
+        {
+          environment.systemPackages = [ pkgs.ammonite ];
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    amm.succeed("amm -c 'val foo = 21; println(foo * 2)' | grep 42")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/atd.nix b/nixpkgs/nixos/tests/atd.nix
new file mode 100644
index 000000000000..c3abe5c253df
--- /dev/null
+++ b/nixpkgs/nixos/tests/atd.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "atd";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ bjornfor ];
+  };
+
+  machine =
+    { ... }:
+    { services.atd.enable = true;
+      users.users.alice = { isNormalUser = true; };
+    };
+
+  # "at" has a resolution of 1 minute
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("atd.service")  # wait for atd to start
+    machine.fail("test -f ~root/at-1")
+    machine.fail("test -f ~alice/at-1")
+
+    machine.succeed("echo 'touch ~root/at-1' | at now+1min")
+    machine.succeed("su - alice -c \"echo 'touch at-1' | at now+1min\"")
+
+    machine.succeed("sleep 1.5m")
+
+    machine.succeed("test -f ~root/at-1")
+    machine.succeed("test -f ~alice/at-1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/automysqlbackup.nix b/nixpkgs/nixos/tests/automysqlbackup.nix
new file mode 100644
index 000000000000..224b93862fbd
--- /dev/null
+++ b/nixpkgs/nixos/tests/automysqlbackup.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+{
+  name = "automysqlbackup";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  machine =
+    { pkgs, ... }:
+    {
+      services.mysql.enable = true;
+      services.mysql.package = pkgs.mysql;
+      services.mysql.initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
+
+      services.automysqlbackup.enable = true;
+    };
+
+  testScript = ''
+    start_all()
+
+    # Need to have mysql started so that it can be populated with data.
+    machine.wait_for_unit("mysql.service")
+
+    with subtest("Wait for testdb to be fully populated (5 rows)."):
+        machine.wait_until_succeeds(
+            "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+        )
+
+    with subtest("Do a backup and wait for it to start"):
+        machine.start_job("automysqlbackup.service")
+        machine.wait_for_job("automysqlbackup.service")
+
+    with subtest("wait for backup file and check that data appears in backup"):
+        machine.wait_for_file("/var/backup/mysql/daily/testdb")
+        machine.succeed(
+            "${pkgs.gzip}/bin/zcat /var/backup/mysql/daily/testdb/daily_testdb_*.sql.gz | grep hello"
+        )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/avahi.nix b/nixpkgs/nixos/tests/avahi.nix
new file mode 100644
index 000000000000..fe027c14d5a8
--- /dev/null
+++ b/nixpkgs/nixos/tests/avahi.nix
@@ -0,0 +1,67 @@
+# Test whether `avahi-daemon' and `libnss-mdns' work as expected.
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "avahi";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes = let
+    cfg = { ... }: {
+      services.avahi = {
+        enable = true;
+        nssmdns = true;
+        publish.addresses = true;
+        publish.domain = true;
+        publish.enable = true;
+        publish.userServices = true;
+        publish.workstation = true;
+        extraServiceFiles.ssh = "${pkgs.avahi}/etc/avahi/services/ssh.service";
+      };
+    };
+  in {
+    one = cfg;
+    two = cfg;
+  };
+
+  testScript = ''
+    start_all()
+
+    # mDNS.
+    one.wait_for_unit("network.target")
+    two.wait_for_unit("network.target")
+
+    one.succeed("avahi-resolve-host-name one.local | tee out >&2")
+    one.succeed('test "`cut -f1 < out`" = one.local')
+    one.succeed("avahi-resolve-host-name two.local | tee out >&2")
+    one.succeed('test "`cut -f1 < out`" = two.local')
+
+    two.succeed("avahi-resolve-host-name one.local | tee out >&2")
+    two.succeed('test "`cut -f1 < out`" = one.local')
+    two.succeed("avahi-resolve-host-name two.local | tee out >&2")
+    two.succeed('test "`cut -f1 < out`" = two.local')
+
+    # Basic DNS-SD.
+    one.succeed("avahi-browse -r -t _workstation._tcp | tee out >&2")
+    one.succeed("test `wc -l < out` -gt 0")
+    two.succeed("avahi-browse -r -t _workstation._tcp | tee out >&2")
+    two.succeed("test `wc -l < out` -gt 0")
+
+    # More DNS-SD.
+    one.execute('avahi-publish -s "This is a test" _test._tcp 123 one=1 &')
+    one.sleep(5)
+    two.succeed("avahi-browse -r -t _test._tcp | tee out >&2")
+    two.succeed("test `wc -l < out` -gt 0")
+
+    # NSS-mDNS.
+    one.succeed("getent hosts one.local >&2")
+    one.succeed("getent hosts two.local >&2")
+    two.succeed("getent hosts one.local >&2")
+    two.succeed("getent hosts two.local >&2")
+
+    # extra service definitions
+    one.succeed("avahi-browse -r -t _ssh._tcp | tee out >&2")
+    one.succeed("test `wc -l < out` -gt 0")
+    two.succeed("avahi-browse -r -t _ssh._tcp | tee out >&2")
+    two.succeed("test `wc -l < out` -gt 0")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/babeld.nix b/nixpkgs/nixos/tests/babeld.nix
new file mode 100644
index 000000000000..fafa788ba57b
--- /dev/null
+++ b/nixpkgs/nixos/tests/babeld.nix
@@ -0,0 +1,148 @@
+
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "babeld";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ hexa ];
+  };
+
+  nodes =
+    { client = { pkgs, lib, ... }:
+      {
+        virtualisation.vlans = [ 10 ];
+
+        networking = {
+          useDHCP = false;
+          interfaces."eth1" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.10.2"; prefixLength = 24; } ];
+            ipv4.routes = lib.mkForce [ { address = "0.0.0.0"; prefixLength = 0; via = "192.168.10.1"; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:10::2"; prefixLength = 64; } ];
+            ipv6.routes = lib.mkForce [ { address = "::"; prefixLength = 0; via = "2001:db8:10::1"; } ];
+          };
+        };
+      };
+
+      local_router = { pkgs, lib, ... }:
+      {
+        virtualisation.vlans = [ 10 20 ];
+
+        boot.kernel.sysctl."net.ipv4.conf.all.forwarding" = 1;
+        boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;
+
+        networking = {
+          useDHCP = false;
+          firewall.enable = false;
+
+          interfaces."eth1" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.10.1"; prefixLength = 24; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:10::1"; prefixLength = 64; } ];
+          };
+
+          interfaces."eth2" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.20.1"; prefixLength = 24; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:20::1"; prefixLength = 64; } ];
+          };
+        };
+
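+        # The filters below accept only the far segment's prefixes from peers and
+        # announce only locally-originated (proto 2) routes, so each router should
+        # learn exactly one extra route per address family via babel.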
+        services.babeld = {
+          enable = true;
+          interfaces.eth2 = {
+            hello-interval = 1;
+            type = "wired";
+          };
+          extraConfig = ''
+            local-port-readwrite 33123
+
+            import-table 254 # main
+            export-table 254 # main
+
+            in ip 192.168.10.0/24 deny
+            in ip 192.168.20.0/24 deny
+            in ip 2001:db8:10::/64 deny
+            in ip 2001:db8:20::/64 deny
+
+            in ip 192.168.30.0/24 allow
+            in ip 2001:db8:30::/64 allow
+
+            in deny
+
+            redistribute local proto 2
+            redistribute local deny
+          '';
+        };
+      };
+      remote_router = { pkgs, lib, ... }:
+      {
+        virtualisation.vlans = [ 20 30 ];
+
+        boot.kernel.sysctl."net.ipv4.conf.all.forwarding" = 1;
+        boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;
+
+        networking = {
+          useDHCP = false;
+          firewall.enable = false;
+
+          interfaces."eth1" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.20.2"; prefixLength = 24; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:20::2"; prefixLength = 64; } ];
+          };
+
+          interfaces."eth2" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.30.1"; prefixLength = 24; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:30::1"; prefixLength = 64; } ];
+          };
+        };
+
+        services.babeld = {
+          enable = true;
+          interfaces.eth1 = {
+            hello-interval = 1;
+            type = "wired";
+          };
+          extraConfig = ''
+            local-port-readwrite 33123
+
+            import-table 254 # main
+            export-table 254 # main
+
+            in ip 192.168.20.0/24 deny
+            in ip 192.168.30.0/24 deny
+            in ip 2001:db8:20::/64 deny
+            in ip 2001:db8:30::/64 deny
+
+            in ip 192.168.10.0/24 allow
+            in ip 2001:db8:10::/64 allow
+
+            in deny
+
+            redistribute local proto 2
+            redistribute local deny
+          '';
+        };
+
+      };
+    };
+
+  testScript =
+    ''
+      start_all()
+
+      client.wait_for_unit("network-online.target")
+      local_router.wait_for_unit("network-online.target")
+      remote_router.wait_for_unit("network-online.target")
+
+      local_router.wait_for_unit("babeld.service")
+      remote_router.wait_for_unit("babeld.service")
+
+      local_router.wait_until_succeeds("ip route get 192.168.30.1")
+      local_router.wait_until_succeeds("ip route get 2001:db8:30::1")
+
+      remote_router.wait_until_succeeds("ip route get 192.168.10.1")
+      remote_router.wait_until_succeeds("ip route get 2001:db8:10::1")
+
+      client.succeed("ping -c1 192.168.30.1")
+      client.succeed("ping -c1 2001:db8:30::1")
+
+      remote_router.succeed("ping -c1 192.168.10.2")
+      remote_router.succeed("ping -c1 2001:db8:10::2")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/bcachefs.nix b/nixpkgs/nixos/tests/bcachefs.nix
new file mode 100644
index 000000000000..0541e5803225
--- /dev/null
+++ b/nixpkgs/nixos/tests/bcachefs.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "bcachefs";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ chiiruno ];
+
+  machine = { pkgs, ... }: {
+    virtualisation.emptyDiskImages = [ 4096 ];
+    networking.hostId = "deadbeef";
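+    # Makes the bcachefs kernel module and userspace tools available in the VM.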
+    boot.supportedFilesystems = [ "bcachefs" ];
+    environment.systemPackages = with pkgs; [ parted ];
+  };
+
+  testScript = ''
+    machine.succeed("modprobe bcachefs")
+    machine.succeed("bcachefs version")
+    machine.succeed("ls /dev")
+
+    machine.succeed(
+        "mkdir /tmp/mnt",
+        "udevadm settle",
+        "parted --script /dev/vdb mklabel msdos",
+        "parted --script /dev/vdb -- mkpart primary 1024M -1s",
+        "udevadm settle",
+        # Due to #32279, we cannot use encryption for this test yet
+        # "echo password | bcachefs format --encrypted /dev/vdb1",
+        # "echo password | bcachefs unlock /dev/vdb1",
+        "bcachefs format /dev/vdb1",
+        "mount -t bcachefs /dev/vdb1 /tmp/mnt",
+        "udevadm settle",
+        "bcachefs fs usage /tmp/mnt",
+        "umount /tmp/mnt",
+        "udevadm settle",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/beanstalkd.nix b/nixpkgs/nixos/tests/beanstalkd.nix
new file mode 100644
index 000000000000..4f4a454fb47f
--- /dev/null
+++ b/nixpkgs/nixos/tests/beanstalkd.nix
@@ -0,0 +1,49 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  pythonEnv = pkgs.python3.withPackages (p: [p.beanstalkc]);
+
+  produce = pkgs.writeScript "produce.py" ''
+    #!${pythonEnv.interpreter}
+    import beanstalkc
+
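+    # parse_yaml=False presumably avoids the PyYAML dependency; this test only
+    # moves raw job bodies around.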
+    queue = beanstalkc.Connection(host='localhost', port=11300, parse_yaml=False)
+    queue.put(b'this is a job')
+    queue.put(b'this is another job')
+  '';
+
+  consume = pkgs.writeScript "consume.py" ''
+    #!${pythonEnv.interpreter}
+    import beanstalkc
+
+    queue = beanstalkc.Connection(host='localhost', port=11300, parse_yaml=False)
+
+    job = queue.reserve(timeout=0)
+    print(job.body.decode('utf-8'))
+    job.delete()
+  '';
+
+in
+{
+  name = "beanstalkd";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  machine =
+    { ... }:
+    { services.beanstalkd.enable = true;
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("beanstalkd.service")
+
+    machine.succeed("${produce}")
+    assert "this is a job\n" == machine.succeed(
+        "${consume}"
+    )
+    assert "this is another job\n" == machine.succeed(
+        "${consume}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/bees.nix b/nixpkgs/nixos/tests/bees.nix
new file mode 100644
index 000000000000..6e6a9c3446b0
--- /dev/null
+++ b/nixpkgs/nixos/tests/bees.nix
@@ -0,0 +1,62 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "bees";
+
+  machine = { config, pkgs, ... }: {
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.btrfs-progs}/bin/mkfs.btrfs -f -L aux1 /dev/vdb
+      ${pkgs.btrfs-progs}/bin/mkfs.btrfs -f -L aux2 /dev/vdc
+    '';
+    virtualisation.emptyDiskImages = [ 4096 4096 ];
+    fileSystems = lib.mkVMOverride {
+      "/aux1" = { # filesystem configured to be deduplicated
+        device = "/dev/disk/by-label/aux1";
+        fsType = "btrfs";
+      };
+      "/aux2" = { # filesystem not configured to be deduplicated
+        device = "/dev/disk/by-label/aux2";
+        fsType = "btrfs";
+      };
+    };
+    services.beesd.filesystems = {
+      aux1 = {
+        spec = "LABEL=aux1";
+        hashTableSizeMB = 16;
+        verbosity = "debug";
+      };
+    };
+  };
+
+  testScript =
+  let
+    someContentIsShared = loc: pkgs.writeShellScript "some-content-is-shared" ''
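+      # Succeeds iff every dedup-me file reports nonzero "Set shared" bytes
+      # (third column of btrfs fi du), i.e. deduplication actually happened.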
+      [[ $(btrfs fi du -s --raw ${lib.escapeShellArg loc}/dedup-me-{1,2} | awk 'BEGIN { count=0; } NR>1 && $3 == 0 { count++ } END { print count }') -eq 0 ]]
+    '';
+  in ''
+    # Stop the bees instance that systemd started at boot, so the "nothing is
+    # shared yet" check below runs before any deduplication happens
+    machine.succeed("systemctl stop beesd@aux1.service")
+
+    machine.succeed(
+        "dd if=/dev/urandom of=/aux1/dedup-me-1 bs=1M count=8",
+        "cp --reflink=never /aux1/dedup-me-1 /aux1/dedup-me-2",
+        "cp --reflink=never /aux1/* /aux2/",
+        "sync",
+    )
+    machine.fail(
+        "${someContentIsShared "/aux1"}",
+        "${someContentIsShared "/aux2"}",
+    )
+    machine.succeed("systemctl start beesd@aux1.service")
+
+    # assert that "Set Shared" column is nonzero
+    machine.wait_until_succeeds(
+        "${someContentIsShared "/aux1"}",
+    )
+    machine.fail("${someContentIsShared "/aux2"}")
+
+    # assert that 16MB hash table size requested was honored
+    machine.succeed(
+        "[[ $(stat -c %s /aux1/.beeshome/beeshash.dat) = $(( 16 * 1024 * 1024)) ]]"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/bind.nix b/nixpkgs/nixos/tests/bind.nix
new file mode 100644
index 000000000000..09917b15a8e0
--- /dev/null
+++ b/nixpkgs/nixos/tests/bind.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix {
+  name = "bind";
+
+  machine = { pkgs, lib, ... }: {
+    services.bind.enable = true;
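+    # Presumably needed so the RFC 1918 reverse lookup below is answered from our
+    # custom root zone instead of BIND's built-in empty zones.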
+    services.bind.extraOptions = "empty-zones-enable no;";
+    services.bind.zones = lib.singleton {
+      name = ".";
+      file = pkgs.writeText "root.zone" ''
+        $TTL 3600
+        . IN SOA ns.example.org. admin.example.org. ( 1 3h 1h 1w 1d )
+        . IN NS ns.example.org.
+
+        ns.example.org. IN A    192.168.0.1
+        ns.example.org. IN AAAA abcd::1
+
+        1.0.168.192.in-addr.arpa IN PTR ns.example.org.
+      '';
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("bind.service")
+    machine.wait_for_open_port(53)
+    machine.succeed("host 192.168.0.1 127.0.0.1 | grep -qF ns.example.org")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/bittorrent.nix b/nixpkgs/nixos/tests/bittorrent.nix
new file mode 100644
index 000000000000..0a97d5556a26
--- /dev/null
+++ b/nixpkgs/nixos/tests/bittorrent.nix
@@ -0,0 +1,164 @@
+# This test runs a Bittorrent tracker on one machine, and verifies
+# that two client machines can download the torrent using
+# `transmission'.  The first client (behind a NAT router) downloads
+# from the initial seeder running on the tracker.  Then we kill the
+# initial seeder.  The second client downloads from the first client,
+# which only works if the first client successfully uses the UPnP-IGD
+# protocol to poke a hole in the NAT.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+
+  # Some random file to serve.
+  file = pkgs.hello.src;
+
+  internalRouterAddress = "192.168.3.1";
+  internalClient1Address = "192.168.3.2";
+  externalRouterAddress = "80.100.100.1";
+  externalClient2Address = "80.100.100.2";
+  externalTrackerAddress = "80.100.100.3";
+
+  transmissionConfig = { ... }: {
+    environment.systemPackages = [ pkgs.transmission ];
+    services.transmission = {
+      enable = true;
+      settings = {
+        dht-enabled = false;
+        message-level = 3;
+      };
+    };
+  };
+in
+
+{
+  name = "bittorrent";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ domenkozar eelco rob bobvanderlinden ];
+  };
+
+  nodes = {
+    tracker = { pkgs, ... }: {
+      imports = [ transmissionConfig ];
+
+      virtualisation.vlans = [ 1 ];
+      networking.firewall.enable = false;
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = externalTrackerAddress; prefixLength = 24; }
+      ];
+
+      # We need Apache on the tracker to serve the torrents.
+      services.httpd = {
+        enable = true;
+        virtualHosts = {
+          "torrentserver.org" = {
+            adminAddr = "foo@example.org";
+            documentRoot = "/tmp";
+          };
+        };
+      };
+      services.opentracker.enable = true;
+    };
+
+    router = { pkgs, nodes, ... }: {
+      virtualisation.vlans = [ 1 2 ];
+      networking.nat.enable = true;
+      networking.nat.internalInterfaces = [ "eth2" ];
+      networking.nat.externalInterface = "eth1";
+      networking.firewall.enable = true;
+      networking.firewall.trustedInterfaces = [ "eth2" ];
+      networking.interfaces.eth0.ipv4.addresses = [];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = externalRouterAddress; prefixLength = 24; }
+      ];
+      networking.interfaces.eth2.ipv4.addresses = [
+        { address = internalRouterAddress; prefixLength = 24; }
+      ];
+      services.miniupnpd = {
+        enable = true;
+        externalInterface = "eth1";
+        internalIPs = [ "eth2" ];
+        appendConfig = ''
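+          # advertise the fixed WAN address instead of auto-detecting it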
+          ext_ip=${externalRouterAddress}
+        '';
+      };
+    };
+
+    client1 = { pkgs, nodes, ... }: {
+      imports = [ transmissionConfig ];
+      environment.systemPackages = [ pkgs.miniupnpc ];
+
+      virtualisation.vlans = [ 2 ];
+      networking.interfaces.eth0.ipv4.addresses = [];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = internalClient1Address; prefixLength = 24; }
+      ];
+      networking.defaultGateway = internalRouterAddress;
+      networking.firewall.enable = false;
+    };
+
+    client2 = { pkgs, ... }: {
+      imports = [ transmissionConfig ];
+
+      virtualisation.vlans = [ 1 ];
+      networking.interfaces.eth0.ipv4.addresses = [];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = externalClient2Address; prefixLength = 24; }
+      ];
+      networking.firewall.enable = false;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+      start_all()
+
+      # Wait for network and miniupnpd.
+      router.wait_for_unit("network-online.target")
+      router.wait_for_unit("miniupnpd")
+
+      # Create the torrent.
+      tracker.succeed("mkdir /tmp/data")
+      tracker.succeed(
+          "cp ${file} /tmp/data/test.tar.bz2"
+      )
+      tracker.succeed(
+          "transmission-create /tmp/data/test.tar.bz2 --private --tracker http://${externalTrackerAddress}:6969/announce --outfile /tmp/test.torrent"
+      )
+      tracker.succeed("chmod 644 /tmp/test.torrent")
+
+      # Start the tracker.  !!! use a less crappy tracker
+      tracker.wait_for_unit("network-online.target")
+      tracker.wait_for_unit("opentracker.service")
+      tracker.wait_for_open_port(6969)
+
+      # Start the initial seeder.
+      tracker.succeed(
+          "transmission-remote --add /tmp/test.torrent --no-portmap --no-dht --download-dir /tmp/data"
+      )
+
+      # Now we should be able to download from the client behind the NAT.
+      tracker.wait_for_unit("httpd")
+      client1.wait_for_unit("network-online.target")
+      client1.succeed(
+          "transmission-remote --add http://${externalTrackerAddress}/test.torrent --download-dir /tmp >&2 &"
+      )
+      client1.wait_for_file("/tmp/test.tar.bz2")
+      client1.succeed(
+          "cmp /tmp/test.tar.bz2 ${file}"
+      )
+
+      # Bring down the initial seeder.
+      # tracker.stop_job("transmission")
+
+      # Now download from the second client.  This can only succeed if
+      # the first client created a NAT hole in the router.
+      client2.wait_for_unit("network-online.target")
+      client2.succeed(
+          "transmission-remote --add http://${externalTrackerAddress}/test.torrent --no-portmap --no-dht --download-dir /tmp >&2 &"
+      )
+      client2.wait_for_file("/tmp/test.tar.bz2")
+      client2.succeed(
+          "cmp /tmp/test.tar.bz2 ${file}"
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/boot-stage1.nix b/nixpkgs/nixos/tests/boot-stage1.nix
new file mode 100644
index 000000000000..cfb2ccb82856
--- /dev/null
+++ b/nixpkgs/nixos/tests/boot-stage1.nix
@@ -0,0 +1,162 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "boot-stage1";
+
+  machine = { config, pkgs, lib, ... }: {
+    boot.extraModulePackages = let
+      compileKernelModule = name: source: pkgs.runCommandCC name rec {
+        inherit source;
+        kdev = config.boot.kernelPackages.kernel.dev;
+        kver = config.boot.kernelPackages.kernel.modDirVersion;
+        ksrc = "${kdev}/lib/modules/${kver}/build";
+        hardeningDisable = [ "pic" ];
+        nativeBuildInputs = kdev.moduleBuildDependencies;
+      } ''
+        echo "obj-m += $name.o" > Makefile
+        echo "$source" > "$name.c"
+        make -C "$ksrc" M=$(pwd) modules
+        install -vD "$name.ko" "$out/lib/modules/$kver/$name.ko"
+      '';
+
+      # This spawns a kthread that simply waits until it receives a signal and
+      # then terminates. We want to make sure that nothing during
+      # the boot process kills any kthread by accident, like what happened in
+      # issue #15226.
+      kcanary = compileKernelModule "kcanary" ''
+        #include <linux/version.h>
+        #include <linux/init.h>
+        #include <linux/module.h>
+        #include <linux/kernel.h>
+        #include <linux/kthread.h>
+        #include <linux/sched.h>
+        #include <linux/signal.h>
+        #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+        #include <linux/sched/signal.h>
+        #endif
+
+        struct task_struct *canaryTask;
+
+        static int kcanary(void *nothing)
+        {
+          allow_signal(SIGINT);
+          allow_signal(SIGTERM);
+          allow_signal(SIGKILL);
+          while (!kthread_should_stop()) {
+            set_current_state(TASK_INTERRUPTIBLE);
+            schedule_timeout_interruptible(msecs_to_jiffies(100));
+            if (signal_pending(current)) break;
+          }
+          return 0;
+        }
+
+        static int kcanaryInit(void)
+        {
+          canaryTask = kthread_run(&kcanary, NULL, "kcanary");
+          return 0;
+        }
+
+        static void kcanaryExit(void)
+        {
+          kthread_stop(canaryTask);
+        }
+
+        module_init(kcanaryInit);
+        module_exit(kcanaryExit);
+      '';
+
+    in lib.singleton kcanary;
+
+    boot.initrd.kernelModules = [ "kcanary" ];
+
+    boot.initrd.extraUtilsCommands = let
+      compile = name: source: pkgs.runCommandCC name { inherit source; } ''
+        mkdir -p "$out/bin"
+        echo "$source" | gcc -Wall -o "$out/bin/$name" -xc -
+      '';
+
+      daemonize = name: source: compile name ''
+        #include <stdio.h>
+        #include <unistd.h>
+
+        void runSource(void) {
+        ${source}
+        }
+
+        int main(void) {
+          if (fork() > 0) return 0;
+          setsid();
+          runSource();
+          return 1;
+        }
+      '';
+
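+      # Builds a canary whose /proc/<pid>/cmdline is exactly `cmdline`: the
+      # daemonized parent execs the separate "<name>-child" binary with argv[0]
+      # overridden, so an empty cmdline makes the process look like a kthread.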
+      mkCmdlineCanary = { name, cmdline ? "", source ? "" }: (daemonize name ''
+        char *argv[] = {"${cmdline}", NULL};
+        execvp("${name}-child", argv);
+      '') // {
+        child = compile "${name}-child" ''
+          #include <stdio.h>
+          #include <unistd.h>
+
+          int main(void) {
+            ${source}
+            while (1) sleep(1);
+            return 1;
+          }
+        '';
+      };
+
+      copyCanaries = with lib; concatMapStrings (canary: ''
+        ${optionalString (canary ? child) ''
+          copy_bin_and_libs "${canary.child}/bin/${canary.child.name}"
+        ''}
+        copy_bin_and_libs "${canary}/bin/${canary.name}"
+      '');
+
+    in copyCanaries [
+      # Simple canary process which just sleeps forever and should be killed by
+      # stage 2.
+      (daemonize "canary1" "while (1) sleep(1);")
+
+      # We want this canary process to try mimicking a kthread using a cmdline
+      # with a zero length so we can make sure that the process is properly
+      # killed in stage 1.
+      (mkCmdlineCanary {
+        name = "canary2";
+        source = ''
+          FILE *f;
+          f = fopen("/run/canary2.pid", "w");
+          fprintf(f, "%d\n", getpid());
+          fclose(f);
+        '';
+      })
+
+      # This canary process mimics a storage daemon, which we do NOT want to be
+      # killed before going into stage 2. For more on root storage daemons, see:
+      # https://www.freedesktop.org/wiki/Software/systemd/RootStorageDaemons/
+      (mkCmdlineCanary {
+        name = "canary3";
+        cmdline = "@canary3";
+      })
+    ];
+
+    boot.initrd.postMountCommands = ''
+      canary1
+      canary2
+      canary3
+      # Make sure the pidfile of canary 2 is created so that we still can get
+      # its former pid after the killing spree starts next within stage 1.
+      while [ ! -s /run/canary2.pid ]; do sleep 0.1; done
+    '';
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("test -s /run/canary2.pid")
+    machine.fail("pgrep -a canary1")
+    machine.fail("kill -0 $(< /run/canary2.pid)")
+    machine.succeed('pgrep -a -f "^@canary3$"')
+    machine.succeed('pgrep -a -f "^kcanary$"')
+  '';
+
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ aszlig ];
+})
diff --git a/nixpkgs/nixos/tests/boot.nix b/nixpkgs/nixos/tests/boot.nix
new file mode 100644
index 000000000000..c5040f3b31fb
--- /dev/null
+++ b/nixpkgs/nixos/tests/boot.nix
@@ -0,0 +1,105 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+
+  iso =
+    (import ../lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ ../modules/installer/cd-dvd/installation-cd-minimal.nix
+          ../modules/testing/test-instrumentation.nix
+        ];
+    }).config.system.build.isoImage;
+
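+  # Renders an attrset of strings as a Python dict literal, spliced into the
+  # create_machine() calls in the test scripts below.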
+  pythonDict = params: "\n    {\n        ${concatStringsSep ",\n        " (mapAttrsToList (name: param: "\"${name}\": \"${param}\"") params)},\n    }\n";
+
+  makeBootTest = name: extraConfig:
+    let
+      machineConfig = pythonDict ({ qemuFlags = "-m 768"; } // extraConfig);
+    in
+      makeTest {
+        inherit iso;
+        name = "boot-" + name;
+        nodes = { };
+        testScript =
+          ''
+            machine = create_machine(${machineConfig})
+            machine.start()
+            machine.wait_for_unit("multi-user.target")
+            machine.succeed("nix verify -r --no-trust /run/current-system")
+
+            with subtest("Check whether the channel got installed correctly"):
+                machine.succeed("nix-instantiate --dry-run '<nixpkgs>' -A hello")
+                machine.succeed("nix-env --dry-run -iA nixos.procps")
+
+            machine.shutdown()
+          '';
+      };
+
+  makeNetbootTest = name: extraConfig:
+    let
+      config = (import ../lib/eval-config.nix {
+          inherit system;
+          modules =
+            [ ../modules/installer/netboot/netboot.nix
+              ../modules/testing/test-instrumentation.nix
+              { key = "serial"; }
+            ];
+        }).config;
+      ipxeBootDir = pkgs.symlinkJoin {
+        name = "ipxeBootDir";
+        paths = [
+          config.system.build.netbootRamdisk
+          config.system.build.kernel
+          config.system.build.netbootIpxeScript
+        ];
+      };
+      machineConfig = pythonDict ({
+        qemuFlags = "-boot order=n -m 2000";
+        netBackendArgs = "tftp=${ipxeBootDir},bootfile=netboot.ipxe";
+      } // extraConfig);
+    in
+      makeTest {
+        name = "boot-netboot-" + name;
+        nodes = { };
+        testScript = ''
+            machine = create_machine(${machineConfig})
+            machine.start()
+            machine.wait_for_unit("multi-user.target")
+            machine.shutdown()
+          '';
+      };
+in {
+
+    biosCdrom = makeBootTest "bios-cdrom" {
+      cdrom = "${iso}/iso/${iso.isoName}";
+    };
+
+    biosUsb = makeBootTest "bios-usb" {
+      usb = "${iso}/iso/${iso.isoName}";
+    };
+
+    uefiCdrom = makeBootTest "uefi-cdrom" {
+      cdrom = "${iso}/iso/${iso.isoName}";
+      bios = "${pkgs.OVMF.fd}/FV/OVMF.fd";
+    };
+
+    uefiUsb = makeBootTest "uefi-usb" {
+      usb = "${iso}/iso/${iso.isoName}";
+      bios = "${pkgs.OVMF.fd}/FV/OVMF.fd";
+    };
+
+    biosNetboot = makeNetbootTest "bios" {};
+
+    uefiNetboot = makeNetbootTest "uefi" {
+      bios = "${pkgs.OVMF.fd}/FV/OVMF.fd";
+      # A custom ROM is needed for EFI PXE boot; the exact reason is unclear, since QEMU should still use iPXE for EFI.
+      netFrontendArgs = "romfile=${pkgs.ipxe}/ipxe.efirom";
+    };
+}
diff --git a/nixpkgs/nixos/tests/borgbackup.nix b/nixpkgs/nixos/tests/borgbackup.nix
new file mode 100644
index 000000000000..d97471e293e8
--- /dev/null
+++ b/nixpkgs/nixos/tests/borgbackup.nix
@@ -0,0 +1,175 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  passphrase = "supersecret";
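+  # The ':' in these paths exercises borg's handling of local paths that could
+  # otherwise be parsed as remote "user@host:path" repo specs.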
+  dataDir = "/ran:dom/data";
+  excludeFile = "not_this_file";
+  keepFile = "important_file";
+  keepFileData = "important_data";
+  localRepo = "/root/back:up";
+  archiveName = "my_archive";
+  remoteRepo = "borg@server:."; # No need to specify path
+  privateKey = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
+    RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
+    AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
+    9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+  publicKey = ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv root@client
+  '';
+  privateKeyAppendOnly = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLwAAAJC9YTxxvWE8
+    cQAAAAtzc2gtZWQyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLw
+    AAAEAAhV7wTl5dL/lz+PF/d4PnZXuG1Id6L/mFEiGT1tZsuFpxm7PUQsZB2Ejs8Xp0YVp8
+    IOW+HylIRzhweORbRCMvAAAADXJzY2h1ZXR6QGt1cnQ=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+  publicKeyAppendOnly = ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFpxm7PUQsZB2Ejs8Xp0YVp8IOW+HylIRzhweORbRCMv root@client
+  '';
+
+in {
+  name = "borgbackup";
+  meta = with pkgs.stdenv.lib; {
+    maintainers = with maintainers; [ dotlambda ];
+  };
+
+  nodes = {
+    client = { ... }: {
+      services.borgbackup.jobs = {
+
+        local = {
+          paths = dataDir;
+          repo = localRepo;
+          preHook = ''
+            # Don't append a timestamp
+            archiveName="${archiveName}"
+          '';
+          encryption = {
+            mode = "repokey";
+            inherit passphrase;
+          };
+          compression = "auto,zlib,9";
+          prune.keep = {
+            within = "1y";
+            yearly = 5;
+          };
+          exclude = [ "*/${excludeFile}" ];
+          postHook = "echo post";
+          startAt = [ ]; # Do not run automatically
+        };
+
+        remote = {
+          paths = dataDir;
+          repo = remoteRepo;
+          encryption.mode = "none";
+          startAt = [ ];
+          environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
+        };
+
+        remoteAppendOnly = {
+          paths = dataDir;
+          repo = remoteRepo;
+          encryption.mode = "none";
+          startAt = [ ];
+          environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly";
+        };
+
+      };
+    };
+
+    server = { ... }: {
+      services.openssh = {
+        enable = true;
+        passwordAuthentication = false;
+        challengeResponseAuthentication = false;
+      };
+
+      services.borgbackup.repos.repo1 = {
+        authorizedKeys = [ publicKey ];
+        path = "/data/borgbackup";
+      };
+
+      # Second repo to make sure the authorizedKeys options are merged correctly
+      services.borgbackup.repos.repo2 = {
+        authorizedKeysAppendOnly = [ publicKeyAppendOnly ];
+        path = "/data/borgbackup";
+        quota = ".5G";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    client.fail('test -d "${remoteRepo}"')
+
+    client.succeed(
+        "cp ${privateKey} /root/id_ed25519"
+    )
+    client.succeed("chmod 0600 /root/id_ed25519")
+    client.succeed(
+        "cp ${privateKeyAppendOnly} /root/id_ed25519.appendOnly"
+    )
+    client.succeed("chmod 0600 /root/id_ed25519.appendOnly")
+
+    client.succeed("mkdir -p ${dataDir}")
+    client.succeed("touch ${dataDir}/${excludeFile}")
+    client.succeed("echo '${keepFileData}' > ${dataDir}/${keepFile}")
+
+    with subtest("local"):
+        borg = "BORG_PASSPHRASE='${passphrase}' borg"
+        client.systemctl("start --wait borgbackup-job-local")
+        client.fail("systemctl is-failed borgbackup-job-local")
+        # Make sure at least one archive has been created
+        assert int(client.succeed("{} list '${localRepo}' | wc -l".format(borg))) > 0
+        # Make sure excludeFile has been excluded
+        client.fail(
+            "{} list '${localRepo}::${archiveName}' | grep -qF '${excludeFile}'".format(borg)
+        )
+        # Make sure keepFile has the correct content
+        client.succeed("{} extract '${localRepo}::${archiveName}'".format(borg))
+        assert "${keepFileData}" in client.succeed("cat ${dataDir}/${keepFile}")
+        # Make sure the same is true when using `borg mount`
+        client.succeed(
+            "mkdir -p /mnt/borg && {} mount '${localRepo}::${archiveName}' /mnt/borg".format(
+                borg
+            )
+        )
+        assert "${keepFileData}" in client.succeed(
+            "cat /mnt/borg/${dataDir}/${keepFile}"
+        )
+
+    with subtest("remote"):
+        borg = "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519' borg"
+        server.wait_for_unit("sshd.service")
+        client.wait_for_unit("network.target")
+        client.systemctl("start --wait borgbackup-job-remote")
+        client.fail("systemctl is-failed borgbackup-job-remote")
+
+        # Make sure we can't access repos other than the specified one
+        client.fail("{} list borg\@server:wrong".format(borg))
+
+        # TODO: Make sure that data is actually deleted
+
+    with subtest("remoteAppendOnly"):
+        borg = (
+            "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly' borg"
+        )
+        server.wait_for_unit("sshd.service")
+        client.wait_for_unit("network.target")
+        client.systemctl("start --wait borgbackup-job-remoteAppendOnly")
+        client.fail("systemctl is-failed borgbackup-job-remoteAppendOnly")
+
+        # Make sure we can't access repos other than the specified one
+        client.fail("{} list borg\@server:wrong".format(borg))
+
+        # TODO: Make sure that data is not actually deleted
+  '';
+})
diff --git a/nixpkgs/nixos/tests/buildbot.nix b/nixpkgs/nixos/tests/buildbot.nix
new file mode 100644
index 000000000000..0d979dc2d054
--- /dev/null
+++ b/nixpkgs/nixos/tests/buildbot.nix
@@ -0,0 +1,113 @@
+# Test ensures buildbot master comes up correctly and workers can connect
+
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+import ./make-test-python.nix {
+  name = "buildbot";
+
+  nodes = {
+    bbmaster = { pkgs, ... }: {
+      services.buildbot-master = {
+        enable = true;
+
+        # NOTE: use a fake repo, since there is no internet access in Hydra CI
+        factorySteps = [
+          "steps.Git(repourl='git://gitrepo/fakerepo.git', mode='incremental')"
+          "steps.ShellCommand(command=['bash', 'fakerepo.sh'])"
+        ];
+        changeSource = [
+          "changes.GitPoller('git://gitrepo/fakerepo.git', workdir='gitpoller-workdir', branch='master', pollinterval=300)"
+        ];
+      };
+      networking.firewall.allowedTCPPorts = [ 8010 8011 9989 ];
+      environment.systemPackages = with pkgs; [ git python3Packages.buildbot-full ];
+    };
+
+    bbworker = { pkgs, ... }: {
+      services.buildbot-worker = {
+        enable = true;
+        masterUrl = "bbmaster:9989";
+      };
+      environment.systemPackages = with pkgs; [ git python3Packages.buildbot-worker ];
+    };
+
+    gitrepo = { pkgs, ... }: {
+      services.openssh.enable = true;
+      networking.firewall.allowedTCPPorts = [ 22 9418 ];
+      environment.systemPackages = with pkgs; [ git ];
+      systemd.services.git-daemon = {
+        description   = "Git daemon for the test";
+        wantedBy      = [ "multi-user.target" ];
+        after         = [ "network.target" "sshd.service" ];
+
+        serviceConfig.Restart = "always";
+        path = with pkgs; [ coreutils git openssh ];
+        environment = { HOME = "/root"; };
+        preStart = ''
+          git config --global user.name 'Nobody Fakeuser'
+          git config --global user.email 'nobody@fakerepo.com'
+          rm -rvf /srv/repos/fakerepo.git /tmp/fakerepo
+          mkdir -pv /srv/repos/fakerepo ~/.ssh
+          ssh-keyscan -H gitrepo > ~/.ssh/known_hosts
+          cat ~/.ssh/known_hosts
+
+          mkdir -p /srv/repos/fakerepo
+          cd /srv/repos/fakerepo
+          rm -rf *
+          git init
+          echo -e '#!/bin/sh\necho fakerepo' > fakerepo.sh
+          cat fakerepo.sh
+          touch .git/git-daemon-export-ok
+          git add fakerepo.sh .git/git-daemon-export-ok
+          git commit -m fakerepo
+        '';
+        script = ''
+          git daemon --verbose --export-all --base-path=/srv/repos --reuseaddr
+        '';
+      };
+    };
+  };
+
+  testScript = ''
+    gitrepo.wait_for_unit("git-daemon.service")
+    gitrepo.wait_for_unit("multi-user.target")
+
+    with subtest("Repo is accessible via git daemon"):
+        bbmaster.wait_for_unit("network-online.target")
+        bbmaster.succeed("rm -rfv /tmp/fakerepo")
+        bbmaster.succeed("git clone git://gitrepo/fakerepo /tmp/fakerepo")
+
+    with subtest("Master service and worker successfully connect"):
+        bbmaster.wait_for_unit("buildbot-master.service")
+        bbmaster.wait_until_succeeds("curl --fail -s --head http://bbmaster:8010")
+        bbworker.wait_for_unit("network-online.target")
+        bbworker.succeed("nc -z bbmaster 8010")
+        bbworker.succeed("nc -z bbmaster 9989")
+        bbworker.wait_for_unit("buildbot-worker.service")
+
+    with subtest("Stop buildbot worker"):
+        bbmaster.succeed("systemctl -l --no-pager status buildbot-master")
+        bbmaster.succeed("systemctl stop buildbot-master")
+        bbworker.fail("nc -z bbmaster 8010")
+        bbworker.fail("nc -z bbmaster 9989")
+        bbworker.succeed("systemctl -l --no-pager status buildbot-worker")
+        bbworker.succeed("systemctl stop buildbot-worker")
+
+    with subtest("Buildbot daemon mode works"):
+        bbmaster.succeed(
+            "buildbot create-master /tmp",
+            "mv -fv /tmp/master.cfg.sample /tmp/master.cfg",
+            "sed -i 's/8010/8011/' /tmp/master.cfg",
+            "buildbot start /tmp",
+            "nc -z bbmaster 8011",
+        )
+        bbworker.wait_until_succeeds("curl --fail -s --head http://bbmaster:8011")
+        bbmaster.wait_until_succeeds("buildbot stop /tmp")
+        bbworker.fail("nc -z bbmaster 8011")
+  '';
+
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ nand0p ];
+} {}
diff --git a/nixpkgs/nixos/tests/buildkite-agents.nix b/nixpkgs/nixos/tests/buildkite-agents.nix
new file mode 100644
index 000000000000..a6f33e0143c5
--- /dev/null
+++ b/nixpkgs/nixos/tests/buildkite-agents.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "buildkite-agent";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ flokli ];
+  };
+
+  machine = { pkgs, ... }: {
+    services.buildkite-agents = {
+      one = {
+        privateSshKeyPath = (import ./ssh-keys.nix pkgs).snakeOilPrivateKey;
+        tokenPath = (pkgs.writeText "my-token" "5678");
+      };
+      two = {
+        tokenPath = (pkgs.writeText "my-token" "1234");
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    # We can't wait for the units to come up, since the agents can't actually
+    # connect to Buildkite; instead we check that the files are set up correctly.
+
+    machine.wait_for_file("/var/lib/buildkite-agent-one/buildkite-agent.cfg")
+    machine.wait_for_file("/var/lib/buildkite-agent-one/.ssh/id_rsa")
+
+    machine.wait_for_file("/var/lib/buildkite-agent-two/buildkite-agent.cfg")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/caddy.nix b/nixpkgs/nixos/tests/caddy.nix
new file mode 100644
index 000000000000..144d83179a16
--- /dev/null
+++ b/nixpkgs/nixos/tests/caddy.nix
@@ -0,0 +1,85 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "caddy";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ xfix ];
+  };
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.caddy.enable = true;
+      services.caddy.config = ''
+        http://localhost {
+          gzip
+
+          root ${
+            pkgs.runCommand "testdir" {} ''
+              mkdir "$out"
+              echo hello world > "$out/example.html"
+            ''
+          }
+        }
+      '';
+
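+      # The test script switches into these specialisations at runtime: first to
+      # change the served content (and thus the ETag), then to move the vhost to
+      # port 8080 to verify that the config is reloaded.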
+      specialisation.etag.configuration = {
+        services.caddy.config = lib.mkForce ''
+          http://localhost {
+            gzip
+
+            root ${
+              pkgs.runCommand "testdir2" {} ''
+                mkdir "$out"
+                echo changed > "$out/example.html"
+              ''
+            }
+          }
+        '';
+      };
+
+      specialisation.config-reload.configuration = {
+        services.caddy.config = ''
+          http://localhost:8080 {
+          }
+        '';
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    etagSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/etag";
+    justReloadSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/config-reload";
+  in ''
+    url = "http://localhost/example.html"
+    webserver.wait_for_unit("caddy")
+    webserver.wait_for_open_port("80")
+
+
+    def check_etag(url):
+        etag = webserver.succeed(
+            "curl -v '{}' 2>&1 | sed -n -e \"s/^< [Ee][Tt][Aa][Gg]: *//p\"".format(url)
+        )
+        etag = etag.replace("\r\n", " ")
+        http_code = webserver.succeed(
+            "curl -w \"%{{http_code}}\" -X HEAD -H 'If-None-Match: {}' {}".format(etag, url)
+        )
+        assert int(http_code) == 304, "HTTP code is not 304"
+        return etag
+
+
+    with subtest("check ETag if serving Nix store paths"):
+        old_etag = check_etag(url)
+        webserver.succeed(
+            "${etagSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.sleep(1)
+        new_etag = check_etag(url)
+        assert old_etag != new_etag, "Old ETag {} is the same as {}".format(
+            old_etag, new_etag
+        )
+
+    with subtest("config is reloaded on nixos-rebuild switch"):
+        webserver.succeed(
+            "${justReloadSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.wait_for_open_port("8080")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/cadvisor.nix b/nixpkgs/nixos/tests/cadvisor.nix
new file mode 100644
index 000000000000..60c04f147800
--- /dev/null
+++ b/nixpkgs/nixos/tests/cadvisor.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "cadvisor";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.cadvisor.enable = true;
+    };
+
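+    # A second node that stores cadvisor's stats in a local InfluxDB instead of in memory.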
+    influxdb = { lib, ... }: with lib; {
+      services.cadvisor.enable = true;
+      services.cadvisor.storageDriver = "influxdb";
+      services.influxdb.enable = true;
+    };
+  };
+
+  testScript =  ''
+      start_all()
+      machine.wait_for_unit("cadvisor.service")
+      machine.succeed("curl http://localhost:8080/containers/")
+
+      influxdb.wait_for_unit("influxdb.service")
+
+      # create influxdb database
+      influxdb.succeed(
+          'curl -XPOST http://localhost:8086/query --data-urlencode "q=CREATE DATABASE root"'
+      )
+
+      influxdb.wait_for_unit("cadvisor.service")
+      influxdb.succeed("curl http://localhost:8080/containers/")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/cage.nix b/nixpkgs/nixos/tests/cage.nix
new file mode 100644
index 000000000000..a6f73e00c066
--- /dev/null
+++ b/nixpkgs/nixos/tests/cage.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "cage";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ matthewbauer flokli ];
+  };
+
+  machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+    services.cage = {
+      enable = true;
+      user = "alice";
+      program = "${pkgs.xterm}/bin/xterm -cm -pc"; # disable color and bold to make OCR easier
+    };
+
+    # this needs a fairly recent kernel, otherwise:
+    #   [backend/drm/util.c:215] Unable to add DRM framebuffer: No such file or directory
+    #   [backend/drm/legacy.c:15] Virtual-1: Failed to set CRTC: No such file or directory
+    #   [backend/drm/util.c:215] Unable to add DRM framebuffer: No such file or directory
+    #   [backend/drm/legacy.c:15] Virtual-1: Failed to set CRTC: No such file or directory
+    #   [backend/drm/drm.c:618] Failed to initialize renderer on connector 'Virtual-1': initial page-flip failed
+    #   [backend/drm/drm.c:701] Failed to initialize renderer for plane
+    boot.kernelPackages = pkgs.linuxPackages_latest;
+
+    virtualisation.memorySize = 1024;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    with subtest("Wait for cage to boot up"):
+        start_all()
+        machine.wait_for_file("/run/user/${toString user.uid}/wayland-0.lock")
+        machine.wait_until_succeeds("pgrep xterm")
+        machine.wait_for_text("alice@machine")
+        machine.screenshot("screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/cassandra.nix b/nixpkgs/nixos/tests/cassandra.nix
new file mode 100644
index 000000000000..05607956a9d6
--- /dev/null
+++ b/nixpkgs/nixos/tests/cassandra.nix
@@ -0,0 +1,131 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  # Change this to test a different version of Cassandra:
+  testPackage = pkgs.cassandra;
+  clusterName = "NixOS Automated-Test Cluster";
+
+  testRemoteAuth = lib.versionAtLeast testPackage.version "3.11";
+  jmxRoles = [{ username = "me"; password = "password"; }];
+  jmxRolesFile = ./cassandra-jmx-roles;
+  jmxAuthArgs = "-u ${(builtins.elemAt jmxRoles 0).username} -pw ${(builtins.elemAt jmxRoles 0).password}";
+  jmxPort = 7200;  # Deliberately non-standard, so nothing works unless our port setting is actually applied
+  jmxPortStr = toString jmxPort;
+
+  # Would usually be assigned to 512M.
+  # Set it to a different value, so that we can check whether our config
+  # actually changes it.
+  numMaxHeapSize = "400";
+  getHeapLimitCommand = ''
+    nodetool info -p ${jmxPortStr} | grep "^Heap Memory" | awk '{print $NF}'
+  '';
+  checkHeapLimitCommand = pkgs.writeShellScript "check-heap-limit.sh" ''
+    [ 1 -eq "$(echo "$(${getHeapLimitCommand}) < ${numMaxHeapSize}" | ${pkgs.bc}/bin/bc)" ]
+  '';
+
+  cassandraCfg = ipAddress:
+    { enable = true;
+      inherit clusterName;
+      listenAddress = ipAddress;
+      rpcAddress = ipAddress;
+      seedAddresses = [ "192.168.1.1" ];
+      package = testPackage;
+      maxHeapSize = "${numMaxHeapSize}M";
+      heapNewSize = "100M";
+      inherit jmxPort;
+    };
+  nodeCfg = ipAddress: extra: {pkgs, config, ...}: rec {
+    environment.systemPackages = [ testPackage ];
+    networking = {
+      firewall.allowedTCPPorts = [ 7000 9042 services.cassandra.jmxPort ];
+      useDHCP = false;
+      interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+        { address = ipAddress; prefixLength = 24; }
+      ];
+    };
+    services.cassandra = cassandraCfg ipAddress // extra;
+    virtualisation.memorySize = 1024;
+  };
+in
+{
+  name = "cassandra";
+  meta = {
+    maintainers = with lib.maintainers; [ johnazoidberg ];
+  };
+
+  nodes = {
+    cass0 = nodeCfg "192.168.1.1" {};
+    cass1 = nodeCfg "192.168.1.2" (lib.optionalAttrs testRemoteAuth { inherit jmxRoles; remoteJmx = true; });
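+    # cass2 is only started later, preconfigured to take cass1's place in the
+    # "Replace crashed node" subtest.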
+    cass2 = nodeCfg "192.168.1.3" { jvmOpts = [ "-Dcassandra.replace_address=cass1" ]; };
+  };
+
+  testScript = ''
+    # Check configuration
+    with subtest("Timers exist"):
+        cass0.succeed("systemctl list-timers | grep cassandra-full-repair.timer")
+        cass0.succeed("systemctl list-timers | grep cassandra-incremental-repair.timer")
+
+    with subtest("Can connect via cqlsh"):
+        cass0.wait_for_unit("cassandra.service")
+        cass0.wait_until_succeeds("nc -z cass0 9042")
+        cass0.succeed("echo 'show version;' | cqlsh cass0")
+
+    with subtest("Nodetool is operational"):
+        cass0.wait_for_unit("cassandra.service")
+        cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+        cass0.succeed("nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass0'")
+
+    with subtest("Cluster name was set"):
+        cass0.wait_for_unit("cassandra.service")
+        cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+        cass0.wait_until_succeeds(
+            "nodetool describecluster -p ${jmxPortStr} | grep 'Name: ${clusterName}'"
+        )
+
+    with subtest("Heap limit set correctly"):
+        # Nodetool takes a while until it can display info
+        cass0.wait_until_succeeds("nodetool info -p ${jmxPortStr}")
+        cass0.succeed("${checkHeapLimitCommand}")
+
+    # Check cluster interaction
+    with subtest("Bring up cluster"):
+        cass1.wait_for_unit("cassandra.service")
+        cass1.wait_until_succeeds(
+            "nodetool -p ${jmxPortStr} ${jmxAuthArgs} status | egrep -c '^UN' | grep 2"
+        )
+        cass0.succeed("nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass1'")
+  '' + lib.optionalString testRemoteAuth ''
+    with subtest("Remote authenticated jmx"):
+        # Doesn't work if not enabled
+        cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+        cass1.fail("nc -z 192.168.1.1 ${jmxPortStr}")
+        cass1.fail("nodetool -p ${jmxPortStr} -h 192.168.1.1 status")
+
+        # Works if enabled
+        cass1.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+        cass0.succeed("nodetool -p ${jmxPortStr} -h 192.168.1.2 ${jmxAuthArgs} status")
+  '' + ''
+    with subtest("Break and fix node"):
+        cass1.block()
+        cass0.wait_until_succeeds(
+            "nodetool status -p ${jmxPortStr} --resolve-ip | egrep -c '^DN[[:space:]]+cass1'"
+        )
+        cass0.succeed("nodetool status -p ${jmxPortStr} | egrep -c '^UN'  | grep 1")
+        cass1.unblock()
+        cass1.wait_until_succeeds(
+            "nodetool -p ${jmxPortStr} ${jmxAuthArgs} status | egrep -c '^UN'  | grep 2"
+        )
+        cass0.succeed("nodetool status -p ${jmxPortStr} | egrep -c '^UN'  | grep 2")
+
+    with subtest("Replace crashed node"):
+        cass1.block()  # .crash() waits until it's fully shutdown
+        cass2.start()
+        cass0.wait_until_fails(
+            "nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass1'"
+        )
+
+        cass2.wait_for_unit("cassandra.service")
+        cass0.wait_until_succeeds(
+            "nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass2'"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ceph-multi-node.nix b/nixpkgs/nixos/tests/ceph-multi-node.nix
new file mode 100644
index 000000000000..22fe5cada480
--- /dev/null
+++ b/nixpkgs/nixos/tests/ceph-multi-node.nix
@@ -0,0 +1,233 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+
+let
+  cfg = {
+    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+    monA = {
+      name = "a";
+      ip = "192.168.1.1";
+    };
+    osd0 = {
+      name = "0";
+      ip = "192.168.1.2";
+      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+    };
+    osd1 = {
+      name = "1";
+      ip = "192.168.1.3";
+      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+    };
+    osd2 = {
+      name = "2";
+      ip = "192.168.1.4";
+      key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+      uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+    };
+  };
+  generateCephConfig = { daemonConfig }: {
+    enable = true;
+    global = {
+      fsid = cfg.clusterId;
+      monHost = cfg.monA.ip;
+      monInitialMembers = cfg.monA.name;
+    };
+  } // daemonConfig;
+
+  generateHost = { pkgs, cephConfig, networkConfig, ... }: {
+    virtualisation = {
+      memorySize = 512;
+      emptyDiskImages = [ 20480 ];
+      vlans = [ 1 ];
+    };
+
+    networking = networkConfig;
+
+    environment.systemPackages = with pkgs; [
+      bash
+      sudo
+      ceph
+      xfsprogs
+      netcat-openbsd
+    ];
+
+    boot.kernelModules = [ "xfs" ];
+
+    services.ceph = cephConfig;
+  };
+
+  networkMonA = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = cfg.monA.ip; prefixLength = 24; }
+    ];
+    firewall = {
+      allowedTCPPorts = [ 6789 3300 ];
+      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
+    };
+  };
+  cephConfigMonA = generateCephConfig { daemonConfig = {
+    mon = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    mgr = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+  }; };
+
+  networkOsd = osd: {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = osd.ip; prefixLength = 24; }
+    ];
+    firewall = {
+      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
+    };
+  };
+
+  cephConfigOsd = osd: generateCephConfig { daemonConfig = {
+    osd = {
+      enable = true;
+      daemons = [ osd.name ];
+    };
+  }; };
+
+  # The following deployment is based on the manual deployment described here:
+  # https://docs.ceph.com/docs/master/install/manual-deployment/
+  # For other ways to deploy a ceph cluster, look at the documentation at
+  # https://docs.ceph.com/docs/master/
+  testscript = { ... }: ''
+    start_all()
+
+    monA.wait_for_unit("network.target")
+    osd0.wait_for_unit("network.target")
+    osd1.wait_for_unit("network.target")
+    osd2.wait_for_unit("network.target")
+
+    # Bootstrap ceph-mon daemon
+    monA.succeed(
+        "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+        "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+        "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+        "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+        "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+        "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
+        "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+        "systemctl start ceph-mon-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.succeed("ceph mon enable-msgr2")
+
+    # Can't check ceph status until a mon is up
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
+    # Start the ceph-mgr daemon, it has no deps and hardly any setup
+    monA.succeed(
+        "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+        "systemctl start ceph-mgr-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mgr-a")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
+    # Send the admin keyring to the OSD machines
+    monA.succeed("cp /etc/ceph/ceph.client.admin.keyring /tmp/shared")
+    osd0.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+    osd1.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+    osd2.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+
+    # Bootstrap OSDs
+    osd0.succeed(
+        "mkfs.xfs /dev/vdb",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+        'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
+    )
+    osd1.succeed(
+        "mkfs.xfs /dev/vdb",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+        'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+    )
+    osd2.succeed(
+        "mkfs.xfs /dev/vdb",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+        'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+    )
+
+    # Initialize the OSDs with regular filestore
+    osd0.succeed(
+        "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd0.name}",
+    )
+    osd1.succeed(
+        "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd1.name}",
+    )
+    osd2.succeed(
+        "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd2.name}",
+    )
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
+    monA.succeed(
+        "ceph osd pool create multi-node-test 128 128",
+        "ceph osd pool ls | grep 'multi-node-test'",
+        "ceph osd pool rename multi-node-test multi-node-other-test",
+        "ceph osd pool ls | grep 'multi-node-other-test'",
+    )
+    monA.wait_until_succeeds("ceph -s | grep '1 pools, 128 pgs'")
+    monA.succeed("ceph osd pool set multi-node-other-test size 2")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    monA.wait_until_succeeds("ceph -s | grep '128 active+clean'")
+    monA.fail(
+        "ceph osd pool ls | grep 'multi-node-test'",
+        "ceph osd pool delete multi-node-other-test multi-node-other-test --yes-i-really-really-mean-it",
+    )
+
+    # Shut down ceph on all machines in a very impolite way
+    monA.crash()
+    osd0.crash()
+    osd1.crash()
+    osd2.crash()
+
+    # Start it up
+    osd0.start()
+    osd1.start()
+    osd2.start()
+    monA.start()
+
+    # Ensure the cluster comes back up again
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+  '';
+in {
+  name = "basic-multi-node-ceph-cluster";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lejonet ];
+  };
+
+  nodes = {
+    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
+    osd0 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd0; networkConfig = networkOsd cfg.osd0; };
+    osd1 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd1; networkConfig = networkOsd cfg.osd1; };
+    osd2 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd2; networkConfig = networkOsd cfg.osd2; };
+  };
+
+  testScript = testscript;
+})
diff --git a/nixpkgs/nixos/tests/ceph-single-node.nix b/nixpkgs/nixos/tests/ceph-single-node.nix
new file mode 100644
index 000000000000..01c4b4138451
--- /dev/null
+++ b/nixpkgs/nixos/tests/ceph-single-node.nix
@@ -0,0 +1,196 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+
+let
+  cfg = {
+    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+    monA = {
+      name = "a";
+      ip = "192.168.1.1";
+    };
+    osd0 = {
+      name = "0";
+      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+    };
+    osd1 = {
+      name = "1";
+      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+    };
+    osd2 = {
+      name = "2";
+      key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+      uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+    };
+  };
+  generateCephConfig = { daemonConfig }: {
+    enable = true;
+    global = {
+      fsid = cfg.clusterId;
+      monHost = cfg.monA.ip;
+      monInitialMembers = cfg.monA.name;
+    };
+  } // daemonConfig;
+
+  generateHost = { pkgs, cephConfig, networkConfig, ... }: {
+    virtualisation = {
+      memorySize = 512;
+      emptyDiskImages = [ 20480 20480 20480 ];
+      vlans = [ 1 ];
+    };
+
+    networking = networkConfig;
+
+    environment.systemPackages = with pkgs; [
+      bash
+      sudo
+      ceph
+      xfsprogs
+    ];
+
+    boot.kernelModules = [ "xfs" ];
+
+    services.ceph = cephConfig;
+  };
+
+  networkMonA = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = cfg.monA.ip; prefixLength = 24; }
+    ];
+  };
+  cephConfigMonA = generateCephConfig { daemonConfig = {
+    mon = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    mgr = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    osd = {
+      enable = true;
+      daemons = [ cfg.osd0.name cfg.osd1.name cfg.osd2.name ];
+    };
+  }; };
+
+  # The following deployment is based on the manual deployment described here:
+  # https://docs.ceph.com/docs/master/install/manual-deployment/
+  # For other ways to deploy a Ceph cluster, look at the documentation at
+  # https://docs.ceph.com/docs/master/
+  testscript = { ... }: ''
+    start_all()
+
+    monA.wait_for_unit("network.target")
+
+    # Bootstrap ceph-mon daemon
+    monA.succeed(
+        "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+        "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+        "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+        "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+        "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+        "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+        "systemctl start ceph-mon-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.succeed("ceph mon enable-msgr2")
+
+    # Can't check ceph status until a mon is up
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
+    # Start the ceph-mgr daemon, after copying in the keyring
+    monA.succeed(
+        "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
+        "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+        "systemctl start ceph-mgr-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mgr-a")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
+    # Bootstrap OSDs
+    monA.succeed(
+        "mkfs.xfs /dev/vdb",
+        "mkfs.xfs /dev/vdc",
+        "mkfs.xfs /dev/vdd",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "mount /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "mount /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+        'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
+        'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+        'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+    )
+
+    # Initialize the OSDs with regular filestore
+    monA.succeed(
+        "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+        "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+        "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd0.name}",
+        "systemctl start ceph-osd-${cfg.osd1.name}",
+        "systemctl start ceph-osd-${cfg.osd2.name}",
+    )
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
+    monA.succeed(
+        "ceph osd pool create single-node-test 128 128",
+        "ceph osd pool ls | grep 'single-node-test'",
+        "ceph osd pool rename single-node-test single-node-other-test",
+        "ceph osd pool ls | grep 'single-node-other-test'",
+    )
+    monA.wait_until_succeeds("ceph -s | grep '1 pools, 128 pgs'")
+    monA.succeed(
+        "ceph osd getcrushmap -o crush",
+        "crushtool -d crush -o decrushed",
+        "sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
+        "crushtool -c modcrush -o recrushed",
+        "ceph osd setcrushmap -i recrushed",
+        "ceph osd pool set single-node-other-test size 2",
+    )
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    monA.wait_until_succeeds("ceph -s | grep '128 active+clean'")
+    monA.fail(
+        "ceph osd pool ls | grep 'multi-node-test'",
+        "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
+    )
+
+    # Shut down ceph by stopping ceph.target.
+    monA.succeed("systemctl stop ceph.target")
+
+    # Start it up
+    monA.succeed("systemctl start ceph.target")
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
+
+    # Ensure the cluster comes back up again
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+  '';
+in {
+  name = "basic-single-node-ceph-cluster";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lejonet johanot ];
+  };
+
+  nodes = {
+    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
+  };
+
+  testScript = testscript;
+})
diff --git a/nixpkgs/nixos/tests/certmgr.nix b/nixpkgs/nixos/tests/certmgr.nix
new file mode 100644
index 000000000000..ef32f54400e3
--- /dev/null
+++ b/nixpkgs/nixos/tests/certmgr.nix
@@ -0,0 +1,151 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+let
+  mkSpec = { host, service ? null, action }: {
+    inherit action;
+    authority = {
+      file = {
+        group = "nginx";
+        owner = "nginx";
+        path = "/tmp/${host}-ca.pem";
+      };
+      label = "www_ca";
+      profile = "three-month";
+      remote = "localhost:8888";
+    };
+    certificate = {
+      group = "nginx";
+      owner = "nginx";
+      path = "/tmp/${host}-cert.pem";
+    };
+    private_key = {
+      group = "nginx";
+      mode = "0600";
+      owner = "nginx";
+      path = "/tmp/${host}-key.pem";
+    };
+    request = {
+      CN = host;
+      hosts = [ host "www.${host}" ];
+      key = {
+        algo = "rsa";
+        size = 2048;
+      };
+      names = [
+        {
+          C = "US";
+          L = "San Francisco";
+          O = "Example, LLC";
+          ST = "CA";
+        }
+      ];
+    };
+    inherit service;
+  };
+
+  mkCertmgrTest = { svcManager, specs, testScript }: makeTest {
+    name = "certmgr-" + svcManager;
+    nodes = {
+      machine = { config, lib, pkgs, ... }: {
+        networking.firewall.allowedTCPPorts = with config.services; [ cfssl.port certmgr.metricsPort ];
+        networking.extraHosts = "127.0.0.1 imp.example.org decl.example.org";
+
+        services.cfssl.enable = true;
+        systemd.services.cfssl.after = [ "cfssl-init.service" "networking.target" ];
+
+        systemd.services.cfssl-init = {
+          description = "Initialize the cfssl CA";
+          wantedBy    = [ "multi-user.target" ];
+          serviceConfig = {
+            User             = "cfssl";
+            Type             = "oneshot";
+            WorkingDirectory = config.services.cfssl.dataDir;
+          };
+          script = ''
+            ${pkgs.cfssl}/bin/cfssl genkey -initca ${pkgs.writeText "ca.json" (builtins.toJSON {
+              hosts = [ "ca.example.com" ];
+              key = {
+                algo = "rsa";
+                size = 4096;
+              };
+              names = [
+                {
+                  C = "US";
+                  L = "San Francisco";
+                  O = "Internet Widgets, LLC";
+                  OU = "Certificate Authority";
+                  ST = "California";
+                }
+              ];
+            })} | ${pkgs.cfssl}/bin/cfssljson -bare ca
+          '';
+        };
+
+        services.nginx = {
+          enable = true;
+          virtualHosts = lib.mkMerge (map (host: {
+            ${host} = {
+              sslCertificate = "/tmp/${host}-cert.pem";
+              sslCertificateKey = "/tmp/${host}-key.pem";
+              extraConfig = ''
+                ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+              '';
+              onlySSL = true;
+              serverName = host;
+              root = pkgs.writeTextDir "index.html" "It works!";
+            };
+          }) [ "imp.example.org" "decl.example.org" ]);
+        };
+
+        systemd.services.nginx.wantedBy = lib.mkForce [];
+
+        systemd.services.certmgr.after = [ "cfssl.service" ];
+        services.certmgr = {
+          enable = true;
+          inherit svcManager;
+          inherit specs;
+        };
+
+      };
+    };
+    inherit testScript;
+  };
+in
+{
+  systemd = mkCertmgrTest {
+    svcManager = "systemd";
+    specs = {
+      decl = mkSpec { host = "decl.example.org"; service = "nginx"; action ="restart"; };
+      imp = toString (pkgs.writeText "test.json" (builtins.toJSON (
+        mkSpec { host = "imp.example.org"; service = "nginx"; action = "restart"; }
+      )));
+    };
+    testScript = ''
+      machine.wait_for_unit("cfssl.service")
+      machine.wait_until_succeeds("ls /tmp/decl.example.org-ca.pem")
+      machine.wait_until_succeeds("ls /tmp/decl.example.org-key.pem")
+      machine.wait_until_succeeds("ls /tmp/decl.example.org-cert.pem")
+      machine.wait_until_succeeds("ls /tmp/imp.example.org-ca.pem")
+      machine.wait_until_succeeds("ls /tmp/imp.example.org-key.pem")
+      machine.wait_until_succeeds("ls /tmp/imp.example.org-cert.pem")
+      machine.wait_for_unit("nginx.service")
+      assert 1 < int(machine.succeed('journalctl -u nginx | grep "Starting Nginx" | wc -l'))
+      machine.succeed("curl --cacert /tmp/imp.example.org-ca.pem https://imp.example.org")
+      machine.succeed("curl --cacert /tmp/decl.example.org-ca.pem https://decl.example.org")
+    '';
+  };
+
+  command = mkCertmgrTest {
+    svcManager = "command";
+    specs = {
+      test = mkSpec { host = "command.example.org"; action = "touch /tmp/command.executed"; };
+    };
+    testScript = ''
+      machine.wait_for_unit("cfssl.service")
+      machine.wait_until_succeeds("stat /tmp/command.executed")
+    '';
+  };
+
+}
diff --git a/nixpkgs/nixos/tests/cfssl.nix b/nixpkgs/nixos/tests/cfssl.nix
new file mode 100644
index 000000000000..e291fc285fba
--- /dev/null
+++ b/nixpkgs/nixos/tests/cfssl.nix
@@ -0,0 +1,67 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "cfssl";
+
+  machine = { config, lib, pkgs, ... }:
+  {
+    networking.firewall.allowedTCPPorts = [ config.services.cfssl.port ];
+
+    services.cfssl.enable = true;
+    systemd.services.cfssl.after = [ "cfssl-init.service" ];
+
+    systemd.services.cfssl-init = {
+      description = "Initialize the cfssl CA";
+      wantedBy    = [ "multi-user.target" ];
+      serviceConfig = {
+        User             = "cfssl";
+        Type             = "oneshot";
+        WorkingDirectory = config.services.cfssl.dataDir;
+      };
+      script = with pkgs; ''
+        ${cfssl}/bin/cfssl genkey -initca ${pkgs.writeText "ca.json" (builtins.toJSON {
+          hosts = [ "ca.example.com" ];
+          key = {
+            algo = "rsa";
+            size = 4096;
+          };
+          names = [
+            {
+              C = "US";
+              L = "San Francisco";
+              O = "Internet Widgets, LLC";
+              OU = "Certificate Authority";
+              ST = "California";
+            }
+          ];
+        })} | ${cfssl}/bin/cfssljson -bare ca
+      '';
+    };
+  };
+
+  testScript =
+  let
+    cfsslrequest = with pkgs; writeScript "cfsslrequest" ''
+      curl -X POST -H "Content-Type: application/json" -d @${csr} \
+        http://localhost:8888/api/v1/cfssl/newkey | ${cfssl}/bin/cfssljson /tmp/certificate
+    '';
+    csr = pkgs.writeText "csr.json" (builtins.toJSON {
+      CN = "www.example.com";
+      hosts = [ "example.com" "www.example.com" ];
+      key = {
+        algo = "rsa";
+        size = 2048;
+      };
+      names = [
+        {
+          C = "US";
+          L = "San Francisco";
+          O = "Example Company, LLC";
+          OU = "Operations";
+          ST = "California";
+        }
+      ];
+    });
+  in
+    ''
+      machine.wait_for_unit("cfssl.service")
+      machine.wait_until_succeeds("${cfsslrequest}")
+      machine.succeed("ls /tmp/certificate-key.pem")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/chromium.nix b/nixpkgs/nixos/tests/chromium.nix
new file mode 100644
index 000000000000..fc5d3a5c52fe
--- /dev/null
+++ b/nixpkgs/nixos/tests/chromium.nix
@@ -0,0 +1,266 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, channelMap ? {
+    stable = pkgs.chromium;
+    beta   = pkgs.chromiumBeta;
+    dev    = pkgs.chromiumDev;
+  }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+mapAttrs (channel: chromiumPkg: makeTest rec {
+  name = "chromium-${channel}";
+  meta = {
+    maintainers = with maintainers; [ aszlig ];
+    # https://github.com/NixOS/hydra/issues/591#issuecomment-435125621
+    inherit (chromiumPkg.meta) timeout;
+  };
+
+  enableOCR = true;
+
+  user = "alice";
+
+  machine.imports = [ ./common/user-account.nix ./common/x11.nix ];
+  machine.virtualisation.memorySize = 2047;
+  machine.test-support.displayManager.auto.user = user;
+  machine.environment.systemPackages = [ chromiumPkg ];
+
+  startupHTML = pkgs.writeText "chromium-startup.html" ''
+    <!DOCTYPE html>
+    <html>
+    <head>
+    <meta charset="UTF-8">
+    <title>Chromium startup notifier</title>
+    </head>
+    <body onload="javascript:document.title='startup done'">
+      <img src="file://${pkgs.fetchurl {
+        url = "http://nixos.org/logo/nixos-hex.svg";
+        sha256 = "07ymq6nw8kc22m7kzxjxldhiq8gzmc7f45kq2bvhbdm0w5s112s4";
+      }}" />
+    </body>
+    </html>
+  '';
+
+  testScript = let
+    xdo = name: text: let
+      xdoScript = pkgs.writeText "${name}.xdo" text;
+    in "${pkgs.xdotool}/bin/xdotool '${xdoScript}'";
+  in ''
+    import re
+    import shlex
+    from contextlib import contextmanager
+
+
+    # Run as user alice
+    def ru(cmd):
+        return "su - ${user} -c " + shlex.quote(cmd)
+
+
+    def create_new_win():
+        with machine.nested("Creating a new Chromium window"):
+            machine.execute(
+                ru(
+                    "${xdo "new-window" ''
+                      search --onlyvisible --name "startup done"
+                      windowfocus --sync
+                      windowactivate --sync
+                    ''}"
+                )
+            )
+            machine.execute(
+                ru(
+                    "${xdo "new-window" ''
+                      key Ctrl+n
+                    ''}"
+                )
+            )
+
+
+    def close_win():
+        def try_close(_):
+            machine.execute(
+                ru(
+                    "${xdo "close-window" ''
+                      search --onlyvisible --name "new tab"
+                      windowfocus --sync
+                      windowactivate --sync
+                    ''}"
+                )
+            )
+            machine.execute(
+                ru(
+                    "${xdo "close-window" ''
+                      key Ctrl+w
+                    ''}"
+                )
+            )
+            for _ in range(1, 20):
+                status, out = machine.execute(
+                    ru(
+                        "${xdo "wait-for-close" ''
+                          search --onlyvisible --name "new tab"
+                        ''}"
+                    )
+                )
+                if status != 0:
+                    return True
+                machine.sleep(1)
+            return False
+
+        retry(try_close)
+
+
+    def wait_for_new_win():
+        ret = False
+        with machine.nested("Waiting for new Chromium window to appear"):
+            for _ in range(1, 20):
+                status, out = machine.execute(
+                    ru(
+                        "${xdo "wait-for-window" ''
+                          search --onlyvisible --name "new tab"
+                          windowfocus --sync
+                          windowactivate --sync
+                        ''}"
+                    )
+                )
+                if status == 0:
+                    ret = True
+                    machine.sleep(10)
+                    break
+                machine.sleep(1)
+        return ret
+
+
+    def create_and_wait_for_new_win():
+        for _ in range(1, 3):
+            create_new_win()
+            if wait_for_new_win():
+                return True
+        assert False, "new window did not appear within 60 seconds"
+
+
+    @contextmanager
+    def test_new_win(description):
+        create_and_wait_for_new_win()
+        with machine.nested(description):
+            yield
+        close_win()
+
+
+    machine.wait_for_x()
+
+    url = "file://${startupHTML}"
+    machine.succeed(ru(f'ulimit -c unlimited; chromium "{url}" & disown'))
+    machine.wait_for_text("startup done")
+    machine.wait_until_succeeds(
+        ru(
+            "${xdo "check-startup" ''
+              search --sync --onlyvisible --name "startup done"
+              # close first start help popup
+              key -delay 1000 Escape
+              windowfocus --sync
+              windowactivate --sync
+            ''}"
+        )
+    )
+
+    create_and_wait_for_new_win()
+    machine.screenshot("empty_windows")
+    close_win()
+
+    machine.screenshot("startup_done")
+
+    with test_new_win("check sandbox"):
+        machine.succeed(
+            ru(
+                "${xdo "type-url" ''
+                  search --sync --onlyvisible --name "new tab"
+                  windowfocus --sync
+                  type --delay 1000 "chrome://sandbox"
+                ''}"
+            )
+        )
+
+        machine.succeed(
+            ru(
+                "${xdo "submit-url" ''
+                  search --sync --onlyvisible --name "new tab"
+                  windowfocus --sync
+                  key --delay 1000 Return
+                ''}"
+            )
+        )
+
+        machine.screenshot("sandbox_info")
+
+        machine.succeed(
+            ru(
+                "${xdo "find-window" ''
+                  search --sync --onlyvisible --name "sandbox status"
+                  windowfocus --sync
+                ''}"
+            )
+        )
+        machine.succeed(
+            ru(
+                "${xdo "copy-sandbox-info" ''
+                  key --delay 1000 Ctrl+a Ctrl+c
+                ''}"
+            )
+        )
+
+        clipboard = machine.succeed(
+            ru("${pkgs.xclip}/bin/xclip -o")
+        )
+
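+        # Strings the chrome://sandbox page must report for the sandbox to be
+        # considered fully functional (matched case-insensitively, with "."
+        # spanning newlines).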
+        filters = [
+            "layer 1 sandbox.*namespace",
+            "pid namespaces.*yes",
+            "network namespaces.*yes",
+            "seccomp.*sandbox.*yes",
+            "you are adequately sandboxed",
+        ]
+        if not all(
+            re.search(filter, clipboard, flags=re.DOTALL | re.IGNORECASE)
+            for filter in filters
+        ):
+            assert False, f"sandbox not working properly: {clipboard}"
+
+        machine.sleep(1)
+        machine.succeed(
+            ru(
+                "${xdo "find-window-after-copy" ''
+                  search --onlyvisible --name "sandbox status"
+                ''}"
+            )
+        )
+
+        machine.succeed(
+            ru(
+                "echo void | ${pkgs.xclip}/bin/xclip -i"
+            )
+        )
+        machine.succeed(
+            ru(
+                "${xdo "copy-sandbox-info" ''
+                  key --delay 1000 Ctrl+a Ctrl+c
+                ''}"
+            )
+        )
+
+        clipboard = machine.succeed(
+            ru("${pkgs.xclip}/bin/xclip -o")
+        )
+        if not all(
+            re.search(filter, clipboard, flags=re.DOTALL | re.IGNORECASE)
+            for filter in filters
+        ):
+            assert False, f"copying twice in a row does not work properly: {clipboard}"
+
+        machine.screenshot("after_copy_from_chromium")
+
+    machine.shutdown()
+  '';
+}) channelMap
diff --git a/nixpkgs/nixos/tests/cjdns.nix b/nixpkgs/nixos/tests/cjdns.nix
new file mode 100644
index 000000000000..d72236d415d4
--- /dev/null
+++ b/nixpkgs/nixos/tests/cjdns.nix
@@ -0,0 +1,121 @@
+let
+  carolKey = "2d2a338b46f8e4a8c462f0c385b481292a05f678e19a2b82755258cf0f0af7e2";
+  carolPubKey = "n932l3pjvmhtxxcdrqq2qpw5zc58f01vvjx01h4dtd1bb0nnu2h0.k";
+  carolPassword = "678287829ce4c67bc8b227e56d94422ee1b85fa11618157b2f591de6c6322b52";
+
+  basicConfig =
+    { ... }:
+    { services.cjdns.enable = true;
+
+      # Turning off DHCP isn't very realistic but makes
+      # the sequence of address assignment less stochastic.
+      networking.useDHCP = false;
+
+      # CJDNS output is incompatible with the XML log.
+      systemd.services.cjdns.serviceConfig.StandardOutput = "null";
+    };
+
+in
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "cjdns";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ehmry ];
+  };
+
+  nodes = { # Alice finds peers over ETHInterface.
+      alice =
+        { ... }:
+        { imports = [ basicConfig ];
+
+          services.cjdns.ETHInterface.bind = "eth1";
+
+          services.httpd.enable = true;
+          services.httpd.adminAddr = "foo@example.org";
+          networking.firewall.allowedTCPPorts = [ 80 ];
+        };
+
+      # Bob explicitly connects to Carol over UDPInterface.
+      bob =
+        { ... }:
+
+        { imports = [ basicConfig ];
+
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = "192.168.0.2"; prefixLength = 24; }
+          ];
+
+          services.cjdns =
+            { UDPInterface =
+                { bind = "0.0.0.0:1024";
+                  connectTo."192.168.0.1:1024" =
+                    { password = carolPassword;
+                      publicKey = carolPubKey;
+                    };
+                };
+            };
+        };
+
+      # Carol listens on ETHInterface and UDPInterface,
+      # but knows neither Alice nor Bob.
+      carol =
+        { ... }:
+        { imports = [ basicConfig ];
+
+          environment.etc."cjdns.keys".text = ''
+            CJDNS_PRIVATE_KEY=${carolKey}
+            CJDNS_ADMIN_PASSWORD=FOOBAR
+          '';
+
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = "192.168.0.1"; prefixLength = 24; }
+          ];
+
+          services.cjdns =
+            { authorizedPasswords = [ carolPassword ];
+              ETHInterface.bind = "eth1";
+              UDPInterface.bind = "192.168.0.1:1024";
+            };
+          networking.firewall.allowedUDPPorts = [ 1024 ];
+        };
+
+    };
+
+  testScript =
+    ''
+      import re
+
+      start_all()
+
+      alice.wait_for_unit("cjdns.service")
+      bob.wait_for_unit("cjdns.service")
+      carol.wait_for_unit("cjdns.service")
+
+
+      def cjdns_ip(machine):
+          res = machine.succeed("ip -o -6 addr show dev tun0")
+          ip = re.split("\s+|/", res)[3]
+          machine.log("has ip {}".format(ip))
+          return ip
+
+
+      alice_ip6 = cjdns_ip(alice)
+      bob_ip6 = cjdns_ip(bob)
+      carol_ip6 = cjdns_ip(carol)
+
+      # ping a few times each to let the routing table establish itself
+
+      alice.succeed("ping -c 4 {}".format(carol_ip6))
+      bob.succeed("ping -c 4 {}".format(carol_ip6))
+
+      carol.succeed("ping -c 4 {}".format(alice_ip6))
+      carol.succeed("ping -c 4 {}".format(bob_ip6))
+
+      alice.succeed("ping -c 4 {}".format(bob_ip6))
+      bob.succeed("ping -c 4 {}".format(alice_ip6))
+
+      alice.wait_for_unit("httpd.service")
+
+      bob.succeed("curl --fail -g http://[{}]".format(alice_ip6))
+    '';
+})
diff --git a/nixpkgs/nixos/tests/clickhouse.nix b/nixpkgs/nixos/tests/clickhouse.nix
new file mode 100644
index 000000000000..2d8a7cf7aa9f
--- /dev/null
+++ b/nixpkgs/nixos/tests/clickhouse.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "clickhouse";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ ma27 ];
+
+  machine = {
+    services.clickhouse.enable = true;
+  };
+
+  testScript =
+    let
+      # Work around the quoting/substitution complexity of Nix, Python, bash and SQL.
+      tableDDL = pkgs.writeText "ddl.sql" "CREATE TABLE `demo` (`value` FixedString(10)) engine = MergeTree PARTITION BY value ORDER BY tuple();";
+      insertQuery = pkgs.writeText "insert.sql" "INSERT INTO `demo` (`value`) VALUES ('foo');";
+      selectQuery = pkgs.writeText "select.sql" "SELECT * from `demo`";
+    in
+      ''
+        machine.start()
+        machine.wait_for_unit("clickhouse.service")
+        machine.wait_for_open_port(9000)
+
+        machine.succeed(
+            "cat ${tableDDL} | clickhouse-client"
+        )
+        machine.succeed(
+            "cat ${insertQuery} | clickhouse-client"
+        )
+        machine.succeed(
+            "cat ${selectQuery} | clickhouse-client | grep foo"
+        )
+      '';
+})
diff --git a/nixpkgs/nixos/tests/cloud-init.nix b/nixpkgs/nixos/tests/cloud-init.nix
new file mode 100644
index 000000000000..aafa6e24e84a
--- /dev/null
+++ b/nixpkgs/nixos/tests/cloud-init.nix
@@ -0,0 +1,52 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  metadataDrive = pkgs.stdenv.mkDerivation {
+    name = "metadata";
+    buildCommand = ''
+      mkdir -p $out/iso
+
+      cat << EOF > $out/iso/user-data
+      #cloud-config
+      write_files:
+      -   content: |
+                cloudinit
+          path: /tmp/cloudinit-write-file
+      EOF
+
+      cat << EOF > $out/iso/meta-data
+      instance-id: iid-local01
+      local-hostname: "test"
+      public-keys:
+          - "should be a key!"
+      EOF
+      ${pkgs.cdrkit}/bin/genisoimage -volid cidata -joliet -rock -o $out/metadata.iso $out/iso
+      '';
+  };
+in makeTest {
+  name = "cloud-init";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lewo ];
+  };
+  machine =
+    { ... }:
+    {
+      virtualisation.qemu.options = [ "-cdrom" "${metadataDrive}/metadata.iso" ];
+      services.cloud-init.enable = true;
+    };
+  testScript = ''
+      machine.start()
+      machine.wait_for_unit("cloud-init.service")
+      machine.succeed("cat /tmp/cloudinit-write-file | grep -q 'cloudinit'")
+
+      machine.wait_until_succeeds(
+          "cat /root/.ssh/authorized_keys | grep -q 'should be a key!'"
+      )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/cockroachdb.nix b/nixpkgs/nixos/tests/cockroachdb.nix
new file mode 100644
index 000000000000..d0cc5e19837c
--- /dev/null
+++ b/nixpkgs/nixos/tests/cockroachdb.nix
@@ -0,0 +1,124 @@
+# This performs a full 'end-to-end' test of a multi-node CockroachDB cluster
+# using the built-in 'cockroach workload' command, to simulate a semi-realistic
+# test load. It generally takes 3-5 minutes to run and uses 1-2GB of RAM
+# (though each of the 3 workers gets 2GB allocated).
+#
+# CockroachDB requires synchronized system clocks within a small error window
+# (~500ms by default) on each node in order to maintain a multi-node cluster.
+# Cluster joins that are outside this window will fail, and nodes that skew
+# outside the window after joining will promptly get kicked out.
+#
+# To accommodate this, we use QEMU/virtio infrastructure and load the 'ptp_kvm'
+# driver inside a guest. This driver allows the host machine to pass its clock
+# through to the guest as a hardware clock that appears as a Precision Time
+# Protocol (PTP) Clock device, generally /dev/ptp0. PTP devices can be measured
+# and used as hardware reference clocks (similar to an on-board GPS clock) by
+# NTP software. In our case, we use Chrony to synchronize to the reference
+# clock.
+#
+# This test is currently NOT enabled as a continuously-checked NixOS test.
+# Ideally, this test would be run by Hydra and Borg on all relevant changes,
+# except:
+#
+#   - Not every build machine is compatible with the ptp_kvm driver.
+#     Virtualized EC2 instances, for example, do not support loading the ptp_kvm
+#     driver into guests. However, bare metal builders (e.g. Packet) do seem to
+#     work just fine. In practice, this means x86_64-linux builds would fail
+#     randomly, depending on which build machine got the job. (This is probably
+#     worth some investigation; I imagine it's based on ptp_kvm's usage of paravirt
+#     support which may not be available in 'nested' environments.)
+#
+#   - ptp_kvm is not supported on aarch64, otherwise it seems likely Cockroach
+#     could be tested there, as well. This seems to be due to the usage of
+#     the TSC in ptp_kvm, which isn't supported (easily) on AArch64. (And:
+#     testing stuff, not just making sure it builds, is important to ensure
+#     aarch64 support remains viable.)
+#
+# For future developers who read this message, are daring, and want to fix
+# this, some options are:
+#
+#   - Just test a single node cluster instead (boring and less thorough).
+#   - Move all CI to bare metal packet builders, and we can at least do x86_64-linux.
+#   - Get virtualized clocking working in aarch64, somehow.
+#   - Add a 4th node that acts as an NTP service and uses no PTP clocks for
+#     references, at the client level. This bloats the node and memory
+#     requirements, but would probably allow both aarch64/x86_64 to work.
+#
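+# As a sanity check while debugging (hypothetical commands, not part of this
+# test), the virtualized reference clock can be inspected from a test script;
+# the refclock configured below should show up in chrony with refid "KVM":
+#
+#   node1.succeed("ls /dev/ptp0")
+#   node1.succeed("chronyc sources | grep KVM")
+#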
+
+let
+
+  # Creates a node. If 'joinNode' parameter, a string containing an IP address,
+  # is non-null, then the CockroachDB server will attempt to join/connect to
+  # the cluster node specified at that address.
+  makeNode = locality: myAddr: joinNode:
+    { nodes, pkgs, lib, config, ... }:
+
+    {
+      # Bank/TPC-C benchmarks take some memory to complete
+      virtualisation.memorySize = 2048;
+
+      # Install the KVM PTP "Virtualized Clock" driver. This allows a /dev/ptp0
+      # device to appear as a reference clock, synchronized to the host clock.
+      # Because CockroachDB *requires* a time-synchronization mechanism for
+      # the system time in a cluster scenario, this is necessary for it to work.
+      boot.kernelModules = [ "ptp_kvm" ];
+
+      # Enable and configure Chrony, using the given virtualized clock passed
+      # through by KVM.
+      services.chrony.enable = true;
+      services.chrony.servers = lib.mkForce [ ];
+      services.chrony.extraConfig = ''
+        refclock PHC /dev/ptp0 poll 2 prefer require refid KVM
+        makestep 0.1 3
+      '';
+
+      # Enable CockroachDB. In order to ensure that Chrony has performed its
+      # first synchronization at boot-time (which may take ~10 seconds) before
+      # starting CockroachDB, we block the ExecStartPre directive using the
+      # 'waitsync' command. This ensures Cockroach doesn't have its system time
+      # leap forward out of nowhere during startup/execution.
+      #
+      # Note that the threshold for NTP-based skew in CockroachDB is ~500ms
+      # by default, so making sure it's started *after* accurate time
+      # synchronization is extremely important.
+      services.cockroachdb.enable = true;
+      services.cockroachdb.insecure = true;
+      services.cockroachdb.openPorts = true;
+      services.cockroachdb.locality = locality;
+      services.cockroachdb.listen.address = myAddr;
+      services.cockroachdb.join = lib.mkIf (joinNode != null) joinNode;
+
+      systemd.services.chronyd.unitConfig.ConditionPathExists = "/dev/ptp0";
+
+      # Hold startup until Chrony has performed its first measurement (which
+      # will probably result in a full timeskip, thanks to makestep)
+      systemd.services.cockroachdb.preStart = ''
+        ${pkgs.chrony}/bin/chronyc waitsync
+      '';
+    };
+
+in import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "cockroachdb";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers;
+    [ thoughtpolice ];
+
+  nodes = {
+    node1 = makeNode "country=us,region=east,dc=1"  "192.168.1.1" null;
+    node2 = makeNode "country=us,region=west,dc=2b" "192.168.1.2" "192.168.1.1";
+    node3 = makeNode "country=eu,region=west,dc=2"  "192.168.1.3" "192.168.1.1";
+  };
+
+  # NOTE: All the nodes must start in order and you must NOT use start_all(), because
+  # there's otherwise no way to guarantee that node1 will start before the others try
+  # to join it.
+  testScript = ''
+    for node in node1, node2, node3:
+        node.start()
+        node.wait_for_unit("cockroachdb")
+    node1.succeed(
+        "cockroach sql --host=192.168.1.1 --insecure -e 'SHOW ALL CLUSTER SETTINGS' 2>&1",
+        "cockroach workload init bank 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
+        "cockroach workload run bank --duration=1m 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/codimd.nix b/nixpkgs/nixos/tests/codimd.nix
new file mode 100644
index 000000000000..b1acbf4a8322
--- /dev/null
+++ b/nixpkgs/nixos/tests/codimd.nix
@@ -0,0 +1,52 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "codimd";
+
+  meta = with lib.maintainers; {
+    maintainers = [ willibutz ];
+  };
+
+  nodes = {
+    codimdSqlite = { ... }: {
+      services = {
+        codimd = {
+          enable = true;
+          configuration.dbURL = "sqlite:///var/lib/codimd/codimd.db";
+        };
+      };
+    };
+
+    codimdPostgres = { ... }: {
+      systemd.services.codimd.after = [ "postgresql.service" ];
+      services = {
+        codimd = {
+          enable = true;
+          configuration.dbURL = "postgres://codimd:snakeoilpassword@localhost:5432/codimddb";
+        };
+        postgresql = {
+          enable = true;
+          initialScript = pkgs.writeText "pg-init-script.sql" ''
+            CREATE ROLE codimd LOGIN PASSWORD 'snakeoilpassword';
+            CREATE DATABASE codimddb OWNER codimd;
+          '';
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("CodiMD sqlite"):
+        codimdSqlite.wait_for_unit("codimd.service")
+        codimdSqlite.wait_for_open_port(3000)
+        codimdSqlite.wait_until_succeeds("curl -sSf http://localhost:3000/new")
+
+    with subtest("CodiMD postgres"):
+        codimdPostgres.wait_for_unit("postgresql.service")
+        codimdPostgres.wait_for_unit("codimd.service")
+        codimdPostgres.wait_for_open_port(5432)
+        codimdPostgres.wait_for_open_port(3000)
+        codimdPostgres.wait_until_succeeds("curl -sSf http://localhost:3000/new")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/common/acme/client/default.nix b/nixpkgs/nixos/tests/common/acme/client/default.nix
new file mode 100644
index 000000000000..80893da02524
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/client/default.nix
@@ -0,0 +1,15 @@
+{ lib, nodes, pkgs, ... }:
+
+let
+  acme-ca = nodes.acme.config.test-support.acme.caCert;
+in
+
+{
+  security.acme = {
+    server = "https://acme.test/dir";
+    email = "hostmaster@example.test";
+    acceptTerms = true;
+  };
+
+  security.pki.certificateFiles = [ acme-ca ];
+}
diff --git a/nixpkgs/nixos/tests/common/acme/server/default.nix b/nixpkgs/nixos/tests/common/acme/server/default.nix
new file mode 100644
index 000000000000..1a0ee882572c
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/default.nix
@@ -0,0 +1,135 @@
+# The certificate for the ACME service is exported as:
+#
+#   config.test-support.acme.caCert
+#
+# This value can be used inside the configuration of other test nodes to inject
+# the snakeoil certificate into security.pki.certificateFiles or into package
+# overlays.
+#
+# If you don't use a custom resolver (see below for notes on that), you also
+# need to add the acme node as a nameserver to every node that needs to
+# acquire certificates using ACME, because otherwise the API host for
+# acme.test can't be resolved.
+#
+# A configuration example of a full node setup using this module:
+#
+# {
+#   acme = import ./common/acme/server;
+#
+#   example = { nodes, ... }: {
+#     networking.nameservers = [
+#       nodes.acme.config.networking.primaryIPAddress
+#     ];
+#     security.pki.certificateFiles = [
+#       nodes.acme.config.test-support.acme.caCert
+#     ];
+#   };
+# }
+#
+# By default, this module runs a local resolver, generated using resolver.nix
+# from the parent directory to automatically discover all zones in the network.
+#
+# If you do not want this and want to use your own resolver, you can just
+# override networking.nameservers like this:
+#
+# {
+#   acme = { nodes, lib, ... }: {
+#     imports = [ ./common/acme/server ];
+#     networking.nameservers = lib.mkForce [
+#       nodes.myresolver.config.networking.primaryIPAddress
+#     ];
+#   };
+#
+#   myresolver = ...;
+# }
+#
+# Keep in mind that currently only _one_ resolver is supported: if you have
+# more than one resolver in networking.nameservers, only the first one will
+# be used.
+#
+# Also, whenever you use a resolver from a different test node, make sure it
+# is started _before_ the ACME service.
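+#
+# In a test script, that ordering can be made explicit, for example (a
+# sketch; the resolver unit name depends on your resolver node, so
+# "bind.service" is only an assumption):
+#
+#   myresolver.wait_for_unit("bind.service")
+#   acme.wait_for_unit("pebble.service")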
+{ config, pkgs, lib, ... }:
+
+
+let
+  snakeOilCerts = import ./snakeoil-certs.nix;
+
+  wfeDomain = "acme.test";
+  wfeCertFile = snakeOilCerts.${wfeDomain}.cert;
+  wfeKeyFile = snakeOilCerts.${wfeDomain}.key;
+
+  siteDomain = "acme.test";
+  siteCertFile = snakeOilCerts.${siteDomain}.cert;
+  siteKeyFile = snakeOilCerts.${siteDomain}.key;
+  pebble = pkgs.pebble;
+  resolver = let
+    message = "You need to define a resolver for the acme test module.";
+    firstNS = lib.head config.networking.nameservers;
+  in if config.networking.nameservers == [] then throw message else firstNS;
+
+  pebbleConf.pebble = {
+    listenAddress = "0.0.0.0:443";
+    managementListenAddress = "0.0.0.0:15000";
+    certificate = snakeOilCerts.${wfeDomain}.cert;
+    privateKey = snakeOilCerts.${wfeDomain}.key;
+    httpPort = 80;
+    tlsPort = 443;
+    ocspResponderURL = "http://0.0.0.0:4002";
+    strict = true;
+  };
+
+  pebbleConfFile = pkgs.writeText "pebble.conf" (builtins.toJSON pebbleConf);
+  pebbleDataDir = "/root/pebble";
+
+in {
+  imports = [ ../../resolver.nix ];
+
+  options.test-support.acme.caCert = lib.mkOption {
+    type = lib.types.path;
+    description = ''
+      A certificate file to use with the <literal>nodes</literal> attribute to
+      inject the snakeoil CA certificate used in the ACME server into
+      <option>security.pki.certificateFiles</option>.
+    '';
+  };
+
+  config = {
+    test-support = {
+      resolver.enable = let
+        isLocalResolver = config.networking.nameservers == [ "127.0.0.1" ];
+      in lib.mkOverride 900 isLocalResolver;
+      acme.caCert = snakeOilCerts.ca.cert;
+    };
+
+    # This has priority 140, because modules/testing/test-instrumentation.nix
+    # already overrides this with priority 150.
+    networking.nameservers = lib.mkOverride 140 [ "127.0.0.1" ];
+    networking.firewall.enable = false;
+
+    networking.extraHosts = ''
+      127.0.0.1 ${wfeDomain}
+      ${config.networking.primaryIPAddress} ${wfeDomain} ${siteDomain}
+    '';
+
+    systemd.services = {
+      pebble = {
+        enable = true;
+        description = "Pebble ACME server";
+        requires = [ ];
+        wantedBy = [ "network.target" ];
+        preStart = ''
+          mkdir -p ${pebbleDataDir}
+        '';
+        script = ''
+          cd ${pebbleDataDir}
+          ${pebble}/bin/pebble -config ${pebbleConfFile}
+        '';
+        serviceConfig = {
+          # Required to bind on privileged ports.
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/common/acme/server/mkcerts.nix b/nixpkgs/nixos/tests/common/acme/server/mkcerts.nix
new file mode 100644
index 000000000000..2474019cbac3
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/mkcerts.nix
@@ -0,0 +1,68 @@
+{ pkgs ? import <nixpkgs> {}
+, lib ? pkgs.lib
+, domains ? [ "acme.test" ]
+}:
+
+pkgs.runCommand "acme-snakeoil-ca" {
+  nativeBuildInputs = [ pkgs.openssl ];
+} ''
+  addpem() {
+    local file="$1"; shift
+    local storeFileName="$(IFS=.; echo "$*")"
+
+    echo -n "  " >> "$out"
+
+    # Every following argument is an attribute, so loop over them and check
+    # for each one whether it must be quoted, then write it into $out.
+    while [ -n "$1" ]; do
+      if expr match "$1" '^[a-zA-Z][a-zA-Z0-9]*$' > /dev/null; then
+        echo -n "$1" >> "$out"
+      else
+        echo -n '"' >> "$out"
+        echo -n "$1" | sed -e 's/["$]/\\&/g' >> "$out"
+        echo -n '"' >> "$out"
+      fi
+      shift
+      [ -z "$1" ] || echo -n . >> "$out"
+    done
+
+    echo " = builtins.toFile \"$storeFileName\" '''" >> "$out"
+    sed -e 's/^/    /' "$file" >> "$out"
+
+    echo "  ''';" >> "$out"
+  }
+
+  echo '# Generated via mkcerts.sh in the same directory.' > "$out"
+  echo '{' >> "$out"
+
+  openssl req -newkey rsa:4096 -x509 -sha256 -days 36500 \
+    -subj '/CN=Snakeoil CA' -nodes -out ca.pem -keyout ca.key
+
+  addpem ca.key ca key
+  addpem ca.pem ca cert
+
+  ${lib.concatMapStrings (fqdn: let
+    opensslConfig = pkgs.writeText "snakeoil.cnf" ''
+      [req]
+      default_bits = 4096
+      prompt = no
+      default_md = sha256
+      req_extensions = req_ext
+      distinguished_name = dn
+      [dn]
+      CN = ${fqdn}
+      [req_ext]
+      subjectAltName = DNS:${fqdn}
+    '';
+  in ''
+    export OPENSSL_CONF=${lib.escapeShellArg opensslConfig}
+    openssl genrsa -out snakeoil.key 4096
+    openssl req -new -key snakeoil.key -out snakeoil.csr
+    openssl x509 -req -in snakeoil.csr -sha256 -set_serial 666 \
+      -CA ca.pem -CAkey ca.key -out snakeoil.pem -days 36500
+    addpem snakeoil.key ${lib.escapeShellArg fqdn} key
+    addpem snakeoil.pem ${lib.escapeShellArg fqdn} cert
+  '') domains}
+
+  echo '}' >> "$out"
+''
diff --git a/nixpkgs/nixos/tests/common/acme/server/mkcerts.sh b/nixpkgs/nixos/tests/common/acme/server/mkcerts.sh
new file mode 100755
index 000000000000..cc7f8ca650dd
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/mkcerts.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env nix-shell
+#!nix-shell -p nix bash -i bash
+set -e
+cd "$(dirname "$0")"
+storepath="$(nix-build --no-out-link mkcerts.nix)"
+cat "$storepath" > snakeoil-certs.nix
diff --git a/nixpkgs/nixos/tests/common/acme/server/snakeoil-certs.nix b/nixpkgs/nixos/tests/common/acme/server/snakeoil-certs.nix
new file mode 100644
index 000000000000..fd537c3260f1
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/snakeoil-certs.nix
@@ -0,0 +1,171 @@
+# Generated via mkcerts.sh in the same directory.
+{
+  ca.key = builtins.toFile "ca.key" ''
+    -----BEGIN PRIVATE KEY-----
+    MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDCnVZGEn68ezXl
+    DWE5gjsCPqutR4nxw/wvIbAxB2Vk2WeQ6HGvt2Jdrz5qer2IXd76YtpQeqd+ffet
+    aLtMeFTr+Xy9yqEpx2AfvmEEcLnuiWbsUGZzsHwW7/4kPgAFBy9TwJn/k892lR6u
+    QYa0QS39CX85kLMZ/LZXUyClIBa+IxT1OovmGqMOr4nGASRQP6d/nnyn41Knat/d
+    tpyaa5zgfYwA6YW6UxcywvBSpMOXM0/82BFZGyALt3nQ+ffmrtKcvMjsNLBFaslV
+    +zYO1PMbLbTCW8SmJTjhzuapXtBHruvoe24133XWlvcP1ylaTx0alwiQWJr1XEOU
+    WLEFTgOTeRyiVDxDunpz+7oGcwzcdOG8nCgd6w0aYaECz1zvS3FYTQz+MiqmEkx6
+    s4bj1U90I0kwUJbeWjjrGO7Y9Qq4i19GafDg7cAMn9eHCiNbNrPj6t/gfaVbCrbk
+    m3ZVjkvLTQ2mb2lv7+tVii45227iNPuNS6lx2FVlr/DXiRrOVfghPvoOxUfXzogJ
+    hZLV4Zki+ycbGQa5w8YMDYCv4c08dKA7AatVhNS60c1zgQNjuWF3BvocSySyGUon
+    VT6h1DYlJ9YAqgqNpedgNR9kpp034SMhB7dj9leB6LRMA+c1fG/T+1lDbkA+vope
+    pt4+30oDcCTYfEifl1HwqNw/bXDm1wIDAQABAoICABPbd/UYaAQVUk93yQbUKe81
+    s9CvbvzTMYUhm9e02Hyszitz/D2gqZHDksvMkFA8u8aylXIGwdZfRglUmV/ZG1kk
+    kLzQ0xbvN/ilNUL9uYsETBMqtPly9YZloHnUNa5NqF+UVGJGk7GWz5WaLANybx3V
+    fTzDbfLl3TkVy0vt9UQbUkUfXyzwZNjXwmgIr8rcY9vasP90a3eXqRX3Tw1Wk6A4
+    TzO8oB994O0WBO150Fc6Lhwvc72yzddENlLDXq8UAXtqq9mmGqJKnhZ+1mo3AkMw
+    q7P1JyCIxcAMm26GtRvLVljXV0x5640kxDrCin6jeeW/qWkJEW6dpmuZjR5scmLI
+    /9n8H+fGzdZH8bOPPotMy12doj3vJqvew3p0eIkmVctYMJKD0j/CWjvKJNE3Yx4O
+    Ls47X/dEypX6anR1HQUXcpd6JfRWdIJANo2Duaz+HYbyA88bHcJL9shFYcjLs3sX
+    R/TvnnKHvw/ud7XBgvLGwGAf/cDEuLI2tv+V7tkMGrMUv+gUJNZaJaCpdt+1iUwO
+    QFq8APyBNn6FFw54TwXWfSjfSNh3geIMLHuErYVu9MIXvB7Yhh+ZvLcfLbmckhAX
+    wb39RRHnCWvnw5Bm9hnsDhqfDsIoP+2wvUkViyHOmrKi8nSJhSk19C8AuQtSVcJg
+    5op+epEmjt70GHt52nuBAoIBAQD2a4Ftp4QxWE2d6oAFI6WPrX7nAwI5/ezCbO/h
+    yoYAn6ucTVnn5/5ITJ8V4WTWZ4lkoZP3YSJiCyBhs8fN63J+RaJ/bFRblHDns1HA
+    2nlMVdNLg6uOfjgUJ8Y6xVM0J2dcFtwIFyK5pfZ7loxMZfvuovg74vDOi2vnO3dO
+    16DP3zUx6B/yIt57CYn8NWTq+MO2bzKUnczUQRx0yEzPOfOmVbcqGP8f7WEdDWXm
+    7scjjN53OPyKzLOVEhOMsUhIMBMO25I9ZpcVkyj3/nj+fFLf/XjOTM00M/S/KnOj
+    RwaWffx6mSYS66qNc5JSsojhIiYyiGVEWIznBpNWDU35y/uXAoIBAQDKLj0dyig2
+    kj1r3HvdgK4sRULqBQFMqE9ylxDmpJxAj6/A8hJ0RCBR57vnIIZMzK4+6K0l3VBJ
+    ukzXJHJLPkZ0Uuo2zLuRLkyjBECH6KYznyTkUVRn50Oq6IoP6WTCfd3Eg+7AKYY1
+    VFo2iR8sxeSQQ+AylFy6QcQ1xPIW30Jj1/LFjrRdRggapPEekpJec0pEqhasT8rR
+    UFhRL2NdZnL5b7ZlsJc7gZKEJgNfxgzaCzloqLcjCgGpOhLKx0fFsNOqHcbIGMwG
+    6wQCOyNghQJ6AZtRD5TYCJow92FchWjoTIaMJ8RjMKQmxpiwM6wQG4J78Hd3mbhf
+    q0hiQhPHaNbBAoIBAFeIeMFq8BpXM7sUwcURlI4lIx8Mgo33FVM7PzsFpfQyw9MR
+    5w3p6vnjvd8X4aoHvVZxzw3hA0WwjiAmrKMJL/KK6d45rP2bDUBBAplvAgeLtTLt
+    4tMLIwCF4HSgA55TIPQlaqO1FDC+M4BTSiMZVxS970/WnZPBEuNgzFDFZ+pvb4X6
+    3t40ZLNwAAQHM4IEPAFiHqWMKGZ9eo5BWIeEHnjHmfjqSDYfLJAVYk1WJIcMUzom
+    lA76CBC8CxW/I94AtcRhWuFUv/Z5/+OYEYLUxtuqPm+J+JrCmf4OJmWppT1wI2+p
+    V00BSeRVWXTm1piieM8ahF5y1hp6y3uV3k0NmKECggEBAMC42Ms3s6NpPSE+99eJ
+    3P0YPJOkl7uByNGbTKH+kW89SDRsy8iGVCSe9892gm5cwU/4LWyljO3qp2qBNG2i
+    /DfP/bCk8bqPXsAZwoWK8DrO3bTCDepJWYhlx40pVkHLBwVXGdOVAXh+YswPY2cj
+    cB9QhDrSj52AKU9z36yLvtY7uBA3Wph6tCjpx2n0H4/m6AmR9LDmEpf5tWYV/OrA
+    SKKaqUw/y7kOZyKOtbKqr/98qYmpIYFF/ZVZZSZkVXcNeoZzgdOlR37ksVqLEsrj
+    nxu7wli/uItBj/FTLjyqcvjUUYDyO1KtwBuyPUPgzYhBIN2Rt9+K6WRQelwnToFL
+    30ECggEBALzozykZj2sr3z8tQQRZuXLGotUFGsQCB8ikeqoeB8FbNNkC+qgflQGv
+    zLRB2KWOvnboc94wVgBJH43xG0HBibZnBhUO8/HBI/WlmyEj9KQ/ZskUK4GVZkB6
+    r/81ASLwH+P/rqrLEjcp1SIPPevjzCWD9VYR5m/qPHLNxStwGSrPjtPzgaFxhq84
+    Jl+YVmNqVlrOKYYfIPh8exPLiTti3wfM61pVYFv56PI2gd5ysMWYnuN+vK0sbmZh
+    cIWwykcKlODIngI7IzYqt8NuIJI0jrYyHgtUw4jaJzdF4mEOplGONxdz15jAGHtg
+    JUsBXFNz132nP4iIr3UKrPedQZijSi4=
+    -----END PRIVATE KEY-----
+  '';
+  ca.cert = builtins.toFile "ca.cert" ''
+    -----BEGIN CERTIFICATE-----
+    MIIFDzCCAvegAwIBAgIUTRDYSWJvmlhwIR3pzVrIQfnboLEwDQYJKoZIhvcNAQEL
+    BQAwFjEUMBIGA1UEAwwLU25ha2VvaWwgQ0EwIBcNMjAwMzIyMjI1NjE3WhgPMjEy
+    MDAyMjcyMjU2MTdaMBYxFDASBgNVBAMMC1NuYWtlb2lsIENBMIICIjANBgkqhkiG
+    9w0BAQEFAAOCAg8AMIICCgKCAgEAwp1WRhJ+vHs15Q1hOYI7Aj6rrUeJ8cP8LyGw
+    MQdlZNlnkOhxr7diXa8+anq9iF3e+mLaUHqnfn33rWi7THhU6/l8vcqhKcdgH75h
+    BHC57olm7FBmc7B8Fu/+JD4ABQcvU8CZ/5PPdpUerkGGtEEt/Ql/OZCzGfy2V1Mg
+    pSAWviMU9TqL5hqjDq+JxgEkUD+nf558p+NSp2rf3bacmmuc4H2MAOmFulMXMsLw
+    UqTDlzNP/NgRWRsgC7d50Pn35q7SnLzI7DSwRWrJVfs2DtTzGy20wlvEpiU44c7m
+    qV7QR67r6HtuNd911pb3D9cpWk8dGpcIkFia9VxDlFixBU4Dk3kcolQ8Q7p6c/u6
+    BnMM3HThvJwoHesNGmGhAs9c70txWE0M/jIqphJMerOG49VPdCNJMFCW3lo46xju
+    2PUKuItfRmnw4O3ADJ/XhwojWzaz4+rf4H2lWwq25Jt2VY5Ly00Npm9pb+/rVYou
+    Odtu4jT7jUupcdhVZa/w14kazlX4IT76DsVH186ICYWS1eGZIvsnGxkGucPGDA2A
+    r+HNPHSgOwGrVYTUutHNc4EDY7lhdwb6HEskshlKJ1U+odQ2JSfWAKoKjaXnYDUf
+    ZKadN+EjIQe3Y/ZXgei0TAPnNXxv0/tZQ25APr6KXqbePt9KA3Ak2HxIn5dR8Kjc
+    P21w5tcCAwEAAaNTMFEwHQYDVR0OBBYEFCIoeYSYjtMiPrmxfHmcrsZkyTpvMB8G
+    A1UdIwQYMBaAFCIoeYSYjtMiPrmxfHmcrsZkyTpvMA8GA1UdEwEB/wQFMAMBAf8w
+    DQYJKoZIhvcNAQELBQADggIBAHPdwOgAxyhIhbqFObNftW8K3sptorB/Fj6jwYCm
+    mHleFueqQnjTHMWsflOjREvQp1M307FWooGj+KQkjwvAyDc/Hmy7WgJxBg9p3vc+
+    /Xf/e7ZfBl8rv7vH8VXW/BC1vVsILdFncrgTrP8/4psV50/cl1F4+nPBiekvvxwZ
+    k+R7SgeSvcWT7YlOG8tm1M3al4F4mWzSRkYjkrXmwRCKAiya9xcGSt0Bob+LoM/O
+    mpDGV/PMC1WAoDc1mMuXN2hSc0n68xMcuFs+dj/nQYn8uL5pzOxpX9560ynKyLDv
+    yOzQlM2VuZ7H2hSIeYOFgrtHJJwhDtzjmUNDQpQdp9Fx+LONQTS1VLCTXND2i/3F
+    10X6PkdnLEn09RiPt5qy20pQkICxoEydmlwpFs32musYfJPdBPkZqZWrwINBv2Wb
+    HfOmEB4xUvXufZ5Ju5icgggBkyNA3PCLo0GZFRrMtvA7i9IXOcXNR+njhKa9246V
+    QQfeWiz05RmIvgShJYVsnZWtael8ni366d+UXypBYncohimyNlAD1n+Bh3z0PvBB
+    +FK4JgOSeouM4SuBHdwmlZ/H0mvfUG81Y8Jbrw0yuRHtuCtX5HpN5GKpZPHDE7aQ
+    fEShVB/GElC3n3DvgK9OJBeVVhYQgUEfJi4rsSxt3cdEI0NrdckUoZbApWVJ3CBc
+    F8Y7
+    -----END CERTIFICATE-----
+  '';
+  "acme.test".key = builtins.toFile "acme.test.key" ''
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIJKAIBAAKCAgEAlgQTZjKfs3aHw0J993k7jFAs+hVRPf//zHMAiUkPKUYPTSl1
+    TxS/bPbhWzSoom00j4SLhGGGhbd+lnvTg0uvKbxskgATfw5clbm1ZN+gx4DuxwjL
+    V3xIxpeSY+PKzs5z8w/k+AJh+zOPyXwH3ut3C+ogp1S/5IhmzV3a/yU/6k0zpGxj
+    N6ZPRTXFrz93I1pPeCkJz90l7tj+2uFc9xtM20NQX52f0Y2oShcG8fKdNZVzuHHk
+    ZXkrZIhou55/nRy2jKgFeD3GQQfa9rwPWrVybQ6tKMMkoazB/Unky9xcTI2LJarf
+    xgHDO9v9yFBvmR4UM8B3kM82NHoENtHaZ2mmiMGZzTEQlf8xwYyHFrqBFIVRWEUr
+    7rr/O5Qr9gIN0T4u367HCexVYAKzbO2P9h75czzjMMoGkbXze9SMQ/ikrxEmwAHg
+    r1Xxh6iQYmgPNk8AR3d9+o2I7WJZMUYZARLnuhVr9BNXv510iqZTqX8lcyL5fEj3
+    ST4Ab+H7rfevZt6NU26iJLBYAjrA2mSvH+wvkboxrgSS8xYPkOW8NLNEbbodzofI
+    pB+SaK53OIk0bj9c1YAgrSNER/TDTgDXrWUNrlfVZ/M7+AEdeU06wi7sVhVif6OB
+    D3OpgKSNjeE6TuJH80Pi5MWugSFBr792Xb6uhVoPiVOFN+qiGB6UkwBgSKkCAwEA
+    AQKCAgAmN7OZfZwh5DiCDhZ5TXFWNba/n16rJOTN+R5R20L5iNetGLrCAs8hu2N+
+    ENRFTPzu8x14BEB5IF4niDRCZq2hPFeMemh9HfOIUV9c63vSV459NkhXaVpA/axV
+    tlqchQwVCB+U70Z28JPZCLgYmnQhnOvktTqNxhIqj5aTGbJGxpQ5d0Nvkfbv8tsB
+    4nE/mGpWel39jqFzT+Tdbjx414Ok+GkpcsacZDJTbbpfOSfD1uc8PgepskzTt8y2
+    v5JTPFVlUAjUsSgouQ+XfCGNQlx8XBjRIaXbal+hX4niRald91FTr0yC7UAHp+vn
+    dFZ586fB526OfbuZctxP+vZhEhFSseQKxHQ0tB8me81xH44daVNr9PPUM69FDT3j
+    ygJaUJjNEG3vVzePCDzhmxTmz2/rAClp77WTWziBWDoA6YWDDzhgNPrXWzLIbZIx
+    ue9ZbGEOh/u5ZzrEXxKCz9FjDe9wQu3TeYUe0M+ejzwWgn7zdWDvjjmtLUUuun2Y
+    wW7WANpu32qvB/V+qssw4O63tbRiwneRCnb8AF2ixgyWr6xyZwch4kacv1KMiixf
+    gO/5GTj7ba5GcdGoktJb29cUEgz13yPd106RsHK4vcggFxfMbOVauNRIo6ddLwyS
+    8UMxLf2i2cToOLkHZrIb8FgimmzRoBd3yYzwVJBydiVcsrHQAQKCAQEAxlzFYCiQ
+    hjEtblGnrkOC7Hx6HvqMelViOkGN8Y9VczG4GhwntmSE2nbpaAKhFBGdLfuSI3tJ
+    Lf24f0IGgAhzPmpo2TjbxPO3YSKFTH71fznVBhtQ1iSxwZ1InXktnuhot6VSDx6A
+    sbHSy1hMFy3nj+Zj5+fQ89tclzBzG9bCShaauO39KrPMwKi6CYoYdGhXBC3+OpHY
+    zBNvmDTxG2kW8L42rlf14EH4pAlgKs4eeZbpcbZ6fXURP2hToHJ8swyKw/1p12WA
+    cc19BKFJXL8nNP4uCf/fI0mVYpytz5KwUzG+z+umDqk+RRCH4mNB28xvEEuEyp/e
+    /C5Is+WrlDAA6QKCAQEAwZsK4AJ/w4Xf4Q/SsnZJO9bfP1ejJjzKElt8rG28JXeb
+    +FjykZZ6vw2gt2Boest2n9N4fBwaRkaHVtVS4iAmaDXozTlcvCLs2rVjPSguuQtW
+    80CKg6+dux+6gFN8IGzDCiX3pWUnhhiXvCcRYEcvgpH6GA5vuCNrXrjH0JFC0kef
+    aaDMGMTbzhc2IIRztmWU4v8YJSSy5KOkIQLWO+7u9aGx9IqT5/z3gx3XrItyl0Bk
+    aQmZEh7JOSyhmGhhf5LdeTLu2YgRw3/tzS+lPMX3+UPw99k9MdTOFn2pww5AdRmg
+    aBIzV+/LBYG0pPRl0D8/6yzGVBPuUDQpmK9Z3gsxwQKCAQEAnNkMZN2Ocd1+6+V7
+    LmtJog9HTSmWXMEZG7FsOJ661Yxx44txx2IyPsCaDNlPXxwSaiKrSo0Yr1oZQd8G
+    XsTPw4HGiETSWijQTulJ99PH8SLck6iTwdBgEhV5LrN75FQnQVdizHu1DUzrvkiC
+    Wi29FWb6howiCEDjNNVln5SwKn83NpVQgyyK8ag4+oQMlDdQ3wgzJ0Ld53hS3Eq4
+    f5EYR6JQgIki7YGcxrB3L0GujTxMONMuhfdEfRvUTGFawwVe0FyYDW7AIrx2Z2vV
+    I5YuvVNjOhrt6OwtSD1VnnWCITaLh8LwmlUu3NOWbudHUzKSe5MLXGEPo95BNKad
+    hl5yyQKCAQBNo0gMJtRnawMpdLfwewDJL1SdSR6S0ePS0r8/Qk4l1D5GrByyB183
+    yFY/0zhyra7nTt1NH9PlhJj3WFqBdZURSzUNP0iR5YuH9R9Twg5ihEqdB6/EOSOO
+    i521okTvl83q/ui9ecAMxUXr3NrZ+hHyUWmyRe/FLub6uCzg1a+vNauWpzXRZPgk
+    QCijh5oDdd7r3JIpKvtWNs01s7aHmDxZYjtDrmK7sDTtboUzm0QbpWXevUuV+aSF
+    +gDfZlRa3WFVHfisYSWGeYG6O7YOlfDoE7fJHGOu3QC8Ai6Wmtt8Wgd6VHokdHO8
+    xJPVZnCBvyt5up3Zz5hMr25S3VazdVfBAoIBAHVteqTGqWpKFxekGwR0RqE30wmN
+    iIEwFhgOZ8sQ+6ViZJZUR4Nn2fchn2jVwF8V8J1GrJbTknqzAwdXtO3FbgfmmyF2
+    9VbS/GgomXhA9vJkM4KK3Iwo/y/nE9hRhtzuVE0QPudz2fyfaDgnWjcNM59064tH
+    88361LVJm3ixyWSBD41UZ7NgWWJX1y2f073vErsfcPpavF5lhn1oSkQnOlgMJsnl
+    24qeuzAgTWu/2rFpIA2EK30Bgvsl3pjJxHwyNDAgklV7C783LIoAHi7VO7tzZ6iF
+    dmD5XLfcUZc3eaB7XehNQKBXDGLJeI5AFmjsHka5GUoitkU2PFrg/3+nJmg=
+    -----END RSA PRIVATE KEY-----
+  '';
+  "acme.test".cert = builtins.toFile "acme.test.cert" ''
+    -----BEGIN CERTIFICATE-----
+    MIIEoTCCAokCAgKaMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNVBAMMC1NuYWtlb2ls
+    IENBMCAXDTIwMDMyMjIyNTYxOFoYDzIxMjAwMjI3MjI1NjE4WjAUMRIwEAYDVQQD
+    DAlhY21lLnRlc3QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCWBBNm
+    Mp+zdofDQn33eTuMUCz6FVE9///McwCJSQ8pRg9NKXVPFL9s9uFbNKiibTSPhIuE
+    YYaFt36We9ODS68pvGySABN/DlyVubVk36DHgO7HCMtXfEjGl5Jj48rOznPzD+T4
+    AmH7M4/JfAfe63cL6iCnVL/kiGbNXdr/JT/qTTOkbGM3pk9FNcWvP3cjWk94KQnP
+    3SXu2P7a4Vz3G0zbQ1BfnZ/RjahKFwbx8p01lXO4ceRleStkiGi7nn+dHLaMqAV4
+    PcZBB9r2vA9atXJtDq0owyShrMH9SeTL3FxMjYslqt/GAcM72/3IUG+ZHhQzwHeQ
+    zzY0egQ20dpnaaaIwZnNMRCV/zHBjIcWuoEUhVFYRSvuuv87lCv2Ag3RPi7frscJ
+    7FVgArNs7Y/2HvlzPOMwygaRtfN71IxD+KSvESbAAeCvVfGHqJBiaA82TwBHd336
+    jYjtYlkxRhkBEue6FWv0E1e/nXSKplOpfyVzIvl8SPdJPgBv4fut969m3o1TbqIk
+    sFgCOsDaZK8f7C+RujGuBJLzFg+Q5bw0s0Rtuh3Oh8ikH5Jornc4iTRuP1zVgCCt
+    I0RH9MNOANetZQ2uV9Vn8zv4AR15TTrCLuxWFWJ/o4EPc6mApI2N4TpO4kfzQ+Lk
+    xa6BIUGvv3Zdvq6FWg+JU4U36qIYHpSTAGBIqQIDAQABMA0GCSqGSIb3DQEBCwUA
+    A4ICAQBCDs0V4z00Ze6Ask3qDOLAPo4k85QCfItlRZmwl2XbPZq7kbe13MqF2wxx
+    yiLalm6veK+ehU9MYN104hJZnuce5iEcZurk+8A+Pwn1Ifz+oWKVbUtUP3uV8Sm3
+    chktJ2H1bebXtNJE5TwvdHiUkXU9ywQt2FkxiTSl6+eac7JKEQ8lVN/o6uYxF5ds
+    +oIZplb7bv2XxsRCzq55F2tJX7fIzqXrSa+lQTnfLGmDVMAQX4TRB/lx0Gqd1a9y
+    qGfFnZ7xVyW97f6PiL8MoxPfd2I2JzrzGyP/igNbFOW0ho1OwfxVmvZeS7fQSc5e
+    +qu+nwnFfl0S4cHRif3G3zmz8Ryx9LM5TYkH41qePIHxoEO2sV0DgWJvbSjysV2S
+    EU2a31dJ0aZ+z6YtZVpHlujKMVzxVTrqj74trS4LvU5h/9hv7e1gjYdox1TO0HMK
+    mtDfgBevB21Tvxpz67Ijf31HvfTmCerKJEOjGnbYmyYpMeMNSONRDcToWk8sUwvi
+    OWa5jlUFRAxgXNM09vCTPi9aRUhcFqACqfAd6I1NqGVlfplLWrc7SWaSa+PsLfBf
+    4EOZfk8iEKBVeYXNjg+CcD8j8yk/oEs816/jpihIk8haCDRWYWGKyyGnwn6OQb8d
+    MdRO2b7Oi/AAmEF3jMlICqv286GIYK5qTKk2/CKHlOLPnsWEuA==
+    -----END CERTIFICATE-----
+  '';
+}
diff --git a/nixpkgs/nixos/tests/common/auto.nix b/nixpkgs/nixos/tests/common/auto.nix
new file mode 100644
index 000000000000..2c21a8d51673
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/auto.nix
@@ -0,0 +1,68 @@
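+# A fake "auto" display manager that automatically logs in a fixed user, for
+# use in automated tests. A test node selects the user like the chromium test
+# in this set does:
+#
+#   test-support.displayManager.auto.user = "alice";
+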
+{ config, lib, ... }:
+
+with lib;
+
+let
+
+  dmcfg = config.services.xserver.displayManager;
+  cfg = config.test-support.displayManager.auto;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    test-support.displayManager.auto = {
+
+      enable = mkOption {
+        default = false;
+        description = ''
+          Whether to enable the fake "auto" display manager, which
+          automatically logs in the user specified in the
+          <option>user</option> option.  This is mostly useful for
+          automated tests.
+        '';
+      };
+
+      user = mkOption {
+        default = "root";
+        description = "The user account to login automatically.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.xserver.displayManager.lightdm = {
+      enable = true;
+      autoLogin = {
+        enable = true;
+        user = cfg.user;
+      };
+    };
+
+    # lightdm by default doesn't allow auto login for root, which is
+    # required by some nixos tests. Override it here.
+    security.pam.services.lightdm-autologin.text = lib.mkForce ''
+        auth     requisite pam_nologin.so
+        auth     required  pam_succeed_if.so quiet
+        auth     required  pam_permit.so
+
+        account  include   lightdm
+
+        password include   lightdm
+
+        session  include   lightdm
+    '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/tests/common/ec2.nix b/nixpkgs/nixos/tests/common/ec2.nix
new file mode 100644
index 000000000000..ba087bb60090
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/ec2.nix
@@ -0,0 +1,49 @@
+{ pkgs, makeTest }:
+
+with pkgs.lib;
+
+{
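+  # Hypothetical usage sketch (the image derivation and script body here are
+  # assumptions for illustration, not part of this module):
+  #
+  #   makeEc2Test {
+  #     name = "simple-boot";
+  #     image = someAmazonImage;  # a qcow2 Amazon image derivation
+  #     userData = "";
+  #     script = "$machine->waitForUnit('multi-user.target');";
+  #   }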
+  makeEc2Test = { name, image, userData, script, hostname ? "ec2-instance", sshPublicKey ? null }:
+    let
+      metaData = pkgs.stdenv.mkDerivation {
+        name = "metadata";
+        buildCommand = ''
+          mkdir -p $out/1.0/meta-data
+          ln -s ${pkgs.writeText "userData" userData} $out/1.0/user-data
+          echo "${hostname}" > $out/1.0/meta-data/hostname
+          echo "(unknown)" > $out/1.0/meta-data/ami-manifest-path
+        '' + optionalString (sshPublicKey != null) ''
+          mkdir -p $out/1.0/meta-data/public-keys/0
+          ln -s ${pkgs.writeText "sshPublicKey" sshPublicKey} $out/1.0/meta-data/public-keys/0/openssh-key
+        '';
+      };
+    in makeTest {
+      name = "ec2-" + name;
+      nodes = {};
+      testScript =
+        ''
+          my $imageDir = ($ENV{'TMPDIR'} // "/tmp") . "/vm-state-machine";
+          mkdir $imageDir, 0700;
+          my $diskImage = "$imageDir/machine.qcow2";
+          system("qemu-img create -f qcow2 -o backing_file=${image} $diskImage") == 0 or die;
+          system("qemu-img resize $diskImage 10G") == 0 or die;
+
+          # Note: we use net=169.0.0.0/8 rather than
+          # net=169.254.0.0/16 to prevent dhcpcd from getting horribly
+          # confused. (It would get a DHCP lease in the 169.254.*
+          # range, which it would then configure and promptly delete
+          # again when it deletes link-local addresses.) Ideally we'd
+          # turn off the DHCP server, but qemu does not have an option
+          # to do that.
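+          # guestfwd= intercepts connections to 169.254.169.254:80, the EC2
+          # instance metadata endpoint, and hands them to a micro_httpd
+          # process serving the fake metadata tree built above.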
+          my $startCommand = "qemu-kvm -m 1024";
+          $startCommand .= " -device virtio-net-pci,netdev=vlan0";
+          $startCommand .= " -netdev 'user,id=vlan0,net=169.0.0.0/8,guestfwd=tcp:169.254.169.254:80-cmd:${pkgs.micro-httpd}/bin/micro_httpd ${metaData}'";
+          $startCommand .= " -drive file=$diskImage,if=virtio,werror=report";
+          $startCommand .= " \$QEMU_OPTS";
+
+          my $machine = createMachine({ startCommand => $startCommand });
+
+          ${script}
+        '';
+    };
+}
diff --git a/nixpkgs/nixos/tests/common/resolver.nix b/nixpkgs/nixos/tests/common/resolver.nix
new file mode 100644
index 000000000000..09a74de20faa
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/resolver.nix
@@ -0,0 +1,141 @@
+# This module automatically discovers the zones served by BIND and NSD in the
+# NixOS configurations of the current test network and delegates them from a
+# fake root zone served by a BIND recursive name server. It also adds A/AAAA
+# records for all entries of networking.extraHosts (except those that point
+# to 127.0.0.1 or ::1).
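+#
+# For example, if one node serves a zone "example.test", the generated root
+# zone contains delegation records of the form:
+#
+#   ns1.fakedns. IN A <address of that node>
+#   example.test. IN NS ns1.fakedns.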
+{ config, nodes, pkgs, lib, ... }:
+
+{
+  options.test-support.resolver.enable = lib.mkOption {
+    type = lib.types.bool;
+    default = true;
+    internal = true;
+    description = ''
+      Whether to enable the resolver that automatically discovers zones in
+      the test network.
+
+      This option is <literal>true</literal> by default, because the module
+      defining this option needs to be explicitly imported.
+
+      The reason this option exists is that the
+      <filename>nixos/tests/common/acme/server</filename> module needs to
+      disable this resolver as soon as the user configures their own.
+    '';
+  };
+
+  config = lib.mkIf config.test-support.resolver.enable {
+    networking.firewall.enable = false;
+    services.bind.enable = true;
+    services.bind.cacheNetworks = lib.mkForce [ "any" ];
+    services.bind.forwarders = lib.mkForce [];
+    services.bind.zones = lib.singleton {
+      name = ".";
+      file = let
+        addDot = zone: zone + lib.optionalString (!lib.hasSuffix "." zone) ".";
+        mkNsdZoneNames = zones: map addDot (lib.attrNames zones);
+        mkBindZoneNames = zones: map (zone: addDot zone.name) zones;
+        getZones = cfg: mkNsdZoneNames cfg.services.nsd.zones
+                     ++ mkBindZoneNames cfg.services.bind.zones;
+
+        getZonesForNode = attrs: {
+          ip = attrs.config.networking.primaryIPAddress;
+          zones = lib.filter (zone: zone != ".") (getZones attrs.config);
+        };
+
+        zoneInfo = lib.mapAttrsToList (lib.const getZonesForNode) nodes;
+
+        # A and AAAA resource records for all the definitions of
+        # networking.extraHosts except those for 127.0.0.1 or ::1.
+        #
+        # The result is an attribute set with keys being the host name and the
+        # values are either { ipv4 = ADDR; } or { ipv6 = ADDR; } where ADDR is
+        # the IP address for the corresponding key.
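+        #
+        # For example, the extraHosts line "192.168.1.2 foo.test" results in
+        # the resource record "foo.test. IN A 192.168.1.2".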
+        recordsFromExtraHosts = let
+          getHostsForNode = lib.const (n: n.config.networking.extraHosts);
+          allHostsList = lib.mapAttrsToList getHostsForNode nodes;
+          allHosts = lib.concatStringsSep "\n" allHostsList;
+
+          reIp = "[a-fA-F0-9.:]+";
+          reHost = "[a-zA-Z0-9.-]+";
+
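+          # Recursively collect the whitespace-separated aliases following the
+          # first host name, e.g. " www mail" yields [ "www" "mail" ].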
+          matchAliases = str: let
+            matched = builtins.match "[ \t]+(${reHost})(.*)" str;
+            continue = lib.singleton (lib.head matched)
+                    ++ matchAliases (lib.last matched);
+          in if matched == null then [] else continue;
+
+          matchLine = str: let
+            result = builtins.match "[ \t]*(${reIp})[ \t]+(${reHost})(.*)" str;
+          in if result == null then null else {
+            ipAddr = lib.head result;
+            hosts = lib.singleton (lib.elemAt result 1)
+                 ++ matchAliases (lib.last result);
+          };
+
+          skipLine = str: let
+            rest = builtins.match "[^\n]*\n(.*)" str;
+          in if rest == null then "" else lib.head rest;
+
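+          # Fold over the input line by line, accumulating parsed entries in
+          # 'acc' and skipping lines that don't match an address/host pair.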
+          getEntries = str: acc: let
+            result = matchLine str;
+            next = getEntries (skipLine str);
+            newEntry = acc ++ lib.singleton result;
+            continue = if result == null then next acc else next newEntry;
+          in if str == "" then acc else continue;
+
+          isIPv6 = str: builtins.match ".*:.*" str != null;
+          loopbackIps = [ "127.0.0.1" "::1" ];
+          filterLoopback = lib.filter (e: !lib.elem e.ipAddr loopbackIps);
+
+          allEntries = lib.concatMap (entry: map (host: {
+            inherit host;
+            ${if isIPv6 entry.ipAddr then "ipv6" else "ipv4"} = entry.ipAddr;
+          }) entry.hosts) (filterLoopback (getEntries (allHosts + "\n") []));
+
+          mkRecords = entry: let
+            records = lib.optional (entry ? ipv6) "AAAA ${entry.ipv6}"
+                   ++ lib.optional (entry ? ipv4) "A ${entry.ipv4}";
+            mkRecord = typeAndData: "${entry.host}. IN ${typeAndData}";
+          in lib.concatMapStringsSep "\n" mkRecord records;
+
+        in lib.concatMapStringsSep "\n" mkRecords allEntries;
+
+        # All of the zones that are subdomains of existing zones.
+        # For example if there is only "example.com" the following zones would
+        # be 'subZones':
+        #
+        #  * foo.example.com.
+        #  * bar.example.com.
+        #
+        # While the following would *not* be 'subZones':
+        #
+        #  * example.com.
+        #  * com.
+        #
+        subZones = let
+          allZones = lib.concatMap (zi: zi.zones) zoneInfo;
+          isSubZoneOf = z1: z2: lib.hasSuffix z2 z1 && z1 != z2;
+        in lib.filter (z: lib.any (isSubZoneOf z) allZones) allZones;
+
+        # All the zones without 'subZones'.
+        filteredZoneInfo = map (zi: zi // {
+          zones = lib.filter (x: !lib.elem x subZones) zi.zones;
+        }) zoneInfo;
+
+      in pkgs.writeText "fake-root.zone" ''
+        $TTL 3600
+        . IN SOA ns.fakedns. admin.fakedns. ( 1 3h 1h 1w 1d )
+        ns.fakedns. IN A ${config.networking.primaryIPAddress}
+        . IN NS ns.fakedns.
+        ${lib.concatImapStrings (num: { ip, zones }: ''
+          ns${toString num}.fakedns. IN A ${ip}
+          ${lib.concatMapStrings (zone: ''
+          ${zone} IN NS ns${toString num}.fakedns.
+          '') zones}
+        '') (lib.filter (zi: zi.zones != []) filteredZoneInfo)}
+        ${recordsFromExtraHosts}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/common/user-account.nix b/nixpkgs/nixos/tests/common/user-account.nix
new file mode 100644
index 000000000000..a57ee2d59ae3
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/user-account.nix
@@ -0,0 +1,15 @@
+{ ... }:
+
+{ users.users.alice =
+    { isNormalUser = true;
+      description = "Alice Foobar";
+      password = "foobar";
+      uid = 1000;
+    };
+
+  users.users.bob =
+    { isNormalUser = true;
+      description = "Bob Foobar";
+      password = "foobar";
+    };
+}
diff --git a/nixpkgs/nixos/tests/common/webroot/news-rss.xml b/nixpkgs/nixos/tests/common/webroot/news-rss.xml
new file mode 100644
index 000000000000..b8099bf0364a
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/webroot/news-rss.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rss xmlns:blogChannel="http://backend.userland.com/blogChannelModule" version="2.0">
+ <channel>
+  <title>NixOS News</title><link>https://nixos.org</link>
+  <description>News for NixOS, the purely functional Linux distribution.</description>
+  <image>
+   <title>NixOS</title>
+   <url>https://nixos.org/logo/nixos-logo-only-hires.png</url><link>https://nixos.org/</link>
+  </image>
+  <item>
+   <title>NixOS 18.09 released</title><link>https://nixos.org/news.html</link>
+   <description>
+    <a href="https://github.com/NixOS/nixos-artwork/blob/master/releases/18.09-jellyfish/jellyfish.png">
+     <img class="inline" src="logo/nixos-logo-18.09-jellyfish-lores.png" alt="18.09 Jellyfish logo" with="100" height="87"/>
+    </a>
+      NixOS 18.09 “Jellyfish” has been released, the tenth stable release branch.
+      See the <a href="/nixos/manual/release-notes.html#sec-release-18.09">release notes</a>
+      for details. You can get NixOS 18.09 ISOs and VirtualBox appliances
+      from the <a href="nixos/download.html">download page</a>.
+      For information on how to upgrade from older release branches
+      to 18.09, check out the
+      <a href="/nixos/manual/index.html#sec-upgrading">manual section on upgrading</a>.
+    </description>
+   <pubDate>Sat Oct 06 2018 00:00:00 GMT</pubDate>
+  </item>
+ </channel>
+</rss>
diff --git a/nixpkgs/nixos/tests/common/x11.nix b/nixpkgs/nixos/tests/common/x11.nix
new file mode 100644
index 000000000000..0d76a0e972ff
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/x11.nix
@@ -0,0 +1,17 @@
+{ lib, ... }:
+
+{
+  imports = [
+    ./auto.nix
+  ];
+
+  services.xserver.enable = true;
+
+  # Automatically log in.
+  test-support.displayManager.auto.enable = true;
+
+  # Use IceWM as the window manager.
+  # Don't use a desktop manager.
+  services.xserver.displayManager.defaultSession = lib.mkDefault "none+icewm";
+  services.xserver.windowManager.icewm.enable = true;
+}
diff --git a/nixpkgs/nixos/tests/consul.nix b/nixpkgs/nixos/tests/consul.nix
new file mode 100644
index 000000000000..6600dae4770b
--- /dev/null
+++ b/nixpkgs/nixos/tests/consul.nix
@@ -0,0 +1,143 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+
+let
+  # Settings for both servers and agents
+  webUi = true;
+  retry_interval = "1s";
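+  # A raft_multiplier of 1 selects Consul's fastest Raft timing, so leader
+  # elections settle quickly after the server crashes provoked below.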
+  raft_multiplier = 1;
+
+  defaultExtraConfig = {
+    inherit retry_interval;
+    performance = {
+      inherit raft_multiplier;
+    };
+  };
+
+  allConsensusServerHosts = [
+    "192.168.1.1"
+    "192.168.1.2"
+    "192.168.1.3"
+  ];
+
+  allConsensusClientHosts = [
+    "192.168.2.1"
+    "192.168.2.2"
+  ];
+
+  firewallSettings = {
+    # See https://www.consul.io/docs/install/ports.html
+    allowedTCPPorts = [ 8301 8302 8600 8500 8300 ];
+    allowedUDPPorts = [ 8301 8302 8600 ];
+  };
+
+  client = index: { pkgs, ... }:
+    let
+      ip = builtins.elemAt allConsensusClientHosts index;
+    in
+      {
+        environment.systemPackages = [ pkgs.consul ];
+
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = ip; prefixLength = 16; }
+        ];
+        networking.firewall = firewallSettings;
+
+        services.consul = {
+          enable = true;
+          inherit webUi;
+          extraConfig = defaultExtraConfig // {
+            server = false;
+            retry_join = allConsensusServerHosts;
+            bind_addr = ip;
+          };
+        };
+      };
+
+  server = index: { pkgs, ... }:
+    let
+      ip = builtins.elemAt allConsensusServerHosts index;
+    in
+      {
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = ip; prefixLength = 16; }
+        ];
+        networking.firewall = firewallSettings;
+
+        services.consul = {
+          enable = true;
+          inherit webUi;
+          extraConfig = defaultExtraConfig // {
+            server = true;
+            bootstrap_expect = builtins.length allConsensusServerHosts;
+            retry_join =
+              # If there is only one server in the network, allow self-join;
+              # otherwise a server must not try to join itself and joins only
+              # the other servers.
+              # See https://github.com/hashicorp/consul/issues/2868
+              if builtins.length allConsensusServerHosts == 1
+                then allConsensusServerHosts
+                else builtins.filter (h: h != ip) allConsensusServerHosts;
+            bind_addr = ip;
+          };
+        };
+      };
+in {
+  name = "consul";
+
+  nodes = {
+    server1 = server 0;
+    server2 = server 1;
+    server3 = server 2;
+
+    client1 = client 0;
+    client2 = client 1;
+  };
+
+  testScript = ''
+    servers = [server1, server2, server3]
+    machines = [server1, server2, server3, client1, client2]
+
+    for m in machines:
+        m.wait_for_unit("consul.service")
+
+    for m in machines:
+        m.wait_until_succeeds("[ $(consul members | grep -o alive | wc -l) == 5 ]")
+
+    client1.succeed("consul kv put testkey 42")
+    client2.succeed("[ $(consul kv get testkey) == 42 ]")
+
+    # Test that the cluster can tolerate the failure of any single server:
+    for server in servers:
+        server.crash()
+
+        # For each client, wait until they have connection again
+        # using `kv get -recurse` before issuing commands.
+        client1.wait_until_succeeds("consul kv get -recurse")
+        client2.wait_until_succeeds("consul kv get -recurse")
+
+        # Do some consul actions while one server is down.
+        client1.succeed("consul kv put testkey 43")
+        client2.succeed("[ $(consul kv get testkey) == 43 ]")
+        client2.succeed("consul kv delete testkey")
+
+        # Restart crashed machine.
+        server.start()
+
+        # Wait for recovery.
+        for m in machines:
+            m.wait_until_succeeds("[ $(consul members | grep -o alive | wc -l) == 5 ]")
+
+        # Wait for client connections.
+        client1.wait_until_succeeds("consul kv get -recurse")
+        client2.wait_until_succeeds("consul kv get -recurse")
+
+        # Do some consul actions with server back up.
+        client1.succeed("consul kv put testkey 44")
+        client2.succeed("[ $(consul kv get testkey) == 44 ]")
+        client2.succeed("consul kv delete testkey")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-bridge.nix b/nixpkgs/nixos/tests/containers-bridge.nix
new file mode 100644
index 000000000000..2c8e8fa5370f
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-bridge.nix
@@ -0,0 +1,102 @@
+# Test for NixOS' container support.
+
+let
+  hostIp = "192.168.0.1";
+  containerIp = "192.168.0.100/24";
+  hostIp6 = "fc00::1";
+  containerIp6 = "fc00::2/7";
+in
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-bridge";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ aristid aszlig eelco kampfschlaefer ];
+  };
+
+  machine =
+    { pkgs, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation.writableStore = true;
+      virtualisation.memorySize = 768;
+
+      networking.bridges = {
+        br0 = {
+          interfaces = [];
+        };
+      };
+      networking.interfaces = {
+        br0 = {
+          ipv4.addresses = [{ address = hostIp; prefixLength = 24; }];
+          ipv6.addresses = [{ address = hostIp6; prefixLength = 7; }];
+        };
+      };
+
+      containers.webserver =
+        {
+          autoStart = true;
+          privateNetwork = true;
+          hostBridge = "br0";
+          localAddress = containerIp;
+          localAddress6 = containerIp6;
+          config =
+            { services.httpd.enable = true;
+              services.httpd.adminAddr = "foo@example.org";
+              networking.firewall.allowedTCPPorts = [ 80 ];
+            };
+        };
+
+      containers.web-noip =
+        {
+          autoStart = true;
+          privateNetwork = true;
+          hostBridge = "br0";
+          config =
+            { services.httpd.enable = true;
+              services.httpd.adminAddr = "foo@example.org";
+              networking.firewall.allowedTCPPorts = [ 80 ];
+            };
+        };
+
+
+      virtualisation.pathsInNixDB = [ pkgs.stdenv ];
+    };
+
+  testScript = ''
+    machine.wait_for_unit("default.target")
+    assert "webserver" in machine.succeed("nixos-container list")
+
+    with subtest("Start the webserver container"):
+        assert "up" in machine.succeed("nixos-container status webserver")
+
+    with subtest("Bridges exist inside containers"):
+        machine.succeed(
+            "nixos-container run webserver -- ip link show eth0",
+            "nixos-container run web-noip -- ip link show eth0",
+        )
+
+    ip = "${containerIp}".split("/")[0]
+    machine.succeed(f"ping -n -c 1 {ip}")
+    machine.succeed(f"curl --fail http://{ip}/ > /dev/null")
+
+    ip6 = "${containerIp6}".split("/")[0]
+    machine.succeed(f"ping -n -c 1 {ip6}")
+    machine.succeed(f"curl --fail http://[{ip6}]/ > /dev/null")
+
+    with subtest(
+        "nixos-container show-ip works in case of an ipv4 address "
+        + "with subnetmask in CIDR notation."
+    ):
+        result = machine.succeed("nixos-container show-ip webserver").rstrip()
+        assert result == ip
+
+    with subtest("Stop the container"):
+        machine.succeed("nixos-container stop webserver")
+        machine.fail(
+            f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null",
+            f"curl --fail --connect-timeout 2 http://[{ip6}]/ > /dev/null",
+        )
+
+    # Destroying a declarative container should fail.
+    machine.fail("nixos-container destroy webserver")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-custom-pkgs.nix b/nixpkgs/nixos/tests/containers-custom-pkgs.nix
new file mode 100644
index 000000000000..397a4a905e6d
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-custom-pkgs.nix
@@ -0,0 +1,42 @@
+# Test for NixOS' container support.
+
+import ./make-test-python.nix ({ pkgs, lib, ...} : let
+
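+  # Give hello a distinctive name so the test can verify that the container
+  # really uses this overridden package set rather than the host's pkgs.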
+  customPkgs = pkgs // {
+    hello = pkgs.hello.overrideAttrs(old: {
+      name = "custom-hello";
+    });
+  };
+
+in {
+  name = "containers-hosts";
+  meta = with lib.maintainers; {
+    maintainers = [ adisbladis ];
+  };
+
+  machine =
+    { ... }:
+    {
+      virtualisation.memorySize = 256;
+      virtualisation.vlans = [];
+
+      containers.simple = {
+        autoStart = true;
+        pkgs = customPkgs;
+        config = {pkgs, config, ... }: {
+          environment.systemPackages = [
+            pkgs.hello
+          ];
+        };
+      };
+
+    };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("default.target")
+    machine.succeed(
+        "test $(nixos-container run simple -- readlink -f /run/current-system/sw/bin/hello) = ${customPkgs.hello}/bin/hello"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-ephemeral.nix b/nixpkgs/nixos/tests/containers-ephemeral.nix
new file mode 100644
index 000000000000..692554ac0ba2
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-ephemeral.nix
@@ -0,0 +1,54 @@
+# Test for NixOS' container support.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-ephemeral";
+
+  machine = { pkgs, ... }: {
+    virtualisation.memorySize = 768;
+    virtualisation.writableStore = true;
+
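+    # An ephemeral container keeps its root file system under /run/containers
+    # and discards it on stop, so nothing appears in /var/lib/containers; the
+    # assertions below rely on exactly that behaviour.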
+    containers.webserver = {
+      ephemeral = true;
+      privateNetwork = true;
+      hostAddress = "10.231.136.1";
+      localAddress = "10.231.136.2";
+      config = {
+        services.nginx = {
+          enable = true;
+          virtualHosts.localhost = {
+            root = pkgs.runCommand "localhost" {} ''
+              mkdir "$out"
+              echo hello world > "$out/index.html"
+            '';
+          };
+        };
+        networking.firewall.allowedTCPPorts = [ 80 ];
+      };
+    };
+  };
+
+  testScript = ''
+    assert "webserver" in machine.succeed("nixos-container list")
+
+    machine.succeed("nixos-container start webserver")
+
+    with subtest("Container got its own root folder"):
+        machine.succeed("ls /run/containers/webserver")
+
+    with subtest("Container persistent directory is not created"):
+        machine.fail("ls /var/lib/containers/webserver")
+
+    # Since "start" returns after the container has reached
+    # multi-user.target, we should now be able to access it.
+    ip = machine.succeed("nixos-container show-ip webserver").rstrip()
+    machine.succeed(f"ping -n -c1 {ip}")
+    machine.succeed(f"curl --fail http://{ip}/ > /dev/null")
+
+    with subtest("Stop the container"):
+        machine.succeed("nixos-container stop webserver")
+        machine.fail(f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null")
+
+    with subtest("Container's root folder was removed"):
+        machine.fail("ls /run/containers/webserver")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-extra_veth.nix b/nixpkgs/nixos/tests/containers-extra_veth.nix
new file mode 100644
index 000000000000..7d30b3f76cd7
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-extra_veth.nix
@@ -0,0 +1,94 @@
+# Test for NixOS' container support.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-extra_veth";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ kampfschlaefer ];
+  };
+
+  machine =
+    { pkgs, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation.writableStore = true;
+      virtualisation.memorySize = 768;
+      virtualisation.vlans = [];
+
+      networking.useDHCP = false;
+      networking.bridges = {
+        br0 = {
+          interfaces = [];
+        };
+        br1 = { interfaces = []; };
+      };
+      networking.interfaces = {
+        br0 = {
+          ipv4.addresses = [{ address = "192.168.0.1"; prefixLength = 24; }];
+          ipv6.addresses = [{ address = "fc00::1"; prefixLength = 7; }];
+        };
+        br1 = {
+          ipv4.addresses = [{ address = "192.168.1.1"; prefixLength = 24; }];
+        };
+      };
+
+      containers.webserver =
+        {
+          autoStart = true;
+          privateNetwork = true;
+          hostBridge = "br0";
+          localAddress = "192.168.0.100/24";
+          localAddress6 = "fc00::2/7";
+          extraVeths = {
+            veth1 = { hostBridge = "br1"; localAddress = "192.168.1.100/24"; };
+            veth2 = { hostAddress = "192.168.2.1"; localAddress = "192.168.2.100"; };
+          };
+          config =
+            {
+              networking.firewall.allowedTCPPorts = [ 80 ];
+            };
+        };
+
+      virtualisation.pathsInNixDB = [ pkgs.stdenv ];
+    };
+
+  testScript =
+    ''
+      machine.wait_for_unit("default.target")
+      assert "webserver" in machine.succeed("nixos-container list")
+
+      with subtest("Status of the webserver container is up"):
+          assert "up" in machine.succeed("nixos-container status webserver")
+
+      with subtest("Ensure that the veths are inside the container"):
+          assert "state UP" in machine.succeed(
+              "nixos-container run webserver -- ip link show veth1"
+          )
+          assert "state UP" in machine.succeed(
+              "nixos-container run webserver -- ip link show veth2"
+          )
+
+      with subtest("Ensure the presence of the extra veths"):
+          assert "state UP" in machine.succeed("ip link show veth1")
+          assert "state UP" in machine.succeed("ip link show veth2")
+
+      with subtest("Ensure the veth1 is part of br1 on the host"):
+          assert "master br1" in machine.succeed("ip link show veth1")
+
+      with subtest("Ping on main veth"):
+          machine.succeed("ping -n -c 1 192.168.0.100")
+          machine.succeed("ping -n -c 1 fc00::2")
+
+      with subtest("Ping on the first extra veth"):
+          machine.succeed("ping -n -c 1 192.168.1.100 >&2")
+
+      with subtest("Ping on the second extra veth"):
+          machine.succeed("ping -n -c 1 192.168.2.100 >&2")
+
+      with subtest("Container can be stopped"):
+          machine.succeed("nixos-container stop webserver")
+          machine.fail("ping -n -c 1 192.168.1.100 >&2")
+          machine.fail("ping -n -c 1 192.168.2.100 >&2")
+
+      with subtest("Destroying a declarative container should fail"):
+          machine.fail("nixos-container destroy webserver")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/containers-hosts.nix b/nixpkgs/nixos/tests/containers-hosts.nix
new file mode 100644
index 000000000000..d6fb4a761eef
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-hosts.nix
@@ -0,0 +1,52 @@
+# Test for NixOS' container support.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-hosts";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ montag451 ];
+  };
+
+  machine =
+    { lib, ... }:
+    {
+      virtualisation.memorySize = 256;
+      virtualisation.vlans = [];
+
+      networking.bridges.br0.interfaces = [];
+      networking.interfaces.br0.ipv4.addresses = [
+        { address = "10.11.0.254"; prefixLength = 24; }
+      ];
+
+      # Force /etc/hosts to be the only source for host name resolution
+      environment.etc."nsswitch.conf".text = lib.mkForce ''
+        hosts: files
+      '';
+
+      containers.simple = {
+        autoStart = true;
+        privateNetwork = true;
+        localAddress = "10.10.0.1";
+        hostAddress = "10.10.0.254";
+
+        config = {};
+      };
+
+      containers.netmask = {
+        autoStart = true;
+        privateNetwork = true;
+        hostBridge = "br0";
+        localAddress = "10.11.0.1/24";
+
+        config = {};
+      };
+    };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("default.target")
+
+    with subtest("Ping the containers using the entries added in /etc/hosts"):
+        for host in "simple.containers", "netmask.containers":
+            machine.succeed(f"ping -n -c 1 {host}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-imperative.nix b/nixpkgs/nixos/tests/containers-imperative.nix
new file mode 100644
index 000000000000..c4f2002918fc
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-imperative.nix
@@ -0,0 +1,149 @@
+# Test for NixOS' container support.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-imperative";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ aristid aszlig eelco kampfschlaefer ];
+  };
+
+  machine =
+    { config, pkgs, lib, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+
+      # XXX: Sandbox setup fails while trying to hardlink files from the host's
+      #      store file system into the prepared chroot directory.
+      nix.useSandbox = false;
+      nix.binaryCaches = []; # don't try to access cache.nixos.org
+
+      virtualisation.writableStore = true;
+      virtualisation.memorySize = 1024;
+      # Make sure we always have all the required dependencies for creating a
+      # container available within the VM, because we don't have network access.
+      virtualisation.pathsInNixDB = let
+        emptyContainer = import ../lib/eval-config.nix {
+          inherit (config.nixpkgs.localSystem) system;
+          modules = lib.singleton {
+            containers.foo.config = {
+              system.stateVersion = "18.03";
+            };
+          };
+        };
+      in with pkgs; [
+        stdenv stdenvNoCC emptyContainer.config.containers.foo.path
+        libxslt desktop-file-utils texinfo docbook5 libxml2
+        docbook_xsl_ns xorg.lndir documentation-highlighter
+      ];
+    };
+
+  testScript = let
+      tmpfilesContainerConfig = pkgs.writeText "container-config-tmpfiles" ''
+        {
+          systemd.tmpfiles.rules = [ "d /foo - - - - -" ];
+          systemd.services.foo = {
+            serviceConfig.Type = "oneshot";
+            script = "ls -al /foo";
+            wantedBy = [ "multi-user.target" ];
+          };
+        }
+      '';
+      brokenCfg = pkgs.writeText "broken.nix" ''
+        {
+          assertions = [
+            { assertion = false;
+              message = "I never evaluate";
+            }
+          ];
+        }
+      '';
+    in ''
+      with subtest("Make sure we have a NixOS tree (required by ‘nixos-container create’)"):
+          machine.succeed("PAGER=cat nix-env -qa -A nixos.hello >&2")
+
+      id1, id2 = None, None
+
+      with subtest("Create some containers imperatively"):
+          id1 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip()
+          machine.log(f"created container {id1}")
+
+          id2 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip()
+          machine.log(f"created container {id2}")
+
+          assert id1 != id2
+
+      with subtest(f"Put the root of {id2} into a bind mount"):
+          machine.succeed(
+              f"mv /var/lib/containers/{id2} /id2-bindmount",
+              f"mount --bind /id2-bindmount /var/lib/containers/{id1}",
+          )
+
+          ip1 = machine.succeed(f"nixos-container show-ip {id1}").rstrip()
+          ip2 = machine.succeed(f"nixos-container show-ip {id2}").rstrip()
+          assert ip1 != ip2
+
+      with subtest(
+          "Create a directory and a file we can later check if it still exists "
+          + "after destruction of the container"
+      ):
+          machine.succeed("mkdir /nested-bindmount")
+          machine.succeed("echo important data > /nested-bindmount/dummy")
+
+      with subtest(
+          "Create a directory with a dummy file and bind-mount it into both containers."
+      ):
+          for id in id1, id2:
+              important_path = f"/var/lib/containers/{id}/very/important/data"
+              machine.succeed(
+                  f"mkdir -p {important_path}",
+                  f"mount --bind /nested-bindmount {important_path}",
+              )
+
+      with subtest("Start one of them"):
+          machine.succeed(f"nixos-container start {id1}")
+
+      with subtest("Execute commands via the root shell"):
+          assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname")
+
+      with subtest("Execute a nix command via the root shell. (regression test for #40355)"):
+          machine.succeed(
+              f"nixos-container run {id1} -- nix-instantiate -E "
+              + '\'derivation { name = "empty"; builder = "false"; system = "false"; }\' '
+          )
+
+      with subtest("Stop and start (regression test for #4989)"):
+          machine.succeed(f"nixos-container stop {id1}")
+          machine.succeed(f"nixos-container start {id1}")
+
+      with subtest("tmpfiles are present"):
+          machine.log("creating container tmpfiles")
+          machine.succeed(
+              "nixos-container create tmpfiles --config-file ${tmpfilesContainerConfig}"
+          )
+          machine.log("created, starting…")
+          machine.succeed("nixos-container start tmpfiles")
+          machine.log("done starting, investigating…")
+          machine.succeed(
+              "echo $(nixos-container run tmpfiles -- systemctl is-active foo.service) | grep -q active;"
+          )
+          machine.succeed("nixos-container destroy tmpfiles")
+
+      with subtest("Execute commands via the root shell"):
+          assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname")
+
+      with subtest("Destroy the containers"):
+          for id in id1, id2:
+              machine.succeed(f"nixos-container destroy {id}")
+
+      with subtest("Check whether destruction of any container has killed important data"):
+          machine.succeed("grep -qF 'important data' /nested-bindmount/dummy")
+
+      with subtest("Ensure that the container path is gone"):
+          print(machine.succeed("ls -lsa /var/lib/containers"))
+          machine.succeed(f"test ! -e /var/lib/containers/{id1}")
+
+      with subtest("Ensure that a failed container creation doesn'leave any state"):
+          machine.fail(
+              "nixos-container create b0rk --config-file ${brokenCfg}"
+          )
+          machine.succeed(f"test ! -e /var/lib/containers/b0rk")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/containers-ip.nix b/nixpkgs/nixos/tests/containers-ip.nix
new file mode 100644
index 000000000000..8583a08c6258
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-ip.nix
@@ -0,0 +1,77 @@
+# Test for NixOS' container support.
+
+let
+  webserverFor = hostAddress: localAddress: {
+    inherit hostAddress localAddress;
+    privateNetwork = true;
+    config = {
+      services.httpd = {
+        enable = true;
+        adminAddr = "foo@example.org";
+      };
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+  };
+
+in import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-ipv4-ipv6";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ aristid aszlig eelco kampfschlaefer ];
+  };
+
+  machine =
+    { pkgs, ... }: {
+      imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation = {
+        writableStore = true;
+        memorySize = 768;
+      };
+
+      containers.webserver4 = webserverFor "10.231.136.1" "10.231.136.2";
+      containers.webserver6 = webserverFor "fc00::2" "fc00::1";
+      virtualisation.pathsInNixDB = [ pkgs.stdenv ];
+    };
+
+  testScript = { nodes, ... }: ''
+    import time
+
+
+    def curl_host(ip):
+        # put [] around ipv6 addresses for curl
+        host = ip if ":" not in ip else f"[{ip}]"
+        return f"curl --fail --connect-timeout 2 http://{host}/ > /dev/null"
+
+
+    def get_ip(container):
+        # need to distinguish because show-ip won't work for ipv6
+        if container == "webserver4":
+            ip = machine.succeed(f"nixos-container show-ip {container}").rstrip()
+            assert ip == "${nodes.machine.config.containers.webserver4.localAddress}"
+            return ip
+        return "${nodes.machine.config.containers.webserver6.localAddress}"
+
+
+    for container in "webserver4", "webserver6":
+        assert container in machine.succeed("nixos-container list")
+
+        with subtest(f"Start container {container}"):
+            machine.succeed(f"nixos-container start {container}")
+            # wait 2s for container to start and network to be up
+            time.sleep(2)
+
+        # Since "start" returns after the container has reached
+        # multi-user.target, we should now be able to access it.
+
+        ip = get_ip(container)
+        with subtest(f"{container} reacts to pings and HTTP requests"):
+            machine.succeed(f"ping -n -c1 {ip}")
+            machine.succeed(curl_host(ip))
+
+        with subtest(f"Stop container {container}"):
+            machine.succeed(f"nixos-container stop {container}")
+            machine.fail(curl_host(ip))
+
+        # Destroying a declarative container should fail.
+        machine.fail(f"nixos-container destroy {container}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-macvlans.nix b/nixpkgs/nixos/tests/containers-macvlans.nix
new file mode 100644
index 000000000000..0e8f67bc76f0
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-macvlans.nix
@@ -0,0 +1,86 @@
+# Test for NixOS' container support.
+
+let
+  # containers IP on VLAN 1
+  containerIp1 = "192.168.1.253";
+  containerIp2 = "192.168.1.254";
+in
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-macvlans";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ montag451 ];
+  };
+
+  nodes = {
+
+    machine1 =
+      { lib, ... }:
+      {
+        virtualisation.memorySize = 256;
+        virtualisation.vlans = [ 1 ];
+
+        # To be able to ping containers from the host, it is necessary
+        # to create a macvlan on the host on the VLAN 1 network.
+        networking.macvlans.mv-eth1-host = {
+          interface = "eth1";
+          mode = "bridge";
+        };
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [];
+        networking.interfaces.mv-eth1-host = {
+          ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+        };
+
+        containers.test1 = {
+          autoStart = true;
+          macvlans = [ "eth1" ];
+
+          config = {
+            networking.interfaces.mv-eth1 = {
+              ipv4.addresses = [ { address = containerIp1; prefixLength = 24; } ];
+            };
+          };
+        };
+
+        containers.test2 = {
+          autoStart = true;
+          macvlans = [ "eth1" ];
+
+          config = {
+            networking.interfaces.mv-eth1 = {
+              ipv4.addresses = [ { address = containerIp2; prefixLength = 24; } ];
+            };
+          };
+        };
+      };
+
+    machine2 =
+      { ... }:
+      {
+        virtualisation.memorySize = 256;
+        virtualisation.vlans = [ 1 ];
+      };
+
+  };
+
+  testScript = ''
+    start_all()
+    machine1.wait_for_unit("default.target")
+    machine2.wait_for_unit("default.target")
+
+    with subtest(
+        "Ping between containers to check that macvlans are created in bridge mode"
+    ):
+        machine1.succeed("nixos-container run test1 -- ping -n -c 1 ${containerIp2}")
+
+    with subtest("Ping containers from the host (machine1)"):
+        machine1.succeed("ping -n -c 1 ${containerIp1}")
+        machine1.succeed("ping -n -c 1 ${containerIp2}")
+
+    with subtest(
+        "Ping containers from the second machine to check that containers are reachable from the outside"
+    ):
+        machine2.succeed("ping -n -c 1 ${containerIp1}")
+        machine2.succeed("ping -n -c 1 ${containerIp2}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-physical_interfaces.nix b/nixpkgs/nixos/tests/containers-physical_interfaces.nix
new file mode 100644
index 000000000000..e800751a23c2
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-physical_interfaces.nix
@@ -0,0 +1,136 @@
+
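+# Test moving a physical interface into a container: directly, under a
+# bridge, under a bond, and under a bridge on top of a bond.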
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-physical_interfaces";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ kampfschlaefer ];
+  };
+
+  nodes = {
+    server = { ... }:
+      {
+        virtualisation.memorySize = 256;
+        virtualisation.vlans = [ 1 ];
+
+        containers.server = {
+          privateNetwork = true;
+          interfaces = [ "eth1" ];
+
+          config = {
+            networking.interfaces.eth1.ipv4.addresses = [
+              { address = "10.10.0.1"; prefixLength = 24; }
+            ];
+            networking.firewall.enable = false;
+          };
+        };
+      };
+    bridged = { ... }: {
+      virtualisation.memorySize = 128;
+      virtualisation.vlans = [ 1 ];
+
+      containers.bridged = {
+        privateNetwork = true;
+        interfaces = [ "eth1" ];
+
+        config = {
+          networking.bridges.br0.interfaces = [ "eth1" ];
+          networking.interfaces.br0.ipv4.addresses = [
+            { address = "10.10.0.2"; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+        };
+      };
+    };
+
+    bonded = { ... }: {
+      virtualisation.memorySize = 128;
+      virtualisation.vlans = [ 1 ];
+
+      containers.bonded = {
+        privateNetwork = true;
+        interfaces = [ "eth1" ];
+
+        config = {
+          networking.bonds.bond0 = {
+            interfaces = [ "eth1" ];
+            driverOptions.mode = "active-backup";
+          };
+          networking.interfaces.bond0.ipv4.addresses = [
+            { address = "10.10.0.3"; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+        };
+      };
+    };
+
+    bridgedbond = { ... }: {
+      virtualisation.memorySize = 128;
+      virtualisation.vlans = [ 1 ];
+
+      containers.bridgedbond = {
+        privateNetwork = true;
+        interfaces = [ "eth1" ];
+
+        config = {
+          networking.bonds.bond0 = {
+            interfaces = [ "eth1" ];
+            driverOptions.mode = "active-backup";
+          };
+          networking.bridges.br0.interfaces = [ "bond0" ];
+          networking.interfaces.br0.ipv4.addresses = [
+            { address = "10.10.0.4"; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("Prepare server"):
+        server.wait_for_unit("default.target")
+        server.succeed("ip link show dev eth1 >&2")
+
+    with subtest("Simple physical interface is up"):
+        server.succeed("nixos-container start server")
+        server.wait_for_unit("container@server")
+        server.succeed(
+            "systemctl -M server list-dependencies network-addresses-eth1.service >&2"
+        )
+
+        # The other tests will ping this container on its ip. Here we just check
+        # that the device is present in the container.
+        server.succeed("nixos-container run server -- ip a show dev eth1 >&2")
+
+    with subtest("Physical device in bridge in container can ping server"):
+        bridged.wait_for_unit("default.target")
+        bridged.succeed("nixos-container start bridged")
+        bridged.wait_for_unit("container@bridged")
+        bridged.succeed(
+            "systemctl -M bridged list-dependencies network-addresses-br0.service >&2",
+            "systemctl -M bridged status -n 30 -l network-addresses-br0.service",
+            "nixos-container run bridged -- ping -w 10 -c 1 -n 10.10.0.1",
+        )
+
+    with subtest("Physical device in bond in container can ping server"):
+        bonded.wait_for_unit("default.target")
+        bonded.succeed("nixos-container start bonded")
+        bonded.wait_for_unit("container@bonded")
+        bonded.succeed(
+            "systemctl -M bonded list-dependencies network-addresses-bond0 >&2",
+            "systemctl -M bonded status -n 30 -l network-addresses-bond0 >&2",
+            "nixos-container run bonded -- ping -w 10 -c 1 -n 10.10.0.1",
+        )
+
+    with subtest("Physical device in bond in bridge in container can ping server"):
+        bridgedbond.wait_for_unit("default.target")
+        bridgedbond.succeed("nixos-container start bridgedbond")
+        bridgedbond.wait_for_unit("container@bridgedbond")
+        bridgedbond.succeed(
+            "systemctl -M bridgedbond list-dependencies network-addresses-br0.service >&2",
+            "systemctl -M bridgedbond status -n 30 -l network-addresses-br0.service",
+            "nixos-container run bridgedbond -- ping -w 10 -c 1 -n 10.10.0.1",
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-portforward.nix b/nixpkgs/nixos/tests/containers-portforward.nix
new file mode 100644
index 000000000000..fc90e151bd9e
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-portforward.nix
@@ -0,0 +1,62 @@
+# Test for NixOS' container support.
+
+let
+  hostIp = "192.168.0.1";
+  hostPort = 10080;
+  containerIp = "192.168.0.100";
+  containerPort = 80;
+in 
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-portforward";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ aristid aszlig eelco kampfschlaefer ianwookim ];
+  };
+
+  machine =
+    { pkgs, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation.writableStore = true;
+      virtualisation.memorySize = 768;
+
+      containers.webserver =
+        { privateNetwork = true;
+          hostAddress = hostIp;
+          localAddress = containerIp;
+          forwardPorts = [ { protocol = "tcp"; hostPort = hostPort; containerPort = containerPort; } ];
+          config =
+            { services.httpd.enable = true;
+              services.httpd.adminAddr = "foo@example.org";
+              networking.firewall.allowedTCPPorts = [ 80 ];
+            };
+        };
+
+      virtualisation.pathsInNixDB = [ pkgs.stdenv ];
+    };
+
+  testScript =
+    ''
+      container_list = machine.succeed("nixos-container list")
+      assert "webserver" in container_list
+
+      # Start the webserver container.
+      machine.succeed("nixos-container start webserver")
+
+      # wait two seconds for the container to start and the network to be up
+      machine.sleep(2)
+
+      # Since "start" returns after the container has reached
+      # multi-user.target, we should now be able to access it.
+      # ip = machine.succeed("nixos-container show-ip webserver").strip()
+      machine.succeed("ping -n -c1 ${hostIp}")
+      machine.succeed("curl --fail http://${hostIp}:${toString hostPort}/ > /dev/null")
+
+      # Stop the container.
+      machine.succeed("nixos-container stop webserver")
+      machine.fail("curl --fail --connect-timeout 2 http://${hostIp}:${toString hostPort}/ > /dev/null")
+
+      # Destroying a declarative container should fail.
+      machine.fail("nixos-container destroy webserver")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/containers-reloadable.nix b/nixpkgs/nixos/tests/containers-reloadable.nix
new file mode 100644
index 000000000000..35aff91e85b5
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-reloadable.nix
@@ -0,0 +1,72 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+let
+  client_base = {
+
+    containers.test1 = {
+      autoStart = true;
+      config = {
+        environment.etc.check.text = "client_base";
+      };
+    };
+
+    # Prevent the test driver (make-test-python.nix) from changing the IP.
+    networking.interfaces = {
+      eth1.ipv4.addresses = lib.mkOverride 0 [ ];
+    };
+  };
+in {
+  name = "cotnainers-reloadable";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ danbst ];
+  };
+
+  nodes = {
+    client = { ... }: {
+      imports = [ client_base ];
+    };
+
+    client_c1 = { lib, ... }: {
+      imports = [ client_base ];
+
+      containers.test1.config = {
+        environment.etc.check.text = lib.mkForce "client_c1";
+        services.httpd.enable = true;
+        services.httpd.adminAddr = "nixos@example.com";
+      };
+    };
+    client_c2 = { lib, ... }: {
+      imports = [ client_base ];
+
+      containers.test1.config = {
+        environment.etc.check.text = lib.mkForce "client_c2";
+        services.nginx.enable = true;
+      };
+    };
+  };
+
+  testScript = {nodes, ...}: let
+    c1System = nodes.client_c1.config.system.build.toplevel;
+    c2System = nodes.client_c2.config.system.build.toplevel;
+  in ''
+    client.start()
+    client.wait_for_unit("default.target")
+
+    assert "client_base" in client.succeed("nixos-container run test1 cat /etc/check")
+
+    with subtest("httpd is available after activating config1"):
+        client.succeed(
+            "${c1System}/bin/switch-to-configuration test >&2",
+            "[[ $(nixos-container run test1 cat /etc/check) == client_c1 ]] >&2",
+            "systemctl status httpd -M test1 >&2",
+        )
+
+    with subtest("httpd is not available any longer after switching to config2"):
+        client.succeed(
+            "${c2System}/bin/switch-to-configuration test >&2",
+            "[[ $(nixos-container run test1 cat /etc/check) == client_c2 ]] >&2",
+            "systemctl status nginx -M test1 >&2",
+        )
+        client.fail("systemctl status httpd -M test1 >&2")
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/containers-restart_networking.nix b/nixpkgs/nixos/tests/containers-restart_networking.nix
new file mode 100644
index 000000000000..b50dadd13e47
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-restart_networking.nix
@@ -0,0 +1,115 @@
+# Test for NixOS' container support.
+
+let
+  client_base = {
+    networking.firewall.enable = false;
+
+    containers.webserver = {
+      autoStart = true;
+      privateNetwork = true;
+      hostBridge = "br0";
+      config = {
+        networking.firewall.enable = false;
+        networking.interfaces.eth0.ipv4.addresses = [
+          { address = "192.168.1.122"; prefixLength = 24; }
+        ];
+      };
+    };
+  };
+in import ./make-test-python.nix ({ pkgs, ...} :
+{
+  name = "containers-restart_networking";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ kampfschlaefer ];
+  };
+
+  nodes = {
+    client = { lib, ... }: client_base // {
+      virtualisation.vlans = [ 1 ];
+
+      networking.bridges.br0 = {
+        interfaces = [];
+        rstp = false;
+      };
+      networking.interfaces = {
+        eth1.ipv4.addresses = lib.mkOverride 0 [ ];
+        br0.ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+      };
+
+    };
+    client_eth1 = { lib, ... }: client_base // {
+      networking.bridges.br0 = {
+        interfaces = [ "eth1" ];
+        rstp = false;
+      };
+      networking.interfaces = {
+        eth1.ipv4.addresses = lib.mkOverride 0 [ ];
+        br0.ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
+      };
+    };
+    client_eth1_rstp = { lib, ... }: client_base // {
+      networking.bridges.br0 = {
+        interfaces = [ "eth1" ];
+        rstp = true;
+      };
+      networking.interfaces = {
+        eth1.ipv4.addresses = lib.mkOverride 0 [ ];
+        br0.ipv4.addresses =  [ { address = "192.168.1.2"; prefixLength = 24; } ];
+      };
+    };
+  };
+
+  testScript = {nodes, ...}: let
+    originalSystem = nodes.client.config.system.build.toplevel;
+    eth1_bridged = nodes.client_eth1.config.system.build.toplevel;
+    eth1_rstp = nodes.client_eth1_rstp.config.system.build.toplevel;
+  in ''
+    client.start()
+
+    client.wait_for_unit("default.target")
+
+    with subtest("Initial configuration connectivity check"):
+        client.succeed("ping 192.168.1.122 -c 1 -n >&2")
+        client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2")
+
+        client.fail("ip l show eth1 |grep 'master br0' >&2")
+        client.fail("grep eth1 /run/br0.interfaces >&2")
+
+    with subtest("Bridged configuration without STP preserves connectivity"):
+        client.succeed(
+            "${eth1_bridged}/bin/switch-to-configuration test >&2"
+        )
+
+        client.succeed(
+            "ping 192.168.1.122 -c 1 -n >&2",
+            "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2",
+            "ip l show eth1 |grep 'master br0' >&2",
+            "grep eth1 /run/br0.interfaces >&2",
+        )
+
+    # Activating RSTP pulls in an additional service, so the bridge is restarted and the container loses connectivity:
+    # with subtest("Bridged configuration with STP"):
+    #     client.succeed("${eth1_rstp}/bin/switch-to-configuration test >&2")
+    #     client.execute("ip -4 a >&2")
+    #     client.execute("ip l >&2")
+    #
+    #     client.succeed(
+    #         "ping 192.168.1.122 -c 1 -n >&2",
+    #         "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2",
+    #         "ip l show eth1 |grep 'master br0' >&2",
+    #         "grep eth1 /run/br0.interfaces >&2",
+    #     )
+
+    with subtest("Reverting to initial configuration preserves connectivity"):
+        client.succeed(
+            "${originalSystem}/bin/switch-to-configuration test >&2"
+        )
+
+        client.succeed("ping 192.168.1.122 -c 1 -n >&2")
+        client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2")
+
+        client.fail("ip l show eth1 |grep 'master br0' >&2")
+        client.fail("grep eth1 /run/br0.interfaces >&2")
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/containers-tmpfs.nix b/nixpkgs/nixos/tests/containers-tmpfs.nix
new file mode 100644
index 000000000000..171e8f01c7b9
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-tmpfs.nix
@@ -0,0 +1,93 @@
+# Test for NixOS' container support.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "containers-tmpfs";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ kampka ];
+  };
+
+  machine =
+    { pkgs, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation.writableStore = true;
+      virtualisation.memorySize = 768;
+
+      containers.tmpfs =
+        {
+          autoStart = true;
+          tmpfs = [
+            # Mount var as a tmpfs
+            "/var"
+
+            # Add a nested mount inside a tmpfs
+            "/var/log"
+
+            # Add a tmpfs on a path that does not exist
+            "/some/random/path"
+          ];
+          config = { };
+        };
+
+      virtualisation.pathsInNixDB = [ pkgs.stdenv ];
+    };
+
+  testScript = ''
+      machine.wait_for_unit("default.target")
+      assert "tmpfs" in machine.succeed("nixos-container list")
+
+      with subtest("tmpfs container is up"):
+          assert "up" in machine.succeed("nixos-container status tmpfs")
+
+
+      def tmpfs_cmd(command):
+          return f"nixos-container run tmpfs -- {command} 2>/dev/null"
+
+
+      with subtest("/var is mounted as a tmpfs"):
+          machine.succeed(tmpfs_cmd("mountpoint -q /var"))
+
+      with subtest("/var/log is mounted as a tmpfs"):
+          assert "What: tmpfs" in machine.succeed(
+              tmpfs_cmd("systemctl status var-log.mount --no-pager")
+          )
+          machine.succeed(tmpfs_cmd("mountpoint -q /var/log"))
+
+      with subtest("/some/random/path is mounted as a tmpfs"):
+          assert "What: tmpfs" in machine.succeed(
+              tmpfs_cmd("systemctl status some-random-path.mount --no-pager")
+          )
+          machine.succeed(tmpfs_cmd("mountpoint -q /some/random/path"))
+
+      with subtest(
+          "files created in the container in a non-tmpfs directory are visible on the host."
+      ):
+          # This sanity check gives the tmpfs assertions below a baseline.
+          machine.succeed(
+              tmpfs_cmd("touch /root/test.file"),
+              tmpfs_cmd("ls -l  /root | grep -q test.file"),
+              "test -e /var/lib/containers/tmpfs/root/test.file",
+          )
+
+      with subtest(
+          "/some/random/path is writable and that files created there are not "
+          + "in the hosts container dir but in the tmpfs"
+      ):
+          machine.succeed(
+              tmpfs_cmd("touch /some/random/path/test.file"),
+              tmpfs_cmd("test -e /some/random/path/test.file"),
+          )
+          machine.fail("test -e /var/lib/containers/tmpfs/some/random/path/test.file")
+
+      with subtest(
+          "files created in the host's container dir in a path where a tmpfs "
+          + "file system has been mounted are not visible to the container, as "
+          + "they do not exist in the tmpfs"
+      ):
+          machine.succeed(
+              "touch /var/lib/containers/tmpfs/var/test.file",
+              "test -e /var/lib/containers/tmpfs/var/test.file",
+              "ls -l /var/lib/containers/tmpfs/var/ | grep -q test.file 2>/dev/null",
+          )
+          machine.fail(tmpfs_cmd("ls -l /var | grep -q test.file"))
+    '';
+})
diff --git a/nixpkgs/nixos/tests/corerad.nix b/nixpkgs/nixos/tests/corerad.nix
new file mode 100644
index 000000000000..741fa448f680
--- /dev/null
+++ b/nixpkgs/nixos/tests/corerad.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix (
+  {
+    nodes = {
+      router = {config, pkgs, ...}: { 
+        config = {
+          # This machine simulates a router with IPv6 forwarding and a static IPv6 address.
+          boot.kernel.sysctl = {
+            "net.ipv6.conf.all.forwarding" = true;
+          };
+          networking.interfaces.eth1 = {
+            ipv6.addresses = [ { address = "fd00:dead:beef:dead::1"; prefixLength = 64; } ];
+          };
+          services.corerad = {
+            enable = true;
+            # Serve router advertisements to the client machine with prefix information matching
+            # any IPv6 /64 prefixes configured on this interface.
+            configFile = pkgs.writeText "corerad.toml" ''
+              [[interfaces]]
+              name = "eth1"
+              advertise = true
+                [[interfaces.prefix]]
+                prefix = "::/64"
+            '';
+          };
+        };
+      };
+      client = {config, pkgs, ...}: {
+        # Use IPv6 SLAAC from router advertisements, and install the ndisc6 tools
+        # so we can trigger a router solicitation immediately with rdisc6.
+        config = {
+          boot.kernel.sysctl = {
+            "net.ipv6.conf.all.autoconf" = true;
+          };
+          environment.systemPackages = with pkgs; [
+            ndisc6
+          ];
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      with subtest("Wait for CoreRAD and the network to be ready"):
+          # Ensure networking is online and CoreRAD is ready.
+          router.wait_for_unit("network-online.target")
+          client.wait_for_unit("network-online.target")
+          router.wait_for_unit("corerad.service")
+
+          # Ensure the client can reach the router.
+          client.wait_until_succeeds("ping -c 1 fd00:dead:beef:dead::1")
+
+      with subtest("Verify SLAAC on client"):
+          # Trigger a router solicitation and verify a SLAAC address is assigned from
+          # the prefix configured on the router.
+          client.wait_until_succeeds("rdisc6 -1 -r 10 eth1")
+          client.wait_until_succeeds(
+              "ip -6 addr show dev eth1 | grep -q 'fd00:dead:beef:dead:'"
+          )
+
+          addrs = client.succeed("ip -6 addr show dev eth1")
+
+          assert (
+              "fd00:dead:beef:dead:" in addrs
+          ), "SLAAC prefix was not found in client addresses after router advertisement"
+          assert (
+              "/64 scope global temporary" in addrs
+          ), "SLAAC temporary address was not configured on client after router advertisement"
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/couchdb.nix b/nixpkgs/nixos/tests/couchdb.nix
new file mode 100644
index 000000000000..10e95701acdb
--- /dev/null
+++ b/nixpkgs/nixos/tests/couchdb.nix
@@ -0,0 +1,76 @@
+import ./make-test-python.nix ({ pkgs, lib, ...}:
+
+with lib;
+
+{
+  name = "couchdb";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ fpletz ];
+  };
+
+  nodes = {
+    couchdb1 =
+      { pkgs, ... }:
+
+      { environment.systemPackages = with pkgs; [ jq ];
+        services.couchdb.enable = true;
+      };
+
+    couchdb2 =
+      { pkgs, ... }:
+
+      { environment.systemPackages = with pkgs; [ jq ];
+        services.couchdb.enable = true;
+        services.couchdb.package = pkgs.couchdb2;
+      };
+  };
+
+  testScript = let
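+    # Helper producing a one-shot script that queries the CouchDB HTTP API at
+    # the given path, filters the JSON response with jq, and fails unless the
+    # result matches; e.g. curlJqCheck "GET" "" ".couchdb" "Welcome" asserts
+    # the welcome banner.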
+    curlJqCheck = action: path: jqexpr: result:
+      pkgs.writeScript "curl-jq-check-${action}-${path}.sh" ''
+        RESULT=$(curl -X ${action} http://127.0.0.1:5984/${path} | jq -r '${jqexpr}')
+        echo $RESULT >&2
+        if [ "$RESULT" != "${result}" ]; then
+          exit 1
+        fi
+      '';
+  in ''
+    start_all()
+
+    couchdb1.wait_for_unit("couchdb.service")
+    couchdb1.wait_until_succeeds(
+        "${curlJqCheck "GET" "" ".couchdb" "Welcome"}"
+    )
+    couchdb1.wait_until_succeeds(
+        "${curlJqCheck "GET" "_all_dbs" ". | length" "2"}"
+    )
+    couchdb1.succeed("${curlJqCheck "PUT" "foo" ".ok" "true"}")
+    couchdb1.succeed(
+        "${curlJqCheck "GET" "_all_dbs" ". | length" "3"}"
+    )
+    couchdb1.succeed(
+        "${curlJqCheck "DELETE" "foo" ".ok" "true"}"
+    )
+    couchdb1.succeed(
+        "${curlJqCheck "GET" "_all_dbs" ". | length" "2"}"
+    )
+
+    couchdb2.wait_for_unit("couchdb.service")
+    couchdb2.wait_until_succeeds(
+        "${curlJqCheck "GET" "" ".couchdb" "Welcome"}"
+    )
+    couchdb2.wait_until_succeeds(
+        "${curlJqCheck "GET" "_all_dbs" ". | length" "0"}"
+    )
+    couchdb2.succeed("${curlJqCheck "PUT" "foo" ".ok" "true"}")
+    couchdb2.succeed(
+        "${curlJqCheck "GET" "_all_dbs" ". | length" "1"}"
+    )
+    couchdb2.succeed(
+        "${curlJqCheck "DELETE" "foo" ".ok" "true"}"
+    )
+    couchdb2.succeed(
+        "${curlJqCheck "GET" "_all_dbs" ". | length" "0"}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/deluge.nix b/nixpkgs/nixos/tests/deluge.nix
new file mode 100644
index 000000000000..3cf179a38216
--- /dev/null
+++ b/nixpkgs/nixos/tests/deluge.nix
@@ -0,0 +1,107 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "deluge";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ flokli ];
+  };
+
+  nodes = {
+    simple1 = {
+      services.deluge = {
+        enable = true;
+        package = pkgs.deluge-1_x;
+        web = {
+          enable = true;
+          openFirewall = true;
+        };
+      };
+    };
+
+    declarative1 = {
+      services.deluge = {
+        enable = true;
+        package = pkgs.deluge-1_x;
+        openFirewall = true;
+        declarative = true;
+        config = {
+          allow_remote = true;
+          download_location = "/var/lib/deluge/my-download";
+          daemon_port = 58846;
+          listen_ports = [ 6881 6889 ];
+        };
+        web = {
+          enable = true;
+          port = 3142;
+        };
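+        # Auth file entries take the form <username>:<password>:<level>;
+        # level 10 grants full administrative access to the daemon.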
+        authFile = pkgs.writeText "deluge-auth" ''
+          localclient:a7bef72a890:10
+          andrew:password:10
+          user3:anotherpass:5
+        '';
+      };
+    };
+
+    simple2 = {
+      services.deluge = {
+        enable = true;
+        package = pkgs.deluge-2_x;
+        web = {
+          enable = true;
+          openFirewall = true;
+        };
+      };
+    };
+
+    declarative2 = {
+      services.deluge = {
+        enable = true;
+        package = pkgs.deluge-2_x;
+        openFirewall = true;
+        declarative = true;
+        config = {
+          allow_remote = true;
+          download_location = "/var/lib/deluge/my-download";
+          daemon_port = 58846;
+          listen_ports = [ 6881 6889 ];
+        };
+        web = {
+          enable = true;
+          port = 3142;
+        };
+        authFile = pkgs.writeText "deluge-auth" ''
+          localclient:a7bef72a890:10
+          andrew:password:10
+          user3:anotherpass:5
+        '';
+      };
+    };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    simple1.wait_for_unit("deluged")
+    simple2.wait_for_unit("deluged")
+    simple1.wait_for_unit("delugeweb")
+    simple2.wait_for_unit("delugeweb")
+    simple1.wait_for_open_port("8112")
+    simple2.wait_for_open_port("8112")
+    declarative1.wait_for_unit("network.target")
+    declarative2.wait_for_unit("network.target")
+    declarative1.wait_until_succeeds("curl --fail http://simple1:8112")
+    declarative2.wait_until_succeeds("curl --fail http://simple2:8112")
+
+    declarative1.wait_for_unit("deluged")
+    declarative2.wait_for_unit("deluged")
+    declarative1.wait_for_unit("delugeweb")
+    declarative2.wait_for_unit("delugeweb")
+    declarative1.wait_until_succeeds("curl --fail http://declarative1:3142")
+    declarative2.wait_until_succeeds("curl --fail http://declarative2:3142")
+    declarative1.succeed(
+        "deluge-console 'connect 127.0.0.1:58846 andrew password; help' | grep -q 'rm.*Remove a torrent'"
+    )
+    declarative2.succeed(
+        "deluge-console 'connect 127.0.0.1:58846 andrew password; help' | grep -q 'rm.*Remove a torrent'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dhparams.nix b/nixpkgs/nixos/tests/dhparams.nix
new file mode 100644
index 000000000000..a0de2911777c
--- /dev/null
+++ b/nixpkgs/nixos/tests/dhparams.nix
@@ -0,0 +1,142 @@
+let
+  common = { pkgs, ... }: {
+    security.dhparams.enable = true;
+    environment.systemPackages = [ pkgs.openssl ];
+  };
+
+in import ./make-test-python.nix {
+  name = "dhparams";
+
+  nodes.generation1 = { pkgs, config, ... }: {
+    imports = [ common ];
+    security.dhparams.params = {
+      # Use low values here because we don't want the test to run for ages.
+      foo.bits = 16;
+      # Also use the old format to make sure the type is coerced in the right
+      # way.
+      bar = 17;
+    };
+
+    systemd.services.foo = {
+      description = "Check systemd Ordering";
+      wantedBy = [ "multi-user.target" ];
+      unitConfig = {
+        # This is to make sure that the dhparams generation of foo occurs
+        # before this service so we need this service to start as early as
+        # possible to provoke a race condition.
+        DefaultDependencies = false;
+
+        # We check later whether the service has been started or not.
+        ConditionPathExists = config.security.dhparams.params.foo.path;
+      };
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      # The reason we only provide an ExecStop here is to ensure that we don't
+      # accidentally trigger an error because a file system is not yet ready
+      # during very early startup (we might not even have the Nix store
+      # available, for example if future changes in NixOS use systemd mount
+      # units to do early file system initialisation).
+      serviceConfig.ExecStop = "${pkgs.coreutils}/bin/true";
+    };
+  };
+
+  nodes.generation2 = {
+    imports = [ common ];
+    security.dhparams.params.foo.bits = 18;
+  };
+
+  nodes.generation3 = common;
+
+  nodes.generation4 = {
+    imports = [ common ];
+    security.dhparams.stateful = false;
+    security.dhparams.params.foo2.bits = 18;
+    security.dhparams.params.bar2.bits = 19;
+  };
+
+  nodes.generation5 = {
+    imports = [ common ];
+    security.dhparams.defaultBitSize = 30;
+    security.dhparams.params.foo3 = {};
+    security.dhparams.params.bar3 = {};
+  };
+
+  testScript = { nodes, ... }: let
+    getParamPath = gen: name: let
+      node = "generation${toString gen}";
+    in nodes.${node}.config.security.dhparams.params.${name}.path;
+
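+    # Splices a Python snippet into the test script that switches the running
+    # VM to the given generation's system closure and rebinds the Python
+    # `machine` variable to that node.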
+    switchToGeneration = gen: let
+      node = "generation${toString gen}";
+      inherit (nodes.${node}.config.system.build) toplevel;
+      switchCmd = "${toplevel}/bin/switch-to-configuration test";
+    in ''
+      with machine.nested("switch to generation ${toString gen}"):
+          machine.succeed(
+              "${switchCmd}"
+          )
+          machine = ${node}
+    '';
+
+  in ''
+    import re
+
+
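+    # `openssl dhparam -text` prints a header like "DH Parameters: (16 bit)";
+    # extract the bit count from it and compare against the expected size.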
+    def assert_param_bits(path, bits):
+        with machine.nested(f"check bit size of {path}"):
+            output = machine.succeed(f"openssl dhparam -in {path} -text")
+            pattern = re.compile(r"^\s*DH Parameters:\s+\((\d+)\s+bit\)\s*$", re.M)
+            match = pattern.match(output)
+            if match is None:
+                raise Exception(f"no DH parameters header found in {path}")
+            if match[1] != str(bits):
+                raise Exception(f"bit size should be {bits} but it is {match[1]} instead.")
+
+
+    machine = generation1
+
+    machine.wait_for_unit("multi-user.target")
+
+    with subtest("verify startup order"):
+        machine.succeed("systemctl is-active foo.service")
+
+    with subtest("check bit sizes of dhparam files"):
+        assert_param_bits("${getParamPath 1 "foo"}", 16)
+        assert_param_bits("${getParamPath 1 "bar"}", 17)
+
+    ${switchToGeneration 2}
+
+    with subtest("check whether bit size has changed"):
+        assert_param_bits("${getParamPath 2 "foo"}", 18)
+
+    with subtest("ensure that dhparams file for 'bar' was deleted"):
+        machine.fail("test -e ${getParamPath 1 "bar"}")
+
+    ${switchToGeneration 3}
+
+    with subtest("ensure that 'security.dhparams.path' has been deleted"):
+        machine.fail("test -e ${nodes.generation3.config.security.dhparams.path}")
+
+    ${switchToGeneration 4}
+
+    with subtest("check bit sizes of dhparam files"):
+        assert_param_bits(
+            "${getParamPath 4 "foo2"}", 18
+        )
+        assert_param_bits(
+            "${getParamPath 4 "bar2"}", 19
+        )
+
+    with subtest("check whether dhparam files are in the Nix store"):
+        machine.succeed(
+            "expr match ${getParamPath 4 "foo2"} ${builtins.storeDir}",
+            "expr match ${getParamPath 4 "bar2"} ${builtins.storeDir}",
+        )
+
+    ${switchToGeneration 5}
+
+    with subtest("check whether defaultBitSize works as intended"):
+        assert_param_bits("${getParamPath 5 "foo3"}", 30)
+        assert_param_bits("${getParamPath 5 "bar3"}", 30)
+  '';
+}
diff --git a/nixpkgs/nixos/tests/dnscrypt-proxy2.nix b/nixpkgs/nixos/tests/dnscrypt-proxy2.nix
new file mode 100644
index 000000000000..b614d912a9f4
--- /dev/null
+++ b/nixpkgs/nixos/tests/dnscrypt-proxy2.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "dnscrypt-proxy2";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ joachifm ];
+  };
+
+  nodes = {
+    # A client running the recommended setup: DNSCrypt proxy as a forwarder
+    # for a caching DNS client.
+    client =
+    { ... }:
+    let localProxyPort = 43; in
+    {
+      security.apparmor.enable = true;
+
+      services.dnscrypt-proxy2.enable = true;
+      services.dnscrypt-proxy2.settings = {
+        listen_addresses = [ "127.0.0.1:${toString localProxyPort}" ];
+        sources.public-resolvers = {
+          urls = [ "https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md" ];
+          cache_file = "public-resolvers.md";
+          minisign_key = "RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3";
+          refresh_delay = 72;
+        };
+      };
+
+      services.dnsmasq.enable = true;
+      services.dnsmasq.servers = [ "127.0.0.1#${toString localProxyPort}" ];
+    };
+  };
+
+  testScript = ''
+    client.wait_for_unit("dnsmasq")
+    client.wait_for_unit("dnscrypt-proxy2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/docker-containers.nix b/nixpkgs/nixos/tests/docker-containers.nix
new file mode 100644
index 000000000000..0e318a52d9f1
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-containers.nix
@@ -0,0 +1,27 @@
+# Test Docker containers as systemd units
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "docker-containers";
+  meta = {
+    maintainers = with lib.maintainers; [ benley mkaito ];
+  };
+
+  nodes = {
+    docker = { pkgs, ... }: {
+      virtualisation.docker.enable = true;
+
+      docker-containers.nginx = {
+        image = "nginx-container";
+        imageFile = pkgs.dockerTools.examples.nginx;
+        ports = ["8181:80"];
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    docker.wait_for_unit("docker-nginx.service")
+    docker.wait_for_open_port(8181)
+    docker.wait_until_succeeds("curl http://localhost:8181 | grep Hello")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/docker-edge.nix b/nixpkgs/nixos/tests/docker-edge.nix
new file mode 100644
index 000000000000..96de885a554a
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-edge.nix
@@ -0,0 +1,49 @@
+# This test runs Docker and checks whether a simple container starts.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "docker";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ nequissimus offline ];
+  };
+
+  nodes = {
+    docker =
+      { pkgs, ... }:
+        {
+          virtualisation.docker.enable = true;
+          virtualisation.docker.package = pkgs.docker-edge;
+
+          users.users = {
+            noprivs = {
+              isNormalUser = true;
+              description = "Can't access the docker daemon";
+              password = "foobar";
+            };
+
+            hasprivs = {
+              isNormalUser = true;
+              description = "Can access the docker daemon";
+              password = "foobar";
+              extraGroups = [ "docker" ];
+            };
+          };
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    docker.wait_for_unit("sockets.target")
+    docker.succeed("tar cv --files-from /dev/null | docker import - scratchimg")
+    docker.succeed(
+        "docker run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+    )
+    docker.succeed("docker ps | grep sleeping")
+    docker.succeed("sudo -u hasprivs docker ps")
+    docker.fail("sudo -u noprivs docker ps")
+    docker.succeed("docker stop sleeping")
+
+    # Must match version twice to ensure client and server versions are correct
+    docker.succeed('[ $(docker version | grep ${pkgs.docker-edge.version} | wc -l) = "2" ]')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/docker-preloader.nix b/nixpkgs/nixos/tests/docker-preloader.nix
new file mode 100644
index 000000000000..eeedec9a392e
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-preloader.nix
@@ -0,0 +1,27 @@
+import ./make-test.nix ({ pkgs, ...} : {
+  name = "docker-preloader";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lewo ];
+  };
+
+  nodes = {
+    docker =
+      { pkgs, ... }:
+        {
+          virtualisation.docker.enable = true;
+          virtualisation.dockerPreloader.images = [ pkgs.dockerTools.examples.nix pkgs.dockerTools.examples.bash ];
+
+          services.openssh.enable = true;
+          services.openssh.permitRootLogin = "yes";
+          services.openssh.extraConfig = "PermitEmptyPasswords yes";
+          users.extraUsers.root.password = "";
+        };
+  };    
+  testScript = ''
+    startAll;
+    
+    $docker->waitForUnit("sockets.target");
+    $docker->succeed("docker run nix nix-store --version");
+    $docker->succeed("docker run bash bash --version");
+  '';
+})
diff --git a/nixpkgs/nixos/tests/docker-registry.nix b/nixpkgs/nixos/tests/docker-registry.nix
new file mode 100644
index 000000000000..2928fd8141a4
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-registry.nix
@@ -0,0 +1,61 @@
+# This test runs docker-registry and checks whether it works.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "docker-registry";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ globin ma27 ironpinguin ];
+  };
+
+  nodes = {
+    registry = { ... }: {
+      services.dockerRegistry.enable = true;
+      services.dockerRegistry.enableDelete = true;
+      services.dockerRegistry.port = 8080;
+      services.dockerRegistry.listenAddress = "0.0.0.0";
+      services.dockerRegistry.enableGarbageCollect = true;
+      networking.firewall.allowedTCPPorts = [ 8080 ];
+    };
+
+    client1 = { ... }: {
+      virtualisation.docker.enable = true;
+      virtualisation.docker.extraOptions = "--insecure-registry registry:8080";
+    };
+
+    client2 = { ... }: {
+      virtualisation.docker.enable = true;
+      virtualisation.docker.extraOptions = "--insecure-registry registry:8080";
+    };
+  };
+
+  testScript = ''
+    client1.start()
+    client1.wait_for_unit("docker.service")
+    client1.succeed("tar cv --files-from /dev/null | docker import - scratch")
+    client1.succeed("docker tag scratch registry:8080/scratch")
+
+    registry.start()
+    registry.wait_for_unit("docker-registry.service")
+    registry.wait_for_open_port("8080")
+    client1.succeed("docker push registry:8080/scratch")
+
+    client2.start()
+    client2.wait_for_unit("docker.service")
+    client2.succeed("docker pull registry:8080/scratch")
+    client2.succeed("docker images | grep scratch")
+
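+    # The registry v2 API deletes manifests by digest only: read the
+    # Docker-Content-Digest header for the "latest" manifest and DELETE
+    # that digest.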
+    client2.succeed(
+        "curl -fsS -X DELETE registry:8080/v2/scratch/manifests/$(curl -fsS -I -H\"Accept: application/vnd.docker.distribution.manifest.v2+json\" registry:8080/v2/scratch/manifests/latest | grep Docker-Content-Digest | sed -e 's/Docker-Content-Digest: //' | tr -d '\\r')"
+    )
+
+    registry.systemctl("start docker-registry-garbage-collect.service")
+    registry.wait_until_fails("systemctl status docker-registry-garbage-collect.service")
+    registry.wait_for_unit("docker-registry.service")
+
+    registry.fail("ls -l /var/lib/docker-registry/docker/registry/v2/blobs/sha256/*/*/data")
+
+    client1.succeed("docker push registry:8080/scratch")
+    registry.succeed(
+        "ls -l /var/lib/docker-registry/docker/registry/v2/blobs/sha256/*/*/data"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/docker-tools-overlay.nix b/nixpkgs/nixos/tests/docker-tools-overlay.nix
new file mode 100644
index 000000000000..1a0e0ea67750
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-tools-overlay.nix
@@ -0,0 +1,33 @@
+# This test builds a simple GNU bash image with dockerTools and checks that it executes.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "docker-tools-overlay";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lnl7 ];
+  };
+
+  nodes = {
+    docker =
+      { ... }:
+      {
+        virtualisation.docker.enable = true;
+        virtualisation.docker.storageDriver = "overlay";  # defaults to overlay2
+      };
+  };
+
+  testScript = ''
+      docker.wait_for_unit("sockets.target")
+
+      docker.succeed(
+          "docker load --input='${pkgs.dockerTools.examples.bash}'",
+          "docker run --rm ${pkgs.dockerTools.examples.bash.imageName} bash --version",
+      )
+
+      # Check that the nix store has the correct permissions regardless of the
+      # storage driver in use; incorrectly built images can show up as read-only.
+      # drw-------  3 0 0   3 Apr 14 11:36 /nix
+      # drw------- 99 0 0 100 Apr 14 11:36 /nix/store
+      docker.succeed("docker run --rm -u 1000:1000 ${pkgs.dockerTools.examples.bash.imageName} bash --version")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/docker-tools.nix b/nixpkgs/nixos/tests/docker-tools.nix
new file mode 100644
index 000000000000..51b472fcf9ce
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-tools.nix
@@ -0,0 +1,158 @@
+# This test builds a simple GNU bash image with dockerTools and checks that it executes.
+
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "docker-tools";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lnl7 ];
+  };
+
+  nodes = {
+    docker = { ... }: {
+      virtualisation = {
+        diskSize = 2048;
+        docker.enable = true;
+      };
+    };
+  };
+
+  testScript = with pkgs.dockerTools; ''
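+    # dockerTools.buildImage pins the image creation time to one second after
+    # the Unix epoch by default, keeping image builds reproducible; the first
+    # subtest asserts that timestamp.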
+    unix_time_second1 = "1970-01-01T00:00:01Z"
+
+    docker.wait_for_unit("sockets.target")
+
+    with subtest("Ensure Docker images use a stable date by default"):
+        docker.succeed(
+            "docker load --input='${examples.bash}'"
+        )
+        assert unix_time_second1 in docker.succeed(
+            "docker inspect ${examples.bash.imageName} "
+            + "| ${pkgs.jq}/bin/jq -r .[].Created",
+        )
+
+    docker.succeed("docker run --rm ${examples.bash.imageName} bash --version")
+    docker.succeed("docker rmi ${examples.bash.imageName}")
+
+    with subtest(
+        "Check if the nix store is correctly initialized by listing "
+        "dependencies of the installed Nix binary"
+    ):
+        docker.succeed(
+            "docker load --input='${examples.nix}'",
+            "docker run --rm ${examples.nix.imageName} nix-store -qR ${pkgs.nix}",
+            "docker rmi ${examples.nix.imageName}",
+        )
+
+    with subtest("The pullImage tool works"):
+        docker.succeed(
+            "docker load --input='${examples.nixFromDockerHub}'",
+            "docker run --rm nix:2.2.1 nix-store --version",
+            "docker rmi nix:2.2.1",
+        )
+
+    with subtest("runAsRoot and entry point work"):
+        docker.succeed(
+            "docker load --input='${examples.nginx}'",
+            "docker run --name nginx -d -p 8000:80 ${examples.nginx.imageName}",
+        )
+        docker.wait_until_succeeds("curl http://localhost:8000/")
+        docker.succeed(
+            "docker rm --force nginx", "docker rmi '${examples.nginx.imageName}'",
+        )
+
+    with subtest("A pulled image can be used as base image"):
+        docker.succeed(
+            "docker load --input='${examples.onTopOfPulledImage}'",
+            "docker run --rm ontopofpulledimage hello",
+            "docker rmi ontopofpulledimage",
+        )
+
+    with subtest("Regression test for issue #34779"):
+        docker.succeed(
+            "docker load --input='${examples.runAsRootExtraCommands}'",
+            "docker run --rm runasrootextracommands cat extraCommands",
+            "docker run --rm runasrootextracommands cat runAsRoot",
+            "docker rmi '${examples.runAsRootExtraCommands.imageName}'",
+        )
+
+    with subtest("Ensure Docker images can use an unstable date"):
+        docker.succeed(
+            "docker load --input='${examples.unstableDate}'"
+        )
+        assert unix_time_second1 not in docker.succeed(
+            "docker inspect ${examples.unstableDate.imageName} "
+            + "| ${pkgs.jq}/bin/jq -r .[].Created"
+        )
+
+    with subtest("Ensure Layered Docker images work"):
+        docker.succeed(
+            "docker load --input='${examples.layered-image}'",
+            "docker run --rm ${examples.layered-image.imageName}",
+            "docker run --rm ${examples.layered-image.imageName} cat extraCommands",
+        )
+
+    with subtest("Ensure building an image on top of a layered Docker image works"):
+        docker.succeed(
+            "docker load --input='${examples.layered-on-top}'",
+            "docker run --rm ${examples.layered-on-top.imageName}",
+        )
+
+
+    def set_of_layers(image_name):
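+        # `docker inspect` reports an image's RootFS layer digests; return them
+        # as a set so the subtests below can check for shared layers.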
+        return set(
+            docker.succeed(
+                f"docker inspect {image_name} "
+                + "| ${pkgs.jq}/bin/jq -r '.[] | .RootFS.Layers | .[]'"
+            ).split()
+        )
+
+
+    with subtest("Ensure layers are shared between images"):
+        docker.succeed(
+            "docker load --input='${examples.another-layered-image}'"
+        )
+        layers1 = set_of_layers("${examples.layered-image.imageName}")
+        layers2 = set_of_layers("${examples.another-layered-image.imageName}")
+        assert bool(layers1 & layers2)
+
+    with subtest("Ensure order of layers is correct"):
+        docker.succeed(
+            "docker load --input='${examples.layersOrder}'"
+        )
+
+        for index in 1, 2, 3:
+            assert f"layer{index}" in docker.succeed(
+                f"docker run --rm  ${examples.layersOrder.imageName} cat /tmp/layer{index}"
+            )
+
+    with subtest("Ensure image with only 2 layers can be loaded"):
+        docker.succeed(
+            "docker load --input='${examples.two-layered-image}'"
+        )
+
+    with subtest(
+        "Ensure the bulk layer doesn't miss store paths (regression test for #78744)"
+    ):
+        docker.succeed(
+            "docker load --input='${pkgs.dockerTools.examples.bulk-layer}'",
+            # Ensure the two output paths (ls and hello) are in the layer
+            "docker run bulk-layer ls /bin/hello",
+        )
+
+    with subtest("Ensure correct behavior when no store is needed"):
+        # This check tests two requirements simultaneously
+        #  1. buildLayeredImage can build images that don't need a store.
+        #  2. Layers of symlinks are eliminated by the customization layer.
+        #
+        docker.succeed(
+            "docker load --input='${pkgs.dockerTools.examples.no-store-paths}'"
+        )
+
+        # Busybox does not recognize argv[0] and prints an error message containing
+        # it, which confirms that the custom-true symlink is present.
+        docker.succeed("docker run --rm no-store-paths custom-true |& grep custom-true")
+
+        # This check may be loosened to allow an *empty* store rather than *no* store.
+        docker.succeed("docker run --rm no-store-paths ls /")
+        docker.fail("docker run --rm no-store-paths ls /nix/store")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/docker.nix b/nixpkgs/nixos/tests/docker.nix
new file mode 100644
index 000000000000..8fda7c1395ef
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker.nix
@@ -0,0 +1,49 @@
+# This test runs Docker and checks whether a simple container starts.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "docker";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ nequissimus offline ];
+  };
+
+  nodes = {
+    docker =
+      { pkgs, ... }:
+        {
+          virtualisation.docker.enable = true;
+          virtualisation.docker.package = pkgs.docker;
+
+          users.users = {
+            noprivs = {
+              isNormalUser = true;
+              description = "Can't access the docker daemon";
+              password = "foobar";
+            };
+
+            hasprivs = {
+              isNormalUser = true;
+              description = "Can access the docker daemon";
+              password = "foobar";
+              extraGroups = [ "docker" ];
+            };
+          };
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    docker.wait_for_unit("sockets.target")
+    docker.succeed("tar cv --files-from /dev/null | docker import - scratchimg")
+    docker.succeed(
+        "docker run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+    )
+    docker.succeed("docker ps | grep sleeping")
+    docker.succeed("sudo -u hasprivs docker ps")
+    docker.fail("sudo -u noprivs docker ps")
+    docker.succeed("docker stop sleeping")
+
+    # Must match version twice to ensure client and server versions are correct
+    docker.succeed('[ $(docker version | grep ${pkgs.docker.version} | wc -l) = "2" ]')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/documize.nix b/nixpkgs/nixos/tests/documize.nix
new file mode 100644
index 000000000000..3be20a780d31
--- /dev/null
+++ b/nixpkgs/nixos/tests/documize.nix
@@ -0,0 +1,62 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "documize";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ma27 ];
+  };
+
+  machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.jq ];
+
+    services.documize = {
+      enable = true;
+      port = 3000;
+      dbtype = "postgresql";
+      db = "host=localhost port=5432 sslmode=disable user=documize password=documize dbname=documize";
+    };
+
+    systemd.services.documize-server = {
+      after = [ "postgresql.service" ];
+      requires = [ "postgresql.service" ];
+    };
+
+    services.postgresql = {
+      enable = true;
+      initialScript = pkgs.writeText "psql-init" ''
+        CREATE ROLE documize WITH LOGIN PASSWORD 'documize';
+        CREATE DATABASE documize WITH OWNER documize;
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("documize-server.service")
+    machine.wait_for_open_port(3000)
+
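+    # The setup page embeds a one-time database hash in a meta tag; extract
+    # its content attribute so it can be posted back to /api/setup below.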
+    dbhash = machine.succeed(
+        "curl -f localhost:3000 | grep 'property=\"dbhash' | grep -Po 'content=\"\\K[^\"]*'"
+    )
+
+    dbhash = dbhash.strip()
+
+    machine.succeed(
+        (
+            "curl -X POST"
+            " --data 'dbname=documize'"
+            " --data 'dbhash={}'"
+            " --data 'title=NixOS'"
+            " --data 'message=Docs'"
+            " --data 'firstname=John'"
+            " --data 'lastname=Doe'"
+            " --data 'email=john.doe@nixos.org'"
+            " --data 'password=verysafe'"
+            " -f localhost:3000/api/setup"
+        ).format(dbhash)
+    )
+
+    machine.succeed(
+        'test "$(curl -f localhost:3000/api/public/meta | jq ".title" | xargs echo)" = "NixOS"'
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dokuwiki.nix b/nixpkgs/nixos/tests/dokuwiki.nix
new file mode 100644
index 000000000000..2b907133ed5a
--- /dev/null
+++ b/nixpkgs/nixos/tests/dokuwiki.nix
@@ -0,0 +1,74 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  template-bootstrap3 = pkgs.stdenv.mkDerivation {
+    name = "bootstrap3";
+    # Download the theme from the dokuwiki site
+    src = pkgs.fetchurl {
+      url = https://github.com/giterlizzi/dokuwiki-template-bootstrap3/archive/v2019-05-22.zip;
+      sha256 = "4de5ff31d54dd61bbccaf092c9e74c1af3a4c53e07aa59f60457a8f00cfb23a6";
+    };
+    # We need unzip to build this package
+    buildInputs = [ pkgs.unzip ];
+    # Installing simply means copying all files to the output directory
+    installPhase = "mkdir -p $out; cp -R * $out/";
+  };
+
+
+  # Let's package the icalevents plugin
+  plugin-icalevents = pkgs.stdenv.mkDerivation {
+    name = "icalevents";
+    # Download the plugin from the dokuwiki site
+    src = pkgs.fetchurl {
+      url = https://github.com/real-or-random/dokuwiki-plugin-icalevents/releases/download/2017-06-16/dokuwiki-plugin-icalevents-2017-06-16.zip;
+      sha256 = "e40ed7dd6bbe7fe3363bbbecb4de481d5e42385b5a0f62f6a6ce6bf3a1f9dfa8";
+    };
+    # We need unzip to build this package
+    buildInputs = [ pkgs.unzip ];
+    sourceRoot = ".";
+    # Installing simply means copying all files to the output directory
+    installPhase = "mkdir -p $out; cp -R * $out/";
+  };
+
+in {
+  name = "dokuwiki";
+  meta.maintainers = with pkgs.lib.maintainers; [ "1000101" ];
+
+  machine = { ... }: {
+    services.dokuwiki."site1.local" = {
+      aclUse = false;
+      superUser = "admin";
+      nginx = {
+        forceSSL = false;
+        enableACME = false;
+      };
+    };
+    services.dokuwiki."site2.local" = {
+      aclUse = true;
+      superUser = "admin";
+      nginx = {
+        forceSSL = false;
+        enableACME = false;
+      };
+      templates = [ template-bootstrap3 ];
+      plugins = [ plugin-icalevents ];
+    };
+    networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+  };
+
+  testScript = ''
+    site_names = ["site1.local", "site2.local"]
+
+    start_all()
+
+    machine.wait_for_unit("phpfpm-dokuwiki-site1.local.service")
+    machine.wait_for_unit("phpfpm-dokuwiki-site2.local.service")
+
+    machine.wait_for_unit("nginx.service")
+
+    machine.wait_for_open_port(80)
+
+    machine.succeed("curl -sSfL http://site1.local/ | grep 'DokuWiki'")
+    machine.succeed("curl -sSfL http://site2.local/ | grep 'DokuWiki'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dovecot.nix b/nixpkgs/nixos/tests/dovecot.nix
new file mode 100644
index 000000000000..bcbe234fd805
--- /dev/null
+++ b/nixpkgs/nixos/tests/dovecot.nix
@@ -0,0 +1,77 @@
+import ./make-test-python.nix {
+  name = "dovecot";
+
+  machine = { pkgs, ... }: {
+    imports = [ common/user-account.nix ];
+    services.postfix.enable = true;
+    services.dovecot2.enable = true;
+    services.dovecot2.protocols = [ "imap" "pop3" ];
+    environment.systemPackages = let
+      sendTestMail = pkgs.writeScriptBin "send-testmail" ''
+        #!${pkgs.runtimeShell}
+        exec sendmail -vt <<MAIL
+        From: root@localhost
+        To: alice@localhost
+        Subject: Very important!
+
+        Hello world!
+        MAIL
+      '';
+
+      sendTestMailViaDeliveryAgent = pkgs.writeScriptBin "send-lda" ''
+        #!${pkgs.runtimeShell}
+
+        exec ${pkgs.dovecot}/libexec/dovecot/deliver -d bob <<MAIL
+        From: root@localhost
+        To: bob@localhost
+        Subject: Something else...
+
+        I'm running short of ideas!
+        MAIL
+      '';
+
+      testImap = pkgs.writeScriptBin "test-imap" ''
+        #!${pkgs.python3.interpreter}
+        import imaplib
+
+        with imaplib.IMAP4('localhost') as imap:
+          imap.login('alice', 'foobar')
+          imap.select()
+          status, refs = imap.search(None, 'ALL')
+          assert status == 'OK'
+          assert len(refs) == 1
+          status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+          assert status == 'OK'
+          assert msg[0][1].strip() == b'Hello world!'
+      '';
+
+      testPop = pkgs.writeScriptBin "test-pop" ''
+        #!${pkgs.python3.interpreter}
+        import poplib
+
+        pop = poplib.POP3('localhost')
+        try:
+          pop.user('bob')
+          pop.pass_('foobar')
+          assert len(pop.list()[1]) == 1
+          status, fullmail, size = pop.retr(1)
+          assert status.startswith(b'+OK ')
+          body = b"".join(fullmail[fullmail.index(b""):]).strip()
+          assert body == b"I'm running short of ideas!"
+        finally:
+          pop.quit()
+      '';
+
+    in [ sendTestMail sendTestMailViaDeliveryAgent testImap testPop ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("postfix.service")
+    machine.wait_for_unit("dovecot2.service")
+    machine.succeed("send-testmail")
+    machine.succeed("send-lda")
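+    # Wait for the Postfix queue to drain, i.e. for the test mail to be
+    # delivered, before exercising the IMAP and POP3 mailboxes.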
+    machine.wait_until_fails('[ "$(postqueue -p)" != "Mail queue is empty" ]')
+    machine.succeed("test-imap")
+    machine.succeed("test-pop")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/ec2.nix b/nixpkgs/nixos/tests/ec2.nix
new file mode 100644
index 000000000000..6aeeb17ba31a
--- /dev/null
+++ b/nixpkgs/nixos/tests/ec2.nix
@@ -0,0 +1,149 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
+
+with import common/ec2.nix { inherit makeTest pkgs; };
+
+let
+  imageCfg =
+    (import ../lib/eval-config.nix {
+      inherit system;
+      modules = [
+        ../maintainers/scripts/ec2/amazon-image.nix
+        ../modules/testing/test-instrumentation.nix
+        ../modules/profiles/qemu-guest.nix
+        { ec2.hvm = true;
+
+          # Hack to make the partition resizing work in QEMU.
+          boot.initrd.postDeviceCommands = mkBefore
+            ''
+              ln -s vda /dev/xvda
+              ln -s vda1 /dev/xvda1
+            '';
+
+          # Needed by nixos-rebuild due to the lack of network
+          # access. Determined by trial and error.
+          system.extraDependencies =
+            with pkgs; (
+              [
+                # Needed for a nixos-rebuild.
+                busybox
+                stdenv
+                stdenvNoCC
+                mkinitcpio-nfs-utils
+                unionfs-fuse
+                cloud-utils
+                desktop-file-utils
+                texinfo
+                libxslt.bin
+                xorg.lndir
+
+                # These are used in the configure-from-userdata tests
+                # for EC2. Httpd and valgrind are requested by the
+                # configuration.
+                apacheHttpd apacheHttpd.doc apacheHttpd.man valgrind.doc
+              ]
+            );
+        }
+      ];
+    }).config;
+  image = "${imageCfg.system.build.amazonImage}/${imageCfg.amazonImage.name}.vhd";
+
+  sshKeys = import ./ssh-keys.nix pkgs;
+  snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
+  snakeOilPublicKey = sshKeys.snakeOilPublicKey;
+
+in {
+  boot-ec2-nixops = makeEc2Test {
+    name         = "nixops-userdata";
+    inherit image;
+    sshPublicKey = snakeOilPublicKey; # That's right folks! My user's key is also the host key!
+
+    userData = ''
+      SSH_HOST_ED25519_KEY_PUB:${snakeOilPublicKey}
+      SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey}
+    '';
+    script = ''
+      $machine->start;
+      $machine->waitForFile("/etc/ec2-metadata/user-data");
+      $machine->waitForUnit("sshd.service");
+
+      $machine->succeed("grep unknown /etc/ec2-metadata/ami-manifest-path");
+
+      # We have no keys configured on the client side yet, so this should fail
+      $machine->fail("ssh -o BatchMode=yes localhost exit");
+
+      # Let's install our client private key
+      $machine->succeed("mkdir -p ~/.ssh");
+
+      $machine->succeed("echo '${snakeOilPrivateKey}' > ~/.ssh/id_ed25519");
+      $machine->succeed("chmod 600 ~/.ssh/id_ed25519");
+
+      # We haven't configured the host key yet, so this should still fail
+      $machine->fail("ssh -o BatchMode=yes localhost exit");
+
+      # Add the host key; ssh should finally succeed
+      $machine->succeed("echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts");
+      $machine->succeed("ssh -o BatchMode=yes localhost exit");
+
+      # Test whether the root disk was resized.
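+      # stat -f reports the filesystem's total block count and block size; their
+      # product should land just under the image's 10 GiB size.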
+      my $blocks = $machine->succeed("stat -c %b -f /");
+      my $bsize = $machine->succeed("stat -c %S -f /");
+      my $size = $blocks * $bsize;
+      die "wrong free space $size" if $size < 9.7 * 1024 * 1024 * 1024 || $size > 10 * 1024 * 1024 * 1024;
+
+      # Just to make sure resizing is idempotent.
+      $machine->shutdown;
+      $machine->start;
+      $machine->waitForFile("/etc/ec2-metadata/user-data");
+    '';
+  };
+
+  boot-ec2-config = makeEc2Test {
+    name         = "config-userdata";
+    inherit image;
+    sshPublicKey = snakeOilPublicKey;
+
+    # ### http://nixos.org/channels/nixos-unstable nixos
+    userData = ''
+      { pkgs, ... }:
+
+      {
+        imports = [
+          <nixpkgs/nixos/modules/virtualisation/amazon-image.nix>
+          <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
+        ];
+        environment.etc.testFile = {
+          text = "whoa";
+        };
+
+        networking.hostName = "ec2-test-vm"; # required by services.httpd
+
+        services.httpd = {
+          enable = true;
+          adminAddr = "test@example.org";
+          virtualHosts.localhost.documentRoot = "''${pkgs.valgrind.doc}/share/doc/valgrind/html";
+        };
+        networking.firewall.allowedTCPPorts = [ 80 ];
+      }
+    '';
+    script = ''
+      $machine->start;
+
+      # amazon-init must succeed. if it fails, make the test fail
+      # immediately instead of timing out in waitForFile.
+      $machine->waitForUnit('amazon-init.service');
+
+      $machine->waitForFile("/etc/testFile");
+      $machine->succeed("cat /etc/testFile | grep -q 'whoa'");
+
+      $machine->waitForUnit("httpd.service");
+      $machine->succeed("curl http://localhost | grep Valgrind");
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/ecryptfs.nix b/nixpkgs/nixos/tests/ecryptfs.nix
new file mode 100644
index 000000000000..ef7bd13eb92c
--- /dev/null
+++ b/nixpkgs/nixos/tests/ecryptfs.nix
@@ -0,0 +1,85 @@
+import ./make-test-python.nix ({ ... }:
+{
+  name = "ecryptfs";
+
+  machine = { pkgs, ... }: {
+    imports = [ ./common/user-account.nix ];
+    boot.kernelModules = [ "ecryptfs" ];
+    security.pam.enableEcryptfs = true;
+    environment.systemPackages = with pkgs; [ keyutils ];
+  };
+
+  testScript = ''
+    def login_as_alice():
+        machine.wait_until_tty_matches(1, "login: ")
+        machine.send_chars("alice\n")
+        machine.wait_until_tty_matches(1, "Password: ")
+        machine.send_chars("foobar\n")
+        machine.wait_until_tty_matches(1, "alice\@machine")
+
+
+    def logout():
+        machine.send_chars("logout\n")
+        machine.wait_until_tty_matches(1, "login: ")
+
+
+    machine.wait_for_unit("default.target")
+
+    with subtest("Set alice up with a password and a home"):
+        machine.succeed("(echo foobar; echo foobar) | passwd alice")
+        machine.succeed("chown -R alice:users ~alice")
+
+    with subtest("Migrate alice's home"):
+        out = machine.succeed("echo foobar | ecryptfs-migrate-home -u alice")
+        machine.log(f"ecryptfs-migrate-home said: {out}")
+
+    with subtest("Log alice in (ecryptfs passphrase is wrapped during first login)"):
+        login_as_alice()
+        logout()
+
+    # Why do I need to do this??
+    machine.succeed("su alice -c ecryptfs-umount-private || true")
+    machine.sleep(1)
+
+    with subtest("check that encrypted home is not mounted"):
+        machine.fail("mount | grep ecryptfs")
+
+    with subtest("Show contents of the user keyring"):
+        out = machine.succeed("su - alice -c 'keyctl list \@u'")
+        machine.log(f"keyctl unlink said: {out}")
+
+    with subtest("Log alice in again"):
+        login_as_alice()
+
+    with subtest("Create some files in encrypted home"):
+        machine.succeed("su alice -c 'touch ~alice/a'")
+        machine.succeed("su alice -c 'echo c > ~alice/b'")
+
+    with subtest("Logout"):
+        logout()
+
+    # Why do I need to do this??
+    machine.succeed("su alice -c ecryptfs-umount-private || true")
+    machine.sleep(1)
+
+    with subtest("Check that the filesystem is not accessible"):
+        machine.fail("mount | grep ecryptfs")
+        machine.succeed("su alice -c 'test \! -f ~alice/a'")
+        machine.succeed("su alice -c 'test \! -f ~alice/b'")
+
+    with subtest("Log alice in once more"):
+        login_as_alice()
+
+    with subtest("Check that the files are there"):
+        machine.sleep(1)
+        machine.succeed("su alice -c 'test -f ~alice/a'")
+        machine.succeed("su alice -c 'test -f ~alice/b'")
+        machine.succeed('test "$(cat ~alice/b)" = "c"')
+
+    with subtest("Catch https://github.com/NixOS/nixpkgs/issues/16766"):
+        machine.succeed("su alice -c 'ls -lh ~alice/'")
+
+    logout()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/elk.nix b/nixpkgs/nixos/tests/elk.nix
new file mode 100644
index 000000000000..d3dc6dde1359
--- /dev/null
+++ b/nixpkgs/nixos/tests/elk.nix
@@ -0,0 +1,212 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; },
+  enableUnfree ? false
+  # To run the test on the unfree ELK use the following command:
+  # NIXPKGS_ALLOW_UNFREE=1 nix-build nixos/tests/elk.nix -A ELK-6 --arg enableUnfree true
+}:
+
+let
+  esUrl = "http://localhost:9200";
+
+  mkElkTest = name : elk :
+    import ./make-test-python.nix ({
+    inherit name;
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ eelco offline basvandijk ];
+    };
+    nodes = {
+      one =
+        { pkgs, lib, ... }: {
+            # Not giving the machine at least 2060MB results in elasticsearch failing with the following error:
+            #
+            #   OpenJDK 64-Bit Server VM warning:
+            #     INFO: os::commit_memory(0x0000000085330000, 2060255232, 0)
+            #     failed; error='Cannot allocate memory' (errno=12)
+            #
+            #   There is insufficient memory for the Java Runtime Environment to continue.
+            #   Native memory allocation (mmap) failed to map 2060255232 bytes for committing reserved memory.
+            #
+            # When setting this to 2500 I got "Kernel panic - not syncing: Out of
+            # memory: compulsory panic_on_oom is enabled", so let's give it even a
+            # bit more room:
+            virtualisation.memorySize = 3000;
+
+            # For querying JSON objects returned from elasticsearch and kibana.
+            environment.systemPackages = [ pkgs.jq ];
+
+            services = {
+
+              journalbeat = let lt6 = builtins.compareVersions
+                                        elk.journalbeat.version "6" < 0; in {
+                enable = true;
+                package = elk.journalbeat;
+                extraConfig = pkgs.lib.mkOptionDefault (''
+                  logging:
+                    to_syslog: true
+                    level: warning
+                    metrics.enabled: false
+                  output.elasticsearch:
+                    hosts: [ "127.0.0.1:9200" ]
+                    ${pkgs.lib.optionalString lt6 "template.enabled: false"}
+                '' + pkgs.lib.optionalString (!lt6) ''
+                  journalbeat.inputs:
+                  - paths: []
+                    seek: cursor
+                '');
+              };
+
+              logstash = {
+                enable = true;
+                package = elk.logstash;
+                inputConfig = ''
+                  exec { command => "echo -n flowers" interval => 1 type => "test" }
+                  exec { command => "echo -n dragons" interval => 1 type => "test" }
+                '';
+                filterConfig = ''
+                  if [message] =~ /dragons/ {
+                    drop {}
+                  }
+                '';
+                outputConfig = ''
+                  file {
+                    path => "/tmp/logstash.out"
+                    codec => line { format => "%{message}" }
+                  }
+                  elasticsearch {
+                    hosts => [ "${esUrl}" ]
+                  }
+                '';
+              };
+
+              elasticsearch = {
+                enable = true;
+                package = elk.elasticsearch;
+              };
+
+              kibana = {
+                enable = true;
+                package = elk.kibana;
+              };
+
+              elasticsearch-curator = {
+                enable = true;
+                actionYAML = ''
+                ---
+                actions:
+                  1:
+                    action: delete_indices
+                    description: >-
+                      Delete indices older than 1 second (based on index name), for logstash-
+                      prefixed indices. Ignore the error if the filter does not result in an
+                      actionable list of indices (ignore_empty_list) and exit cleanly.
+                    options:
+                      ignore_empty_list: True
+                      disable_action: False
+                    filters:
+                    - filtertype: pattern
+                      kind: prefix
+                      value: logstash-
+                    - filtertype: age
+                      source: name
+                      direction: older
+                      timestring: '%Y.%m.%d'
+                      unit: seconds
+                      unit_count: 1
+                '';
+              };
+            };
+          };
+      };
+
+    testScript = ''
+      import json
+
+
+      def total_hits(message):
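+          # Build a curl command against the _search endpoint that reports how
+          # many documents have a "message" field matching the given string.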
+          dictionary = {"query": {"match": {"message": message}}}
+          return (
+              "curl --silent --show-error '${esUrl}/_search' "
+              + "-H 'Content-Type: application/json' "
+              + "-d '{}' ".format(json.dumps(dictionary))
+              + "| jq .hits.total"
+          )
+
+
+      start_all()
+
+      one.wait_for_unit("elasticsearch.service")
+      one.wait_for_open_port(9200)
+
+      # Continue as long as the status is not "red". The status is probably
+      # "yellow" instead of "green" because we are using a single elasticsearch
+      # node which elasticsearch considers risky.
+      #
+      # TODO: extend this test with multiple elasticsearch nodes
+      #       and see if the status turns "green".
+      one.wait_until_succeeds(
+          "curl --silent --show-error '${esUrl}/_cluster/health' | jq .status | grep -v red"
+      )
+
+      with subtest("Perform some simple logstash tests"):
+          one.wait_for_unit("logstash.service")
+          one.wait_until_succeeds("cat /tmp/logstash.out | grep flowers")
+          one.wait_until_succeeds("cat /tmp/logstash.out | grep -v dragons")
+
+      with subtest("Kibana is healthy"):
+          one.wait_for_unit("kibana.service")
+          one.wait_until_succeeds(
+              "curl --silent --show-error 'http://localhost:5601/api/status' | jq .status.overall.state | grep green"
+          )
+
+      with subtest("Logstash messages arrive in elasticsearch"):
+          one.wait_until_succeeds(total_hits("flowers") + " | grep -v 0")
+          one.wait_until_succeeds(total_hits("dragons") + " | grep 0")
+
+      with subtest(
+          "A message logged to the journal is ingested by elasticsearch via journalbeat"
+      ):
+          one.wait_for_unit("journalbeat.service")
+          one.execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat")
+          one.wait_until_succeeds(
+              total_hits("Supercalifragilisticexpialidocious") + " | grep -v 0"
+          )
+
+      with subtest("Elasticsearch-curator works"):
+          one.systemctl("stop logstash")
+          one.systemctl("start elasticsearch-curator")
+          one.wait_until_succeeds(
+              '! curl --silent --show-error "${esUrl}/_cat/indices" | grep logstash | grep -q ^'
+          )
+    '';
+  }) {};
+in pkgs.lib.mapAttrs mkElkTest {
+  ELK-6 =
+    if enableUnfree
+    then {
+      elasticsearch = pkgs.elasticsearch6;
+      logstash      = pkgs.logstash6;
+      kibana        = pkgs.kibana6;
+      journalbeat   = pkgs.journalbeat6;
+    }
+    else {
+      elasticsearch = pkgs.elasticsearch6-oss;
+      logstash      = pkgs.logstash6-oss;
+      kibana        = pkgs.kibana6-oss;
+      journalbeat   = pkgs.journalbeat6;
+    };
+  ELK-7 =
+    if enableUnfree
+    then {
+      elasticsearch = pkgs.elasticsearch7;
+      logstash      = pkgs.logstash7;
+      kibana        = pkgs.kibana7;
+      journalbeat   = pkgs.journalbeat7;
+    }
+    else {
+      elasticsearch = pkgs.elasticsearch7-oss;
+      logstash      = pkgs.logstash7-oss;
+      kibana        = pkgs.kibana7-oss;
+      journalbeat   = pkgs.journalbeat7;
+    };
+}
diff --git a/nixpkgs/nixos/tests/emacs-daemon.nix b/nixpkgs/nixos/tests/emacs-daemon.nix
new file mode 100644
index 000000000000..b89d9b1bde69
--- /dev/null
+++ b/nixpkgs/nixos/tests/emacs-daemon.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "emacs-daemon";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  enableOCR = true;
+
+  machine =
+    { ... }:
+
+    { imports = [ ./common/x11.nix ];
+      services.emacs = {
+        enable = true;
+        defaultEditor = true;
+      };
+
+      # Important to get the systemd service running for root
+      environment.variables.XDG_RUNTIME_DIR = "/run/user/0";
+
+      environment.variables.TEST_SYSTEM_VARIABLE = "system variable";
+    };
+
+  testScript = ''
+      machine.wait_for_unit("multi-user.target")
+
+      # checks that the EDITOR environment variable is set
+      machine.succeed('test $(basename "$EDITOR") = emacseditor')
+
+      # waits for the emacs service to be ready
+      machine.wait_until_succeeds(
+          "systemctl --user status emacs.service | grep 'Active: active'"
+      )
+
+      # connects to the daemon
+      machine.succeed("emacsclient --create-frame $EDITOR &")
+
+      # checks that Emacs shows the edited filename
+      machine.wait_for_text("emacseditor")
+
+      # makes sure environment variables are accessible from Emacs
+      machine.succeed(
+          "emacsclient --eval '(getenv \"TEST_SYSTEM_VARIABLE\")' | grep -q 'system variable'"
+      )
+
+      machine.screenshot("emacsclient")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/env.nix b/nixpkgs/nixos/tests/env.nix
new file mode 100644
index 000000000000..e603338e489b
--- /dev/null
+++ b/nixpkgs/nixos/tests/env.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "environment";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  machine = { pkgs, ... }:
+    {
+      boot.kernelPackages = pkgs.linuxPackages;
+      environment.etc.plainFile.text = ''
+        Hello World
+      '';
+      environment.etc."folder/with/file".text = ''
+        Foo Bar!
+      '';
+
+      environment.sessionVariables = {
+        TERMINFO_DIRS = "/run/current-system/sw/share/terminfo";
+        NIXCON = "awesome";
+      };
+    };
+
+  testScript = ''
+    machine.succeed('[ -L "/etc/plainFile" ]')
+    assert "Hello World" in machine.succeed('cat "/etc/plainFile"')
+    machine.succeed('[ -d "/etc/folder" ]')
+    machine.succeed('[ -d "/etc/folder/with" ]')
+    machine.succeed('[ -L "/etc/folder/with/file" ]')
+    assert "Hello World" in machine.succeed('cat "/etc/plainFile"')
+
+    assert "/run/current-system/sw/share/terminfo" in machine.succeed(
+        "echo ''${TERMINFO_DIRS}"
+    )
+    assert "awesome" in machine.succeed("echo ''${NIXCON}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/etcd-cluster.nix b/nixpkgs/nixos/tests/etcd-cluster.nix
new file mode 100644
index 000000000000..19c5d9158236
--- /dev/null
+++ b/nixpkgs/nixos/tests/etcd-cluster.nix
@@ -0,0 +1,154 @@
+# This test runs a simple etcd cluster
+
+import ./make-test-python.nix ({ pkgs, ... } : let
+
+  runWithOpenSSL = file: cmd: pkgs.runCommand file {
+    buildInputs = [ pkgs.openssl ];
+  } cmd;
+
+  ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
+  ca_pem = runWithOpenSSL "ca.pem" ''
+    openssl req \
+      -x509 -new -nodes -key ${ca_key} \
+      -days 10000 -out $out -subj "/CN=etcd-ca"
+  '';
+  etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
+  etcd_csr = runWithOpenSSL "etcd.csr" ''
+    openssl req \
+       -new -key ${etcd_key} \
+       -out $out -subj "/CN=etcd" \
+       -config ${openssl_cnf}
+  '';
+  etcd_cert = runWithOpenSSL "etcd.pem" ''
+    openssl x509 \
+      -req -in ${etcd_csr} \
+      -CA ${ca_pem} -CAkey ${ca_key} \
+      -CAcreateserial -out $out \
+      -days 365 -extensions v3_req \
+      -extfile ${openssl_cnf}
+  '';
+
+  etcd_client_key = runWithOpenSSL "etcd-client-key.pem"
+    "openssl genrsa -out $out 2048";
+
+  etcd_client_csr = runWithOpenSSL "etcd-client.csr" ''
+    openssl req \
+      -new -key ${etcd_client_key} \
+      -out $out -subj "/CN=etcd-client" \
+      -config ${client_openssl_cnf}
+  '';
+
+  etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
+    openssl x509 \
+      -req -in ${etcd_client_csr} \
+      -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
+      -out $out -days 365 -extensions v3_req \
+      -extfile ${client_openssl_cnf}
+  '';
+
+  openssl_cnf = pkgs.writeText "openssl.cnf" ''
+    [req]
+    req_extensions = v3_req
+    distinguished_name = req_distinguished_name
+    [req_distinguished_name]
+    [ v3_req ]
+    basicConstraints = CA:FALSE
+    keyUsage = digitalSignature, keyEncipherment
+    extendedKeyUsage = serverAuth
+    subjectAltName = @alt_names
+    [alt_names]
+    DNS.1 = node1
+    DNS.2 = node2
+    DNS.3 = node3
+    IP.1 = 127.0.0.1
+  '';
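+  # All three node names are listed in subjectAltName above because the same
+  # server certificate is shared by every node in this test.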
+
+  client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
+    [req]
+    req_extensions = v3_req
+    distinguished_name = req_distinguished_name
+    [req_distinguished_name]
+    [ v3_req ]
+    basicConstraints = CA:FALSE
+    keyUsage = digitalSignature, keyEncipherment
+    extendedKeyUsage = clientAuth
+  '';
+
+  nodeConfig = {
+    services = {
+      etcd = {
+        enable = true;
+        keyFile = etcd_key;
+        certFile = etcd_cert;
+        trustedCaFile = ca_pem;
+        peerClientCertAuth = true;
+        listenClientUrls = ["https://127.0.0.1:2379"];
+        listenPeerUrls = ["https://0.0.0.0:2380"];
+      };
+    };
+
+    environment.variables = {
+      ETCDCTL_CERT_FILE = "${etcd_client_cert}";
+      ETCDCTL_KEY_FILE = "${etcd_client_key}";
+      ETCDCTL_CA_FILE = "${ca_pem}";
+      ETCDCTL_PEERS = "https://127.0.0.1:2379";
+    };
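+    # etcdctl (v2 API) picks these variables up automatically, so the test
+    # script can call plain `etcdctl` without repeating the TLS options.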
+
+    networking.firewall.allowedTCPPorts = [ 2380 ];
+  };
+in {
+  name = "etcd";
+
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = {
+    node1 = { ... }: {
+      require = [nodeConfig];
+      services.etcd = {
+        initialCluster = ["node1=https://node1:2380" "node2=https://node2:2380"];
+        initialAdvertisePeerUrls = ["https://node1:2380"];
+      };
+    };
+
+    node2 = { ... }: {
+      require = [nodeConfig];
+      services.etcd = {
+        initialCluster = ["node1=https://node1:2380" "node2=https://node2:2380"];
+        initialAdvertisePeerUrls = ["https://node2:2380"];
+      };
+    };
+
+    node3 = { ... }: {
+      require = [nodeConfig];
+      services.etcd = {
+        initialCluster = ["node1=https://node1:2380" "node2=https://node2:2380" "node3=https://node3:2380"];
+        initialAdvertisePeerUrls = ["https://node3:2380"];
+        initialClusterState = "existing";
+      };
+    };
+  };
+
+  testScript = ''
+    with subtest("should start etcd cluster"):
+        node1.start()
+        node2.start()
+        node1.wait_for_unit("etcd.service")
+        node2.wait_for_unit("etcd.service")
+        node2.wait_until_succeeds("etcdctl cluster-health")
+        node1.succeed("etcdctl set /foo/bar 'Hello world'")
+        node2.succeed("etcdctl get /foo/bar | grep 'Hello world'")
+
+    with subtest("should add another member"):
+        node1.wait_until_succeeds("etcdctl member add node3 https://node3:2380")
+        node3.start()
+        node3.wait_for_unit("etcd.service")
+        node3.wait_until_succeeds("etcdctl member list | grep 'node3'")
+        node3.succeed("etcdctl cluster-health")
+
+    with subtest("should survive member crash"):
+        node3.crash()
+        node1.succeed("etcdctl cluster-health")
+        node1.succeed("etcdctl set /foo/bar 'Hello degraded world'")
+        node1.succeed("etcdctl get /foo/bar | grep 'Hello degraded world'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/etcd.nix b/nixpkgs/nixos/tests/etcd.nix
new file mode 100644
index 000000000000..842724343841
--- /dev/null
+++ b/nixpkgs/nixos/tests/etcd.nix
@@ -0,0 +1,25 @@
+# This test runs a simple etcd node
+
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "etcd";
+
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = {
+    node = { ... }: {
+      services.etcd.enable = true;
+    };
+  };
+
+  testScript = ''
+    with subtest("should start etcd node"):
+        node.start()
+        node.wait_for_unit("etcd.service")
+
+    with subtest("should write and read some values to etcd"):
+        node.succeed("etcdctl set /foo/bar 'Hello world'")
+        node.succeed("etcdctl get /foo/bar | grep 'Hello world'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fancontrol.nix b/nixpkgs/nixos/tests/fancontrol.nix
new file mode 100644
index 000000000000..356cd57ffa1a
--- /dev/null
+++ b/nixpkgs/nixos/tests/fancontrol.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "fancontrol";
+
+  machine =
+    { ... }:
+    { hardware.fancontrol.enable = true;
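+      # INTERVAL is the polling period in seconds; the remaining lines map the
+      # hwmon1 PWM output to the temperature and fan inputs that drive it.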
+      hardware.fancontrol.config = ''
+        INTERVAL=42
+        DEVPATH=hwmon1=devices/platform/dummy
+        DEVNAME=hwmon1=dummy
+        FCTEMPS=hwmon1/device/pwm1=hwmon1/device/temp1_input
+        FCFANS=hwmon1/device/pwm1=hwmon1/device/fan1_input
+        MINTEMP=hwmon1/device/pwm1=25
+        MAXTEMP=hwmon1/device/pwm1=65
+        MINSTART=hwmon1/device/pwm1=150
+        MINSTOP=hwmon1/device/pwm1=0
+      '';
+    };
+
+  # This configuration cannot be valid for the test VM, so fancontrol is expected to report an 'outdated' configuration error.
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("fancontrol.service")
+    machine.wait_until_succeeds(
+        "journalctl -eu fancontrol | grep 'Configuration appears to be outdated'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fenics.nix b/nixpkgs/nixos/tests/fenics.nix
new file mode 100644
index 000000000000..7252d19e4e65
--- /dev/null
+++ b/nixpkgs/nixos/tests/fenics.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  fenicsScript = pkgs.writeScript "poisson.py" ''
+    #!/usr/bin/env python
+    from dolfin import *
+
+    mesh = UnitSquareMesh(4, 4)
+    V = FunctionSpace(mesh, "Lagrange", 1)
+
+    def boundary(x):
+        return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS
+
+    u0 = Constant(0.0)
+    bc = DirichletBC(V, u0, boundary)
+
+    u = TrialFunction(V)
+    v = TestFunction(V)
+    f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)", degree=2)
+    g = Expression("sin(5*x[0])", degree=2)
+    a = inner(grad(u), grad(v))*dx
+    L = f*v*dx + g*v*ds
+
+    u = Function(V)
+    solve(a == L, u, bc)
+    print(u)
+  '';
+in
+{
+  name = "fenics";
+  meta = {
+    maintainers = with pkgs.stdenv.lib.maintainers; [ knedlsepp ];
+  };
+
+  nodes = {
+    fenicsnode = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [
+        gcc
+        (python3.withPackages (ps: with ps; [ fenics ]))
+      ];
+      virtualisation.memorySize = 512;
+    };
+  };
+  testScript =
+    { nodes, ... }:
+    ''
+      start_all()
+      fenicsnode.succeed("${fenicsScript}")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/ferm.nix b/nixpkgs/nixos/tests/ferm.nix
new file mode 100644
index 000000000000..a73c9ce739cf
--- /dev/null
+++ b/nixpkgs/nixos/tests/ferm.nix
@@ -0,0 +1,74 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "ferm";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mic92 ];
+  };
+
+  nodes =
+    { client =
+        { pkgs, ... }:
+        with pkgs.lib;
+        {
+          networking = {
+            dhcpcd.enable = false;
+            interfaces.eth1.ipv6.addresses = mkOverride 0 [ { address = "fd00::2"; prefixLength = 64; } ];
+            interfaces.eth1.ipv4.addresses = mkOverride 0 [ { address = "192.168.1.2"; prefixLength = 24; } ];
+          };
+      };
+      server =
+        { pkgs, ... }:
+        with pkgs.lib;
+        {
+          networking = {
+            dhcpcd.enable = false;
+            useNetworkd = true;
+            useDHCP = false;
+            interfaces.eth1.ipv6.addresses = mkOverride 0 [ { address = "fd00::1"; prefixLength = 64; } ];
+            interfaces.eth1.ipv4.addresses = mkOverride 0 [ { address = "192.168.1.1"; prefixLength = 24; } ];
+          };
+
+          services = {
+            ferm.enable = true;
+            ferm.config = ''
+              domain (ip ip6) table filter chain INPUT {
+                interface lo ACCEPT;
+                proto tcp dport 8080 REJECT reject-with tcp-reset;
+              }
+            '';
+            nginx.enable = true;
+            nginx.httpConfig = ''
+              server {
+                listen 80;
+                listen [::]:80;
+                listen 8080;
+                listen [::]:8080;
+
+                location /status { stub_status on; }
+              }
+            '';
+          };
+        };
+    };
+
+  testScript =
+    ''
+      start_all()
+
+      client.wait_for_unit("network-online.target")
+      server.wait_for_unit("ferm.service")
+      server.wait_for_unit("nginx.service")
+      server.wait_until_succeeds("ss -ntl | grep -q 80")
+
+      with subtest("port 80 is allowed"):
+          client.succeed("curl --fail -g http://192.168.1.1:80/status")
+          client.succeed("curl --fail -g http://[fd00::1]:80/status")
+
+      with subtest("port 8080 is not allowed"):
+          server.succeed("curl --fail -g http://192.168.1.1:8080/status")
+          server.succeed("curl --fail -g http://[fd00::1]:8080/status")
+
+          client.fail("curl --fail -g http://192.168.1.1:8080/status")
+          client.fail("curl --fail -g http://[fd00::1]:8080/status")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/firefox.nix b/nixpkgs/nixos/tests/firefox.nix
new file mode 100644
index 000000000000..7071baceba73
--- /dev/null
+++ b/nixpkgs/nixos/tests/firefox.nix
@@ -0,0 +1,39 @@
+import ./make-test-python.nix ({ pkgs, esr ? false, ... }: {
+  name = "firefox";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco shlevy ];
+  };
+
+  machine =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      environment.systemPackages =
+        (if esr then [ pkgs.firefox-esr ] else [ pkgs.firefox ])
+        ++ [ pkgs.xdotool ];
+    };
+
+  testScript = ''
+      machine.wait_for_x()
+
+      with subtest("wait until Firefox has finished loading the Valgrind docs page"):
+          machine.execute(
+              "xterm -e 'firefox file://${pkgs.valgrind.doc}/share/doc/valgrind/html/index.html' &"
+          )
+          machine.wait_for_window("Valgrind")
+          machine.sleep(40)
+
+      with subtest("Close default browser prompt"):
+          machine.execute("xdotool key space")
+
+      with subtest("Hide default browser window"):
+          machine.sleep(2)
+          machine.execute("xdotool key F12")
+
+      with subtest("wait until Firefox draws the developer tool panel"):
+          machine.sleep(10)
+          machine.succeed("xwininfo -root -tree | grep Valgrind")
+          machine.screenshot("screen")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/firewall.nix b/nixpkgs/nixos/tests/firewall.nix
new file mode 100644
index 000000000000..09a1fef852e6
--- /dev/null
+++ b/nixpkgs/nixos/tests/firewall.nix
@@ -0,0 +1,65 @@
+# Test the firewall module.
+
+import ./make-test-python.nix ( { pkgs, ... } : {
+  name = "firewall";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes =
+    { walled =
+        { ... }:
+        { networking.firewall.enable = true;
+          networking.firewall.logRefusedPackets = true;
+          services.httpd.enable = true;
+          services.httpd.adminAddr = "foo@example.org";
+        };
+
+      # Dummy configuration to check whether firewall.service will be honored
+      # during system activation. It only needs to differ from the original
+      # walled configuration so that the service file changes.
+      walled2 =
+        { ... }:
+        { networking.firewall.enable = true;
+          networking.firewall.rejectPackets = true;
+        };
+
+      attacker =
+        { ... }:
+        { services.httpd.enable = true;
+          services.httpd.adminAddr = "foo@example.org";
+          networking.firewall.enable = false;
+        };
+    };
+
+  testScript = { nodes, ... }: let
+    newSystem = nodes.walled2.config.system.build.toplevel;
+  in ''
+    start_all()
+
+    walled.wait_for_unit("firewall")
+    walled.wait_for_unit("httpd")
+    attacker.wait_for_unit("network.target")
+
+    # Local connections should still work.
+    walled.succeed("curl -v http://localhost/ >&2")
+
+    # Connections to the firewalled machine should fail, but ping should succeed.
+    attacker.fail("curl --fail --connect-timeout 2 http://walled/ >&2")
+    attacker.succeed("ping -c 1 walled >&2")
+
+    # Outgoing connections/pings should still work.
+    walled.succeed("curl -v http://attacker/ >&2")
+    walled.succeed("ping -c 1 attacker >&2")
+
+    # If we stop the firewall, then connections should succeed.
+    walled.stop_job("firewall")
+    attacker.succeed("curl -v http://walled/ >&2")
+
+    # Check whether activation of a new configuration reloads the firewall.
+    walled.succeed(
+        "${newSystem}/bin/switch-to-configuration test 2>&1 | grep -qF firewall.service"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fish.nix b/nixpkgs/nixos/tests/fish.nix
new file mode 100644
index 000000000000..68fba428439b
--- /dev/null
+++ b/nixpkgs/nixos/tests/fish.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "fish";
+
+  machine =
+    { pkgs, ... }:
+
+    {
+      programs.fish.enable = true;
+      environment.systemPackages = with pkgs; [
+        coreutils
+        procps # provides a kill that collides with coreutils', to exercise https://github.com/NixOS/nixpkgs/issues/56432
+      ];
+    };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_file("/etc/fish/generated_completions/coreutils.fish")
+      machine.wait_for_file("/etc/fish/generated_completions/kill.fish")
+      machine.succeed(
+          "fish -ic 'echo $fish_complete_path' | grep -q '/share/fish/completions /etc/fish/generated_completions /root/.local/share/fish/generated_completions$'"
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/flannel.nix b/nixpkgs/nixos/tests/flannel.nix
new file mode 100644
index 000000000000..7615732c20ca
--- /dev/null
+++ b/nixpkgs/nixos/tests/flannel.nix
@@ -0,0 +1,57 @@
+import ./make-test-python.nix ({ lib, ...} : {
+  name = "flannel";
+
+  meta = with lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = let
+    flannelConfig = { pkgs, ... } : {
+      services.flannel = {
+        enable = true;
+        backend = {
+          Type = "udp";
+          Port = 8285;
+        };
+        network = "10.1.0.0/16";
+        iface = "eth1";
+        etcd.endpoints = ["http://etcd:2379"];
+      };
+
+      networking.firewall.allowedUDPPorts = [ 8285 ];
+    };
+  in {
+    etcd = { ... }: {
+      services = {
+        etcd = {
+          enable = true;
+          listenClientUrls = ["http://0.0.0.0:2379"]; # requires an IP address for binding
+          listenPeerUrls = ["http://0.0.0.0:2380"]; # requires an IP address for binding
+          advertiseClientUrls = ["http://etcd:2379"];
+          initialAdvertisePeerUrls = ["http://etcd:2379"];
+          initialCluster = ["etcd=http://etcd:2379"];
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 2379 ];
+    };
+
+    node1 = flannelConfig;
+    node2 = flannelConfig;
+  };
+
+  testScript = ''
+    start_all()
+
+    node1.wait_for_unit("flannel.service")
+    node2.wait_for_unit("flannel.service")
+
+    node1.wait_until_succeeds("ip l show dev flannel0")
+    ip1 = node1.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
+    node2.wait_until_succeeds("ip l show dev flannel0")
+    ip2 = node2.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
+
+    node1.wait_until_succeeds(f"ping -c 1 {ip2}")
+    node2.wait_until_succeeds(f"ping -c 1 {ip1}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fluentd.nix b/nixpkgs/nixos/tests/fluentd.nix
new file mode 100644
index 000000000000..918f2f87db17
--- /dev/null
+++ b/nixpkgs/nixos/tests/fluentd.nix
@@ -0,0 +1,49 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "fluentd";
+
+  machine = { pkgs, ... }: {
+    services.fluentd = {
+      enable = true;
+      config = ''
+        <source>
+          @type http
+          port 9880
+        </source>
+
+        <match **>
+          type copy
+          <store>
+            @type file
+            format json
+            path /tmp/fluentd
+            symlink_path /tmp/current-log
+          </store>
+          <store>
+            @type stdout
+          </store>
+        </match>
+      '';
+    };
+  };
+
+  testScript = let
+    testMessage = "an example log message";
+
+    payload = pkgs.writeText "test-message.json" (builtins.toJSON {
+      inherit testMessage;
+    });
+  in ''
+    machine.start()
+    machine.wait_for_unit("fluentd.service")
+    machine.wait_for_open_port(9880)
+
+    machine.succeed(
+        "curl -fsSL -X POST -H 'Content-type: application/json' -d @${payload} http://localhost:9880/test.tag"
+    )
+
+    # blocking flush
+    machine.succeed("systemctl stop fluentd")
+
+    machine.succeed("grep '${testMessage}' /tmp/current-log")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fontconfig-default-fonts.nix b/nixpkgs/nixos/tests/fontconfig-default-fonts.nix
new file mode 100644
index 000000000000..68c6ac9e9c83
--- /dev/null
+++ b/nixpkgs/nixos/tests/fontconfig-default-fonts.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ lib, ... }:
+{
+  name = "fontconfig-default-fonts";
+
+  meta.maintainers = with lib.maintainers; [
+    jtojnar
+    worldofpeace
+  ];
+
+  machine = { config, pkgs, ... }: {
+    fonts.enableDefaultFonts = true; # Background fonts
+    fonts.fonts = with pkgs; [
+      noto-fonts-emoji
+      cantarell-fonts
+      twitter-color-emoji
+      source-code-pro
+      gentium
+    ];
+    fonts.fontconfig.defaultFonts = {
+      serif = [ "Gentium Plus" ];
+      sansSerif = [ "Cantarell" ];
+      monospace = [ "Source Code Pro" ];
+      emoji = [ "Twitter Color Emoji" ];
+    };
+  };
+
+  testScript = ''
+    machine.succeed("fc-match serif | grep '\"Gentium Plus\"'")
+    machine.succeed("fc-match sans-serif | grep '\"Cantarell\"'")
+    machine.succeed("fc-match monospace | grep '\"Source Code Pro\"'")
+    machine.succeed("fc-match emoji | grep '\"Twitter Color Emoji\"'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/freeswitch.nix b/nixpkgs/nixos/tests/freeswitch.nix
new file mode 100644
index 000000000000..349d0e7bc6f0
--- /dev/null
+++ b/nixpkgs/nixos/tests/freeswitch.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "freeswitch";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ misuzu ];
+  };
+  nodes = {
+    node0 = { config, lib, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.1";
+            prefixLength = 24;
+          }
+        ];
+      };
+      services.freeswitch = {
+        enable = true;
+        enableReload = true;
+        configTemplate = "${config.services.freeswitch.package}/share/freeswitch/conf/minimal";
+      };
+    };
+  };
+  testScript = ''
+    node0.wait_for_unit("freeswitch.service")
+    # Wait for SIP port to be open
+    node0.wait_for_open_port("5060")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fsck.nix b/nixpkgs/nixos/tests/fsck.nix
new file mode 100644
index 000000000000..e522419fde2b
--- /dev/null
+++ b/nixpkgs/nixos/tests/fsck.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix {
+  name = "fsck";
+
+  machine = { lib, ... }: {
+    virtualisation.emptyDiskImages = [ 1 ];
+
+    fileSystems = lib.mkVMOverride {
+      "/mnt" = {
+        device = "/dev/vdb";
+        fsType = "ext4";
+        autoFormat = true;
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("default.target")
+
+    with subtest("root fs is fsckd"):
+        machine.succeed("journalctl -b | grep 'fsck.ext4.*/dev/vda'")
+
+    with subtest("mnt fs is fsckd"):
+        machine.succeed("journalctl -b | grep 'fsck.*/dev/vdb.*clean'")
+        machine.succeed(
+            "grep 'Requires=systemd-fsck@dev-vdb.service' /run/systemd/generator/mnt.mount"
+        )
+        machine.succeed(
+            "grep 'After=systemd-fsck@dev-vdb.service' /run/systemd/generator/mnt.mount"
+        )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/gerrit.nix b/nixpkgs/nixos/tests/gerrit.nix
new file mode 100644
index 000000000000..6cee64a20095
--- /dev/null
+++ b/nixpkgs/nixos/tests/gerrit.nix
@@ -0,0 +1,55 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  lfs = pkgs.fetchurl {
+    url = "https://gerrit-ci.gerritforge.com/job/plugin-lfs-bazel-master/90/artifact/bazel-bin/plugins/lfs/lfs.jar";
+    sha256 = "023b0kd8djm3cn1lf1xl67yv3j12yl8bxccn42lkfmwxjwjfqw6h";
+  };
+
+in {
+  name = "gerrit";
+
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ flokli zimbatm ];
+  };
+
+  nodes = {
+    server =
+      { config, pkgs, ... }: {
+        networking.firewall.allowedTCPPorts = [ 80 2222 ];
+
+        virtualisation.memorySize = 1024;
+
+        services.gerrit = {
+          enable = true;
+          serverId = "aa76c84b-50b0-4711-a0a0-1ee30e45bbd0";
+          listenAddress = "[::]:80";
+          jvmHeapLimit = "1g";
+
+          plugins = [ lfs ];
+          builtinPlugins = [ "hooks" "webhooks" ];
+          settings = {
+            gerrit.canonicalWebUrl = "http://server";
+            lfs.plugin = "lfs";
+            plugins.allowRemoteAdmin = true;
+            sshd.listenAddress = "[::]:2222";
+            sshd.advertisedAddress = "[::]:2222";
+          };
+        };
+      };
+
+    client =
+      { ... }: {
+      };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("gerrit.service")
+    server.wait_for_open_port(80)
+    client.succeed("curl http://server")
+
+    server.wait_for_open_port(2222)
+    client.succeed("nc -z server 2222")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gitdaemon.nix b/nixpkgs/nixos/tests/gitdaemon.nix
new file mode 100644
index 000000000000..b610caf06fb2
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitdaemon.nix
@@ -0,0 +1,64 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  hashes = pkgs.writeText "hashes" ''
+    b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c  /project/bar
+  '';
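+  # The checksum above is the sha256 of "foo\n", i.e. of the file created on
+  # the server below via `echo foo > /project/bar`.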
+in {
+  name = "gitdaemon";
+
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ tilpner ];
+  };
+
+  nodes = {
+    server =
+      { config, ... }: {
+        networking.firewall.allowedTCPPorts = [ config.services.gitDaemon.port ];
+
+        environment.systemPackages = [ pkgs.git ];
+
+        services.gitDaemon = {
+          enable = true;
+          basePath = "/git";
+        };
+      };
+
+    client =
+      { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("create project.git"):
+        server.succeed(
+            "mkdir /git",
+            "git init --bare /git/project.git",
+            "touch /git/project.git/git-daemon-export-ok",
+        )
+
+    with subtest("add file to project.git"):
+        server.succeed(
+            "git clone /git/project.git /project",
+            "echo foo > /project/bar",
+            "git config --global user.email 'you@example.com'",
+            "git config --global user.name 'Your Name'",
+            "git -C /project add bar",
+            "git -C /project commit -m 'quux'",
+            "git -C /project push",
+            "rm -r /project",
+        )
+
+    with subtest("git daemon starts"):
+        server.wait_for_unit("git-daemon.service")
+
+    with subtest("client can clone project.git"):
+        client.succeed(
+            "git clone git://server/project.git /project",
+            "sha256sum -c ${hashes}",
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gitea.nix b/nixpkgs/nixos/tests/gitea.nix
new file mode 100644
index 000000000000..aaed2486421f
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitea.nix
@@ -0,0 +1,109 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  supportedDbTypes = [ "mysql" "postgres" "sqlite3" ];
+  makeGiteaTest = type: nameValuePair type (makeTest {
+    name = "gitea-${type}";
+    meta.maintainers = with maintainers; [ aanderse kolaente ma27 ];
+
+    nodes = {
+      server = { config, pkgs, ... }: {
+        services.gitea = {
+          enable = true;
+          database = { inherit type; };
+          disableRegistration = true;
+        };
+        environment.systemPackages = [ pkgs.gitea pkgs.jq ];
+        services.openssh.enable = true;
+      };
+      client1 = { config, pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+      client2 = { config, pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+    };
+
+    testScript = let
+      inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+    in ''
+      GIT_SSH_COMMAND = "ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no"
+      REPO = "gitea@server:test/repo"
+      PRIVK = "${snakeOilPrivateKey}"
+
+      start_all()
+
+      client1.succeed("mkdir /tmp/repo")
+      client1.succeed("mkdir -p $HOME/.ssh")
+      client1.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
+      client1.succeed("chmod 0400 $HOME/.ssh/privk")
+      client1.succeed("git -C /tmp/repo init")
+      client1.succeed("echo hello world > /tmp/repo/testfile")
+      client1.succeed("git -C /tmp/repo add .")
+      client1.succeed("git config --global user.email test@localhost")
+      client1.succeed("git config --global user.name test")
+      client1.succeed("git -C /tmp/repo commit -m 'Initial import'")
+      client1.succeed(f"git -C /tmp/repo remote add origin {REPO}")
+
+      server.wait_for_unit("gitea.service")
+      server.wait_for_open_port(3000)
+      server.succeed("curl --fail http://localhost:3000/")
+
+      server.succeed(
+          "curl --fail http://localhost:3000/user/sign_up | grep 'Registration is disabled. "
+          + "Please contact your site administrator.'"
+      )
+      server.succeed(
+          "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin create-user "
+          + "--username test --password totallysafe --email test@localhost'"
+      )
+
+      api_token = server.succeed(
+          "curl --fail -X POST http://test:totallysafe@localhost:3000/api/v1/users/test/tokens "
+          + "-H 'Accept: application/json' -H 'Content-Type: application/json' -d "
+          + "'{\"name\":\"token\"}' | jq '.sha1' | xargs echo -n"
+      )
+
+      server.succeed(
+          "curl --fail -X POST http://localhost:3000/api/v1/user/repos "
+          + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+          + f"-H 'Authorization: token {api_token}'"
+          + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
+      )
+
+      server.succeed(
+          "curl --fail -X POST http://localhost:3000/api/v1/user/keys "
+          + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+          + f"-H 'Authorization: token {api_token}'"
+          + ' -d \'{"key":"${snakeOilPublicKey}","read_only":true,"title":"SSH"}\'''
+      )
+
+      client1.succeed(
+          f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git -C /tmp/repo push origin master"
+      )
+
+      client2.succeed("mkdir -p $HOME/.ssh")
+      client2.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
+      client2.succeed("chmod 0400 $HOME/.ssh/privk")
+      client2.succeed(f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git clone {REPO}")
+      client2.succeed('test "$(cat repo/testfile | xargs echo -n)" = "hello world"')
+
+      server.succeed(
+          'test "$(curl http://localhost:3000/api/v1/repos/test/repo/commits '
+          + '-H "Accept: application/json" | jq length)" = "1"'
+      )
+
+      client1.shutdown()
+      client2.shutdown()
+      server.shutdown()
+    '';
+  });
+in
+
+listToAttrs (map makeGiteaTest supportedDbTypes)
diff --git a/nixpkgs/nixos/tests/gitlab.nix b/nixpkgs/nixos/tests/gitlab.nix
new file mode 100644
index 000000000000..7e4e8bcef92d
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitlab.nix
@@ -0,0 +1,97 @@
+# This test runs gitlab and checks if it works
+
+let
+  initialRootPassword = "notproduction";
+in
+import ./make-test-python.nix ({ pkgs, lib, ...} : with lib; {
+  name = "gitlab";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ globin ];
+  };
+
+  nodes = {
+    gitlab = { ... }: {
+      virtualisation.memorySize = if pkgs.stdenv.is64bit then 4096 else 2047;
+      systemd.services.gitlab.serviceConfig.Restart = mkForce "no";
+      systemd.services.gitlab-workhorse.serviceConfig.Restart = mkForce "no";
+      systemd.services.gitaly.serviceConfig.Restart = mkForce "no";
+      systemd.services.gitlab-sidekiq.serviceConfig.Restart = mkForce "no";
+
+      services.nginx = {
+        enable = true;
+        recommendedProxySettings = true;
+        virtualHosts = {
+          localhost = {
+            locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
+          };
+        };
+      };
+
+      services.gitlab = {
+        enable = true;
+        databasePasswordFile = pkgs.writeText "dbPassword" "xo0daiF4";
+        initialRootPasswordFile = pkgs.writeText "rootPassword" initialRootPassword;
+        smtp.enable = true;
+        secrets = {
+          secretFile = pkgs.writeText "secret" "Aig5zaic";
+          otpFile = pkgs.writeText "otpsecret" "Riew9mue";
+          dbFile = pkgs.writeText "dbsecret" "we2quaeZ";
+          jwsFile = pkgs.runCommand "oidcKeyBase" {} "${pkgs.openssl}/bin/openssl genrsa 2048 > $out";
+        };
+      };
+    };
+  };
+
+  testScript =
+  let
+    auth = pkgs.writeText "auth.json" (builtins.toJSON {
+      grant_type = "password";
+      username = "root";
+      password = initialRootPassword;
+    });
+
+    createProject = pkgs.writeText "create-project.json" (builtins.toJSON {
+      name = "test";
+    });
+
+    putFile = pkgs.writeText "put-file.json" (builtins.toJSON {
+      branch = "master";
+      author_email = "author@example.com";
+      author_name = "Firstname Lastname";
+      content = "some content";
+      commit_message = "create a new file";
+    });
+  in
+  ''
+    gitlab.start()
+    gitlab.wait_for_unit("gitaly.service")
+    gitlab.wait_for_unit("gitlab-workhorse.service")
+    gitlab.wait_for_unit("gitlab.service")
+    gitlab.wait_for_unit("gitlab-sidekiq.service")
+    gitlab.wait_for_file("/var/gitlab/state/tmp/sockets/gitlab.socket")
+    gitlab.wait_until_succeeds("curl -sSf http://gitlab/users/sign_in")
+    gitlab.succeed(
+        "curl -isSf http://gitlab | grep -i location | grep -q http://gitlab/users/sign_in"
+    )
+    gitlab.succeed(
+        "${pkgs.sudo}/bin/sudo -u gitlab -H gitlab-rake gitlab:check 1>&2"
+    )
+    gitlab.succeed(
+        "echo \"Authorization: Bearer \$(curl -X POST -H 'Content-Type: application/json' -d @${auth} http://gitlab/oauth/token | ${pkgs.jq}/bin/jq -r '.access_token')\" >/tmp/headers"
+    )
+    gitlab.succeed(
+        "curl -X POST -H 'Content-Type: application/json' -H @/tmp/headers -d @${createProject} http://gitlab/api/v4/projects"
+    )
+    gitlab.succeed(
+        "curl -X POST -H 'Content-Type: application/json' -H @/tmp/headers -d @${putFile} http://gitlab/api/v4/projects/1/repository/files/some-file.txt"
+    )
+    gitlab.succeed(
+        "curl -H @/tmp/headers http://gitlab/api/v4/projects/1/repository/archive.tar.gz > /tmp/archive.tar.gz"
+    )
+    gitlab.succeed(
+        "curl -H @/tmp/headers http://gitlab/api/v4/projects/1/repository/archive.tar.bz2 > /tmp/archive.tar.bz2"
+    )
+    gitlab.succeed("test -s /tmp/archive.tar.gz")
+    gitlab.succeed("test -s /tmp/archive.tar.bz2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gitolite-fcgiwrap.nix b/nixpkgs/nixos/tests/gitolite-fcgiwrap.nix
new file mode 100644
index 000000000000..414b7d6fe7ef
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitolite-fcgiwrap.nix
@@ -0,0 +1,93 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+
+    let
+      user = "gitolite-admin";
+      password = "some_password";
+
+      # not used but needed to set up gitolite
+      adminPublicKey = ''
+        ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7urFhAA90BTpGuEHeWWTY3W/g9PBxXNxfWhfbrm4Le root@client
+      '';
+    in
+      {
+        name = "gitolite-fcgiwrap";
+
+        meta = with pkgs.stdenv.lib.maintainers; {
+          maintainers = [ bbigras ];
+        };
+
+        nodes = {
+
+          server =
+            { ... }:
+              {
+                networking.firewall.allowedTCPPorts = [ 80 ];
+
+                services.fcgiwrap.enable = true;
+                services.gitolite = {
+                  enable = true;
+                  adminPubkey = adminPublicKey;
+                };
+
+                services.nginx = {
+                  enable = true;
+                  recommendedProxySettings = true;
+                  virtualHosts."server".locations."/git".extraConfig = ''
+                    # turn off gzip as git objects are already well compressed
+                    gzip off;
+
+                    # use file based basic authentication
+                    auth_basic "Git Repository Authentication";
+                    auth_basic_user_file /etc/gitolite/htpasswd;
+
+                    # common FastCGI parameters are required
+                    include ${pkgs.nginx}/conf/fastcgi_params;
+
+                    # strip the CGI program prefix
+                    fastcgi_split_path_info ^(/git)(.*)$;
+                    fastcgi_param PATH_INFO $fastcgi_path_info;
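+                    # e.g. a request for /git/gitolite-admin.git/info/refs
+                    # reaches gitolite-shell with
+                    # PATH_INFO=/gitolite-admin.git/info/refs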
+
+                    # pass the authenticated user's login (mandatory) to Gitolite
+                    fastcgi_param REMOTE_USER $remote_user;
+
+                    # pass git repository root directory and hosting user directory
+                    # these env variables can be set in a wrapper script
+                    fastcgi_param GIT_HTTP_EXPORT_ALL "";
+                    fastcgi_param GIT_PROJECT_ROOT /var/lib/gitolite/repositories;
+                    fastcgi_param GITOLITE_HTTP_HOME /var/lib/gitolite;
+                    fastcgi_param SCRIPT_FILENAME ${pkgs.gitolite}/bin/gitolite-shell;
+
+                    # use Unix domain socket or inet socket
+                    fastcgi_pass unix:/run/fcgiwrap.sock;
+                  '';
+                };
+
+                # WARNING: DON'T DO THIS IN PRODUCTION!
+                # This puts unhashed secrets directly into the Nix store for ease of testing.
+                environment.etc."gitolite/htpasswd".source = pkgs.runCommand "htpasswd" {} ''
+                  ${pkgs.apacheHttpd}/bin/htpasswd -bc "$out" ${user} ${password}
+                '';
+              };
+
+          client =
+            { pkgs, ... }:
+              {
+                environment.systemPackages = [ pkgs.git ];
+              };
+        };
+
+        testScript = ''
+          start_all()
+
+          server.wait_for_unit("gitolite-init.service")
+          server.wait_for_unit("nginx.service")
+          server.wait_for_file("/run/fcgiwrap.sock")
+
+          client.wait_for_unit("multi-user.target")
+          client.succeed(
+              "git clone http://${user}:${password}@server/git/gitolite-admin.git"
+          )
+        '';
+      }
+)
diff --git a/nixpkgs/nixos/tests/gitolite.nix b/nixpkgs/nixos/tests/gitolite.nix
new file mode 100644
index 000000000000..a928645bd80f
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitolite.nix
@@ -0,0 +1,138 @@
+import ./make-test-python.nix ({ pkgs, ...}:
+
+let
+  adminPrivateKey = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACDu7qxYQAPdAU6RrhB3llk2N1v4PTwcVzcX1oX265uC3gAAAJBJiYxDSYmM
+    QwAAAAtzc2gtZWQyNTUxOQAAACDu7qxYQAPdAU6RrhB3llk2N1v4PTwcVzcX1oX265uC3g
+    AAAEDE1W6vMwSEUcF1r7Hyypm/+sCOoDmKZgPxi3WOa1mD2u7urFhAA90BTpGuEHeWWTY3
+    W/g9PBxXNxfWhfbrm4LeAAAACGJmb0BtaW5pAQIDBAU=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+
+  adminPublicKey = ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7urFhAA90BTpGuEHeWWTY3W/g9PBxXNxfWhfbrm4Le root@client
+  '';
+
+  alicePrivateKey = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACBbeWvHh/AWGWI6EIc1xlSihyXtacNQ9KeztlW/VUy8wQAAAJAwVQ5VMFUO
+    VQAAAAtzc2gtZWQyNTUxOQAAACBbeWvHh/AWGWI6EIc1xlSihyXtacNQ9KeztlW/VUy8wQ
+    AAAEB7lbfkkdkJoE+4TKHPdPQWBKLSx+J54Eg8DaTr+3KoSlt5a8eH8BYZYjoQhzXGVKKH
+    Je1pw1D0p7O2Vb9VTLzBAAAACGJmb0BtaW5pAQIDBAU=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+
+  alicePublicKey = pkgs.writeText "id_ed25519.pub" ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFt5a8eH8BYZYjoQhzXGVKKHJe1pw1D0p7O2Vb9VTLzB alice@client
+  '';
+
+  bobPrivateKey = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACCWTaJ1D9Xjxy6759FvQ9oXTes1lmWBciXPkEeqTikBMAAAAJDQBmNV0AZj
+    VQAAAAtzc2gtZWQyNTUxOQAAACCWTaJ1D9Xjxy6759FvQ9oXTes1lmWBciXPkEeqTikBMA
+    AAAEDM1IYYFUwk/IVxauha9kuR6bbRtT3gZ6ZA0GLb9txb/pZNonUP1ePHLrvn0W9D2hdN
+    6zWWZYFyJc+QR6pOKQEwAAAACGJmb0BtaW5pAQIDBAU=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+
+  bobPublicKey = pkgs.writeText "id_ed25519.pub" ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJZNonUP1ePHLrvn0W9D2hdN6zWWZYFyJc+QR6pOKQEw bob@client
+  '';
+
+  gitoliteAdminConfSnippet = pkgs.writeText "gitolite-admin-conf-snippet" ''
+    repo alice-project
+        RW+     =   alice
+  '';
+in
+{
+  name = "gitolite";
+
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ bjornfor ];
+  };
+
+  nodes = {
+
+    server =
+      { ... }:
+      {
+        services.gitolite = {
+          enable = true;
+          adminPubkey = adminPublicKey;
+        };
+        services.openssh.enable = true;
+      };
+
+    client =
+      { pkgs, ... }:
+      {
+        environment.systemPackages = [ pkgs.git ];
+        programs.ssh.extraConfig = ''
+          Host *
+            UserKnownHostsFile /dev/null
+            StrictHostKeyChecking no
+            # there's nobody around who can enter a password
+            PreferredAuthentications publickey
+        '';
+        users.users.alice = { isNormalUser = true; };
+        users.users.bob = { isNormalUser = true; };
+      };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("can setup ssh keys on system"):
+        client.succeed(
+            "mkdir -p ~root/.ssh",
+            "cp ${adminPrivateKey} ~root/.ssh/id_ed25519",
+            "chmod 600 ~root/.ssh/id_ed25519",
+        )
+        client.succeed(
+            "sudo -u alice mkdir -p ~alice/.ssh",
+            "sudo -u alice cp ${alicePrivateKey} ~alice/.ssh/id_ed25519",
+            "sudo -u alice chmod 600 ~alice/.ssh/id_ed25519",
+        )
+        client.succeed(
+            "sudo -u bob mkdir -p ~bob/.ssh",
+            "sudo -u bob cp ${bobPrivateKey} ~bob/.ssh/id_ed25519",
+            "sudo -u bob chmod 600 ~bob/.ssh/id_ed25519",
+        )
+
+    with subtest("gitolite server starts"):
+        server.wait_for_unit("gitolite-init.service")
+        server.wait_for_unit("sshd.service")
+        client.succeed("ssh gitolite@server info")
+
+    with subtest("admin can clone and configure gitolite-admin.git"):
+        client.succeed(
+            "git clone gitolite@server:gitolite-admin.git",
+            "git config --global user.name 'System Administrator'",
+            "git config --global user.email root\@domain.example",
+            "cp ${alicePublicKey} gitolite-admin/keydir/alice.pub",
+            "cp ${bobPublicKey} gitolite-admin/keydir/bob.pub",
+            "(cd gitolite-admin && git add . && git commit -m 'Add keys for alice, bob' && git push)",
+            "cat ${gitoliteAdminConfSnippet} >> gitolite-admin/conf/gitolite.conf",
+            "(cd gitolite-admin && git add . && git commit -m 'Add repo for alice' && git push)",
+        )
+
+    with subtest("non-admins cannot clone gitolite-admin.git"):
+        client.fail("sudo -i -u alice git clone gitolite@server:gitolite-admin.git")
+        client.fail("sudo -i -u bob git clone gitolite@server:gitolite-admin.git")
+
+    with subtest("non-admins can clone testing.git"):
+        client.succeed("sudo -i -u alice git clone gitolite@server:testing.git")
+        client.succeed("sudo -i -u bob git clone gitolite@server:testing.git")
+
+    with subtest("alice can clone alice-project.git"):
+        client.succeed("sudo -i -u alice git clone gitolite@server:alice-project.git")
+
+    with subtest("bob cannot clone alice-project.git"):
+        client.fail("sudo -i -u bob git clone gitolite@server:alice-project.git")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/glusterfs.nix b/nixpkgs/nixos/tests/glusterfs.nix
new file mode 100644
index 000000000000..cb07bc09511d
--- /dev/null
+++ b/nixpkgs/nixos/tests/glusterfs.nix
@@ -0,0 +1,68 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+
+let
+  client = { pkgs, ... } : {
+    environment.systemPackages = [ pkgs.glusterfs ];
+    fileSystems = pkgs.lib.mkVMOverride
+      { "/gluster" =
+          { device = "server1:/gv0";
+            fsType = "glusterfs";
+          };
+      };
+  };
+
+  server = { pkgs, ... } : {
+    networking.firewall.enable = false;
+    services.glusterfs.enable = true;
+
+    # create a mount point for the volume
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb
+    '';
+
+    virtualisation.emptyDiskImages = [ 1024 ];
+
+    fileSystems = pkgs.lib.mkVMOverride
+      { "/data" =
+          { device = "/dev/disk/by-label/data";
+            fsType = "ext4";
+          };
+      };
+  };
+in {
+  name = "glusterfs";
+
+  nodes = {
+    server1 = server;
+    server2 = server;
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = ''
+    server1.wait_for_unit("glusterd.service")
+    server2.wait_for_unit("glusterd.service")
+
+    server1.wait_until_succeeds("gluster peer status")
+    server2.wait_until_succeeds("gluster peer status")
+
+    # establish initial contact
+    server1.succeed("gluster peer probe server2")
+    server1.succeed("gluster peer probe server1")
+
+    server1.succeed("gluster peer status | grep Connected")
+
+    # create volumes
+    server1.succeed("mkdir -p /data/vg0")
+    server2.succeed("mkdir -p /data/vg0")
+    server1.succeed("gluster volume create gv0 server1:/data/vg0 server2:/data/vg0")
+    server1.succeed("gluster volume start gv0")
+
+    # test clients
+    client1.wait_for_unit("gluster.mount")
+    client2.wait_for_unit("gluster.mount")
+
+    client1.succeed("echo test > /gluster/file1")
+    client2.succeed("grep test /gluster/file1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gnome3-xorg.nix b/nixpkgs/nixos/tests/gnome3-xorg.nix
new file mode 100644
index 000000000000..b59badcd5de4
--- /dev/null
+++ b/nixpkgs/nixos/tests/gnome3-xorg.nix
@@ -0,0 +1,79 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "gnome3-xorg";
+  meta = with lib; {
+    maintainers = teams.gnome.members;
+  };
+
+  machine = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in
+
+    { imports = [ ./common/user-account.nix ];
+
+      services.xserver.enable = true;
+
+      services.xserver.displayManager.gdm = {
+        enable = true;
+        autoLogin = {
+          enable = true;
+          user = user.name;
+        };
+      };
+
+      services.xserver.desktopManager.gnome3.enable = true;
+      services.xserver.displayManager.defaultSession = "gnome-xorg";
+
+      virtualisation.memorySize = 1024;
+    };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+    uid = toString user.uid;
+    bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus";
+    xauthority = "/run/user/${uid}/gdm/Xauthority";
+    display = "DISPLAY=:0.0";
+    env = "${bus} XAUTHORITY=${xauthority} ${display}";
+    gdbus = "${env} gdbus";
+    su = command: "su - ${user.name} -c '${env} ${command}'";
+
+    # Call JavaScript in GNOME Shell via D-Bus. It returns a tuple
+    # (success, output), where `success` is true if the D-Bus call worked and
+    # `output` is what the JavaScript evaluates to.
+    eval = "call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval";
+
+    # False when startup is done
+    startingUp = su "${gdbus} ${eval} Main.layoutManager._startingUp";
+
+    # Start gnome-terminal
+    gnomeTerminalCommand = su "gnome-terminal";
+
+    # Hopefully gnome-terminal's wm class
+    wmClass = su "${gdbus} ${eval} global.display.focus_window.wm_class";
+  in ''
+      with subtest("Login to GNOME Xorg with GDM"):
+          machine.wait_for_x()
+          # Wait for alice to be logged in"
+          machine.wait_for_unit("default.target", "${user.name}")
+          machine.wait_for_file("${xauthority}")
+          machine.succeed("xauth merge ${xauthority}")
+          # Check that logging in has given the user ownership of devices
+          assert "alice" in machine.succeed("getfacl -p /dev/snd/timer")
+
+      with subtest("Wait for GNOME Shell"):
+          # correct output should be (true, 'false')
+          machine.wait_until_succeeds(
+              "${startingUp} | grep -q 'true,..false'"
+          )
+
+      with subtest("Open Gnome Terminal"):
+          machine.succeed(
+              "${gnomeTerminalCommand}"
+          )
+          # correct output should be (true, '"Gnome-terminal"')
+          machine.wait_until_succeeds(
+              "${wmClass} | grep -q  'true,...Gnome-terminal'"
+          )
+          machine.sleep(20)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/gnome3.nix b/nixpkgs/nixos/tests/gnome3.nix
new file mode 100644
index 000000000000..17e72c5f6510
--- /dev/null
+++ b/nixpkgs/nixos/tests/gnome3.nix
@@ -0,0 +1,76 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "gnome3";
+  meta = with lib; {
+    maintainers = teams.gnome.members;
+  };
+
+  machine =
+    { ... }:
+
+    { imports = [ ./common/user-account.nix ];
+
+      services.xserver.enable = true;
+
+      services.xserver.displayManager.gdm = {
+        enable = true;
+        autoLogin = {
+          enable = true;
+          user = "alice";
+        };
+      };
+
+      services.xserver.desktopManager.gnome3.enable = true;
+
+      virtualisation.memorySize = 1024;
+    };
+
+  testScript = { nodes, ... }: let
+    # Keep line widths somewhat manageable
+    user = nodes.machine.config.users.users.alice;
+    uid = toString user.uid;
+    bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus";
+    gdbus = "${bus} gdbus";
+    su = command: "su - ${user.name} -c '${command}'";
+
+    # Call JavaScript in GNOME Shell via D-Bus. It returns a tuple
+    # (success, output), where `success` is true if the D-Bus call worked and
+    # `output` is what the JavaScript evaluates to.
+    eval = "call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval";
+
+    # False when startup is done
+    startingUp = su "${gdbus} ${eval} Main.layoutManager._startingUp";
+
+    # Start gnome-terminal
+    gnomeTerminalCommand = su "${bus} gnome-terminal";
+
+    # Hopefully gnome-terminal's wm class
+    wmClass = su "${gdbus} ${eval} global.display.focus_window.wm_class";
+  in ''
+      with subtest("Login to GNOME with GDM"):
+          # wait for gdm to start
+          machine.wait_for_unit("display-manager.service")
+          # wait for the wayland server
+          machine.wait_for_file("/run/user/${uid}/wayland-0")
+          # wait for alice to be logged in
+          machine.wait_for_unit("default.target", "${user.name}")
+          # check that logging in has given the user ownership of devices
+          assert "alice" in machine.succeed("getfacl -p /dev/snd/timer")
+
+      with subtest("Wait for GNOME Shell"):
+          # correct output should be (true, 'false')
+          machine.wait_until_succeeds(
+              "${startingUp} | grep -q 'true,..false'"
+          )
+
+      with subtest("Open Gnome Terminal"):
+          machine.succeed(
+              "${gnomeTerminalCommand}"
+          )
+          # correct output should be (true, '"gnome-terminal-server"')
+          machine.wait_until_succeeds(
+              "${wmClass} | grep -q 'gnome-terminal-server'"
+          )
+          machine.sleep(20)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/gocd-agent.nix b/nixpkgs/nixos/tests/gocd-agent.nix
new file mode 100644
index 000000000000..5b630a40736e
--- /dev/null
+++ b/nixpkgs/nixos/tests/gocd-agent.nix
@@ -0,0 +1,48 @@
+# verifies:
+#   1. GoCD agent starts
+#   2. GoCD agent responds
+#   3. GoCD agent is available on GoCD server using GoCD API
+#     3.1. https://api.go.cd/current/#get-all-agents
+
+let
+  serverUrl = "localhost:8153/go/api/agents";
+  header = "Accept: application/vnd.go.cd.v2+json";
+in
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "gocd-agent";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ grahamc swarren83 ];
+
+    # The GoCD agent needs to register with the autoregister key created on the
+    # first server startup, but the NixOS module doesn't currently seem to
+    # allow passing it in at runtime.
+    broken = true;
+  };
+
+  nodes = {
+    agent =
+      { ... }:
+      {
+        virtualisation.memorySize = 2046;
+        services.gocd-agent = {
+          enable = true;
+        };
+        services.gocd-server = {
+          enable = true;
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    agent.wait_for_unit("gocd-server")
+    agent.wait_for_open_port("8153")
+    agent.wait_for_unit("gocd-agent")
+    agent.wait_until_succeeds(
+        "curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].uuid"
+    )
+    agent.succeed(
+        "curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].agent_state | grep -q Idle"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gocd-server.nix b/nixpkgs/nixos/tests/gocd-server.nix
new file mode 100644
index 000000000000..20faf85a1ccd
--- /dev/null
+++ b/nixpkgs/nixos/tests/gocd-server.nix
@@ -0,0 +1,28 @@
+# verifies:
+#   1. GoCD server starts
+#   2. GoCD server responds
+
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "gocd-server";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ swarren83 ];
+  };
+
+  nodes = {
+    server =
+      { ... }:
+      {
+        virtualisation.memorySize = 2046;
+        services.gocd-server.enable = true;
+      };
+  };
+
+  testScript = ''
+    server.start()
+    server.wait_for_unit("gocd-server")
+    server.wait_for_open_port(8153)
+    server.wait_until_succeeds("curl -s -f localhost:8153/go")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/google-oslogin/default.nix b/nixpkgs/nixos/tests/google-oslogin/default.nix
new file mode 100644
index 000000000000..1977e92e9877
--- /dev/null
+++ b/nixpkgs/nixos/tests/google-oslogin/default.nix
@@ -0,0 +1,70 @@
+import ../make-test-python.nix ({ pkgs, ... } :
+let
+  inherit (import ./../ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+
+    # don't check host keys or known hosts, use the snakeoil ssh key
+    ssh-config = builtins.toFile "ssh.conf" ''
+      UserKnownHostsFile=/dev/null
+      StrictHostKeyChecking=no
+      IdentityFile=~/.ssh/id_snakeoil
+    '';
+in {
+  name = "google-oslogin";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ adisbladis flokli ];
+  };
+
+  nodes = {
+    # the server provides both the mocked google metadata server and the ssh server
+    server = (import ./server.nix pkgs);
+
+    client = { ... }: {};
+  };
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("mock-google-metadata.service")
+    server.wait_for_open_port(80)
+
+    # mockserver should return a non-expired ssh key for both mockuser and mockadmin
+    server.succeed(
+        '${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys mockuser | grep -q "${snakeOilPublicKey}"'
+    )
+    server.succeed(
+        '${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys mockadmin | grep -q "${snakeOilPublicKey}"'
+    )
+
+    # install snakeoil ssh key on the client, and provision .ssh/config file
+    client.succeed("mkdir -p ~/.ssh")
+    client.succeed(
+        "cat ${snakeOilPrivateKey} > ~/.ssh/id_snakeoil"
+    )
+    client.succeed("chmod 600 ~/.ssh/id_snakeoil")
+    client.succeed("cp ${ssh-config} ~/.ssh/config")
+
+    client.wait_for_unit("network.target")
+    server.wait_for_unit("sshd.service")
+
+    # we should not be able to connect as a non-existent user
+    client.fail("ssh ghost@server 'true'")
+
+    # we should be able to connect as mockuser
+    client.succeed("ssh mockuser@server 'true'")
+    # but we shouldn't be able to sudo
+    client.fail(
+        "ssh mockuser@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
+    )
+
+    # we should also be able to log in as mockadmin
+    client.succeed("ssh mockadmin@server 'true'")
+    # pam_oslogin_admin.so should now have generated a sudoers file
+    server.succeed("find /run/google-sudoers.d | grep -q '/run/google-sudoers.d/mockadmin'")
+
+    # and we should be able to sudo
+    client.succeed(
+        "ssh mockadmin@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/google-oslogin/server.nix b/nixpkgs/nixos/tests/google-oslogin/server.nix
new file mode 100644
index 000000000000..fdb7141da317
--- /dev/null
+++ b/nixpkgs/nixos/tests/google-oslogin/server.nix
@@ -0,0 +1,29 @@
+{ pkgs, ... }:
+let
+  inherit (import ./../ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+in {
+  networking.firewall.allowedTCPPorts = [ 80 ];
+
+  systemd.services.mock-google-metadata = {
+    description = "Mock Google metadata service";
+    serviceConfig.Type = "simple";
+    serviceConfig.ExecStart = "${pkgs.python3}/bin/python ${./server.py}";
+    environment = {
+      SNAKEOIL_PUBLIC_KEY = snakeOilPublicKey;
+    };
+    wantedBy = [ "multi-user.target" ];
+    after = [ "network.target" ];
+  };
+
+  services.openssh.enable = true;
+  services.openssh.challengeResponseAuthentication = false;
+  services.openssh.passwordAuthentication = false;
+
+  security.googleOsLogin.enable = true;
+
+  # Mock google service
+  networking.extraHosts = ''
+    127.0.0.1 metadata.google.internal
+  '';
+}
diff --git a/nixpkgs/nixos/tests/google-oslogin/server.py b/nixpkgs/nixos/tests/google-oslogin/server.py
new file mode 100644
index 000000000000..bfc527cb97d3
--- /dev/null
+++ b/nixpkgs/nixos/tests/google-oslogin/server.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+import json
+import sys
+import time
+import os
+import hashlib
+import base64
+
+from http.server import BaseHTTPRequestHandler, HTTPServer
+from typing import Dict
+
+SNAKEOIL_PUBLIC_KEY = os.environ['SNAKEOIL_PUBLIC_KEY']
+
+
+def w(msg):
+    sys.stderr.write(f"{msg}\n")
+    sys.stderr.flush()
+
+
+def gen_fingerprint(pubkey):
+    decoded_key = base64.b64decode(pubkey.encode("ascii").split()[1])
+    return hashlib.sha256(decoded_key).hexdigest()
+
+def gen_email(username):
+    """username seems to be a 21 characters long number string, so mimic that in a reproducible way"""
+    return str(int(hashlib.sha256(username.encode()).hexdigest(), 16))[0:21]
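+    # illustration (a sketch of the intended behaviour, not called by itself):
+    #   assert gen_email("mockuser") == gen_email("mockuser")  # deterministic
+    #   assert len(gen_email("mockuser")) == 21                # 21 digits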
+
+def gen_mockuser(username: str, uid: str, gid: str, home_directory: str, snakeoil_pubkey: str) -> Dict:
+    snakeoil_pubkey_fingerprint = gen_fingerprint(snakeoil_pubkey)
+    # the email also seems to be a 21-character-long number string, so mimic that in a reproducible way
+    email = gen_email(username)
+    return {
+        "loginProfiles": [
+            {
+                "name": email,
+                "posixAccounts": [
+                    {
+                        "primary": True,
+                        "username": username,
+                        "uid": uid,
+                        "gid": gid,
+                        "homeDirectory": home_directory,
+                        "operatingSystemType": "LINUX"
+                    }
+                ],
+                "sshPublicKeys": {
+                    snakeoil_pubkey_fingerprint: {
+                        "key": snakeoil_pubkey,
+                        "expirationTimeUsec": str((time.time() + 600) * 1000000),  # 10 minutes in the future
+                        "fingerprint": snakeoil_pubkey_fingerprint
+                    }
+                }
+            }
+        ]
+    }
+
+
+class ReqHandler(BaseHTTPRequestHandler):
+    def _send_json_ok(self, data):
+        self.send_response(200)
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+        out = json.dumps(data).encode()
+        w(out)
+        self.wfile.write(out)
+
+    def do_GET(self):
+        p = str(self.path)
+        # mockuser and mockadmin are allowed to log in; both use the same snakeoil public key
+        if p == '/computeMetadata/v1/oslogin/users?username=mockuser' \
+            or p == '/computeMetadata/v1/oslogin/users?uid=1009719690':
+            self._send_json_ok(gen_mockuser(username='mockuser', uid='1009719690', gid='1009719690',
+                                            home_directory='/home/mockuser', snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY))
+        elif p == '/computeMetadata/v1/oslogin/users?username=mockadmin' \
+            or p == '/computeMetadata/v1/oslogin/users?uid=1009719691':
+            self._send_json_ok(gen_mockuser(username='mockadmin', uid='1009719691', gid='1009719691',
+                                            home_directory='/home/mockadmin', snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY))
+
+        # mockuser is allowed to log in
+        elif p == f"/computeMetadata/v1/oslogin/authorize?email={gen_email('mockuser')}&policy=login":
+            self._send_json_ok({'success': True})
+
+        # mockadmin may also become root
+        elif p == f"/computeMetadata/v1/oslogin/authorize?email={gen_email('mockadmin')}&policy=login" or p == f"/computeMetadata/v1/oslogin/authorize?email={gen_email('mockadmin')}&policy=adminLogin":
+            self._send_json_ok({'success': True})
+        else:
+            sys.stderr.write(f"Unhandled path: {p}\n")
+            sys.stderr.flush()
+            self.send_response(501)
+            self.end_headers()
+            self.wfile.write(b'')
+
+
+if __name__ == '__main__':
+    s = HTTPServer(('0.0.0.0', 80), ReqHandler)
+    s.serve_forever()
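+
+# manual smoke test (a sketch, not part of the automated test): with the
+# server running, the mock API can be probed with e.g.
+#   curl 'http://localhost/computeMetadata/v1/oslogin/users?username=mockuser'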
diff --git a/nixpkgs/nixos/tests/gotify-server.nix b/nixpkgs/nixos/tests/gotify-server.nix
new file mode 100644
index 000000000000..c6e00686aed9
--- /dev/null
+++ b/nixpkgs/nixos/tests/gotify-server.nix
@@ -0,0 +1,45 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "gotify-server";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ma27 ];
+  };
+
+  machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.jq ];
+
+    services.gotify = {
+      enable = true;
+      port = 3000;
+    };
+  };
+
+  testScript = ''
+    machine.start()
+
+    machine.wait_for_unit("gotify-server.service")
+    machine.wait_for_open_port(3000)
+
+    token = machine.succeed(
+        "curl --fail -sS -X POST localhost:3000/application -F name=nixos "
+        + '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" '
+        + "| jq .token | xargs echo -n"
+    )
+
+    usertoken = machine.succeed(
+        "curl --fail -sS -X POST localhost:3000/client -F name=nixos "
+        + '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" '
+        + "| jq .token | xargs echo -n"
+    )
+
+    machine.succeed(
+        f"curl --fail -sS -X POST 'localhost:3000/message?token={token}' -H 'Accept: application/json' "
+        + "-F title=Gotify -F message=Works"
+    )
+
+    title = machine.succeed(
+        f"curl --fail -sS 'localhost:3000/message?since=0&token={usertoken}' | jq '.messages|.[0]|.title' | xargs echo -n"
+    )
+
+    assert title == "Gotify"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/grafana.nix b/nixpkgs/nixos/tests/grafana.nix
new file mode 100644
index 000000000000..4b453ece7f1e
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana.nix
@@ -0,0 +1,97 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+  inherit (lib) mkMerge nameValuePair maintainers;
+
+  baseGrafanaConf = {
+    services.grafana = {
+      enable = true;
+      addr = "localhost";
+      analytics.reporting.enable = false;
+      domain = "localhost";
+      security = {
+        adminUser = "testadmin";
+        adminPassword = "snakeoilpwd";
+      };
+    };
+  };
+
+  extraNodeConfs = {
+    postgresql = {
+      services.grafana.database = {
+        host = "127.0.0.1:5432";
+        user = "grafana";
+      };
+      services.postgresql = {
+        enable = true;
+        ensureDatabases = [ "grafana" ];
+        ensureUsers = [{
+          name = "grafana";
+          ensurePermissions."DATABASE grafana" = "ALL PRIVILEGES";
+        }];
+      };
+      systemd.services.grafana.after = [ "postgresql.service" ];
+    };
+
+    mysql = {
+      services.grafana.database.user = "grafana";
+      services.mysql = {
+        enable = true;
+        ensureDatabases = [ "grafana" ];
+        ensureUsers = [{
+          name = "grafana";
+          ensurePermissions."grafana.*" = "ALL PRIVILEGES";
+        }];
+        package = pkgs.mariadb;
+      };
+      systemd.services.grafana.after = [ "mysql.service" ];
+    };
+  };
+
+  nodes = builtins.listToAttrs (map (dbName:
+    nameValuePair dbName (mkMerge [
+    baseGrafanaConf
+    (extraNodeConfs.${dbName} or {})
+  ])) [ "sqlite" "postgresql" "mysql" ]);
+
+in {
+  name = "grafana";
+
+  meta = with maintainers; {
+    maintainers = [ willibutz ];
+  };
+
+  inherit nodes;
+
+  testScript = ''
+    start_all()
+
+    with subtest("Successful API query as admin user with sqlite db"):
+        sqlite.wait_for_unit("grafana.service")
+        sqlite.wait_for_open_port(3000)
+        sqlite.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep -q testadmin\@localhost"
+        )
+        sqlite.shutdown()
+
+    with subtest("Successful API query as admin user with postgresql db"):
+        postgresql.wait_for_unit("grafana.service")
+        postgresql.wait_for_unit("postgresql.service")
+        postgresql.wait_for_open_port(3000)
+        postgresql.wait_for_open_port(5432)
+        postgresql.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep -q testadmin\@localhost"
+        )
+        postgresql.shutdown()
+
+    with subtest("Successful API query as admin user with mysql db"):
+        mysql.wait_for_unit("grafana.service")
+        mysql.wait_for_unit("mysql.service")
+        mysql.wait_for_open_port(3000)
+        mysql.wait_for_open_port(3306)
+        mysql.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep -q testadmin\@localhost"
+        )
+        mysql.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/graphite.nix b/nixpkgs/nixos/tests/graphite.nix
new file mode 100644
index 000000000000..71776a94cbd6
--- /dev/null
+++ b/nixpkgs/nixos/tests/graphite.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, ... } :
+{
+  name = "graphite";
+  meta = {
+    # Fails on dependency `python-2.7-Twisted`'s test suite
+    # complaining `ImportError: No module named zope.interface`.
+    broken = true;
+  };
+  nodes = {
+    one =
+      { ... }: {
+        virtualisation.memorySize = 1024;
+        time.timeZone = "UTC";
+        services.graphite = {
+          web = {
+            enable = true;
+            extraConfig = ''
+              SECRET_KEY = "abcd";
+            '';
+          };
+          api = {
+            enable = true;
+            port = 8082;
+            finders = [ pkgs.python3Packages.influxgraph ];
+          };
+          carbon.enableCache = true;
+          seyren.enable = false;  # Implicitly requires openssl-1.0.2u, which is marked as insecure
+          beacon.enable = true;
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    one.wait_for_unit("default.target")
+    one.wait_for_unit("graphiteWeb.service")
+    one.wait_for_unit("graphiteApi.service")
+    one.wait_for_unit("graphite-beacon.service")
+    one.wait_for_unit("carbonCache.service")
+    # The services above are of type "simple". systemd considers them active immediately
+    # even if they're still in preStart (which takes quite a while for graphiteWeb).
+    # Wait for ports to open so we're sure the services are up and listening.
+    one.wait_for_open_port(8080)
+    one.wait_for_open_port(2003)
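+    # carbon's plaintext protocol is "<metric path> <value> <timestamp>";
+    # the line below stores value 1 for metric "foo" at the current UNIX time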
+    one.succeed('echo "foo 1 `date +%s`" | nc -N localhost 2003')
+    one.wait_until_succeeds(
+        "curl 'http://localhost:8080/metrics/find/?query=foo&format=treejson' --silent | grep foo >&2"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/graylog.nix b/nixpkgs/nixos/tests/graylog.nix
new file mode 100644
index 000000000000..2d22012fa7c0
--- /dev/null
+++ b/nixpkgs/nixos/tests/graylog.nix
@@ -0,0 +1,115 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "graylog";
+  meta.maintainers = with lib.maintainers; [ ma27 ];
+
+  machine = { pkgs, ... }: {
+    virtualisation.memorySize = 4096;
+    virtualisation.diskSize = 4096;
+
+    services.mongodb.enable = true;
+    services.elasticsearch.enable = true;
+    services.elasticsearch.package = pkgs.elasticsearch-oss;
+    services.elasticsearch.extraConf = ''
+      network.publish_host: 127.0.0.1
+      network.bind_host: 127.0.0.1
+    '';
+
+    services.graylog = {
+      enable = true;
+      passwordSecret = "YGhZ59wXMrYOojx5xdgEpBpDw2N6FbhM4lTtaJ1KPxxmKrUvSlDbtWArwAWMQ5LKx1ojHEVrQrBMVRdXbRyZLqffoUzHfssc";
+      elasticsearchHosts = [ "http://localhost:9200" ];
+
+      # `echo -n "nixos" | shasum -a 256`
+      rootPasswordSha2 = "6ed332bcfa615381511d4d5ba44a293bb476f368f7e9e304f0dff50230d1a85b";
+    };
+
+    environment.systemPackages = [ pkgs.jq ];
+
+    systemd.services.graylog.path = [ pkgs.netcat ];
+    systemd.services.graylog.preStart = ''
+      until nc -z localhost 9200; do
+        sleep 2
+      done
+    '';
+  };
+
+  testScript = let
+    payloads.login = pkgs.writeText "login.json" (builtins.toJSON {
+      host = "127.0.0.1:9000";
+      username = "admin";
+      password = "nixos";
+    });
+
+    payloads.input = pkgs.writeText "input.json" (builtins.toJSON {
+      title = "Demo";
+      global = false;
+      type = "org.graylog2.inputs.gelf.udp.GELFUDPInput";
+      node = "@node@";
+      configuration = {
+        bind_address = "0.0.0.0";
+        decompress_size_limit = 8388608;
+        number_worker_threads = 1;
+        override_source = null;
+        port = 12201;
+        recv_buffer_size = 262144;
+      };
+    });
+
+    payloads.gelf_message = pkgs.writeText "gelf.json" (builtins.toJSON {
+      host = "example.org";
+      short_message = "A short message";
+      full_message = "A long message";
+      version = "1.1";
+      level = 5;
+      facility = "Test";
+    });
+  in ''
+    machine.start()
+    machine.wait_for_unit("graylog.service")
+    machine.wait_for_open_port(9000)
+    machine.succeed("curl -sSfL http://127.0.0.1:9000/")
+
+    session = machine.succeed(
+        "curl -X POST "
+        + "-sSfL http://127.0.0.1:9000/api/system/sessions "
+        + "-d $(cat ${payloads.login}) "
+        + "-H 'Content-Type: application/json' "
+        + "-H 'Accept: application/json' "
+        + "-H 'x-requested-by: cli' "
+        + "| jq .session_id | xargs echo"
+    ).rstrip()
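+
+    # Graylog API sessions use HTTP basic auth with the session id as the
+    # username and the literal string "session" as the password, hence
+    # "-u <session id>:session" in the requests below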
+
+    machine.succeed(
+        "curl -X POST "
+        + f"-sSfL http://127.0.0.1:9000/api/system/inputs -u {session}:session "
+        + '-d $(cat ${payloads.input} | sed -e "s,@node@,$(cat /var/lib/graylog/server/node-id),") '
+        + "-H 'Accept: application/json' "
+        + "-H 'Content-Type: application/json' "
+        + "-H 'x-requested-by: cli' "
+    )
+
+    machine.wait_until_succeeds(
+        "test \"$(curl -sSfL 'http://127.0.0.1:9000/api/cluster/inputstates' "
+        + f"-u {session}:session "
+        + "-H 'Accept: application/json' "
+        + "-H 'Content-Type: application/json' "
+        + "-H 'x-requested-by: cli'"
+        + "| jq 'to_entries[]|.value|.[0]|.state' | xargs echo"
+        + ')" = "RUNNING"'
+    )
+
+    machine.succeed(
+        "echo -n $(cat ${payloads.gelf_message}) | nc -w10 -u 127.0.0.1 12201"
+    )
+
+    machine.succeed(
+        'test "$(curl -X GET '
+        + "-sSfL 'http://127.0.0.1:9000/api/search/universal/relative?query=*' "
+        + f"-u {session}:session "
+        + "-H 'Accept: application/json' "
+        + "-H 'Content-Type: application/json' "
+        + "-H 'x-requested-by: cli'"
+        + ' | jq \'.total_results\' | xargs echo)" = "1"'
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/grocy.nix b/nixpkgs/nixos/tests/grocy.nix
new file mode 100644
index 000000000000..7fa479ed2c42
--- /dev/null
+++ b/nixpkgs/nixos/tests/grocy.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "grocy";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ma27 ];
+  };
+
+  machine = { pkgs, ... }: {
+    services.grocy = {
+      enable = true;
+      hostName = "localhost";
+      nginx.enableSSL = false;
+    };
+    environment.systemPackages = [ pkgs.jq ];
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_open_port(80)
+    machine.wait_for_unit("multi-user.target")
+
+    machine.succeed("curl -sSf http://localhost")
+
+    machine.succeed(
+        "curl -c cookies -sSf -X POST http://localhost/login -d 'username=admin&password=admin'"
+    )
+
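+    # curl wrote a Netscape-format cookie jar: seven tab-separated fields per
+    # cookie, with the value in field 7 (hence awk '{ print $7 }')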
+    cookie = machine.succeed(
+        "grep -v '^#' cookies | awk '{ print $7 }' | sed -e '/^$/d' | perl -pe 'chomp'"
+    )
+
+    machine.succeed(
+        f"curl -sSf -X POST http://localhost/api/objects/tasks -b 'grocy_session={cookie}' "
+        + '-d \'{"assigned_to_user_id":1,"name":"Test Task","due_date":"1970-01-01"}\'''
+        + " --header 'Content-Type: application/json'"
+    )
+
+    task_name = machine.succeed(
+        f"curl -sSf http://localhost/api/tasks -b 'grocy_session={cookie}' --header 'Accept: application/json' | jq '.[].name' | xargs echo | perl -pe 'chomp'"
+    )
+
+    assert task_name == "Test Task"
+
+    machine.succeed("curl -sSfI http://localhost/api/tasks 2>&1 | grep '401 Unauthorized'")
+
+    machine.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gvisor.nix b/nixpkgs/nixos/tests/gvisor.nix
new file mode 100644
index 000000000000..4d68a1d8a5f8
--- /dev/null
+++ b/nixpkgs/nixos/tests/gvisor.nix
@@ -0,0 +1,49 @@
+# This test runs a container through gVisor and checks that a simple container starts
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "gvisor";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ andrew-d ];
+  };
+
+  nodes = {
+    gvisor =
+      { pkgs, ... }:
+        {
+          virtualisation.docker = {
+            enable = true;
+            extraOptions = "--add-runtime runsc=${pkgs.gvisor}/bin/runsc";
+          };
+
+          networking = {
+            dhcpcd.enable = false;
+            defaultGateway = "192.168.1.1";
+            interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+              { address = "192.168.1.2"; prefixLength = 24; }
+            ];
+          };
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    gvisor.wait_for_unit("network.target")
+    gvisor.wait_for_unit("sockets.target")
+
+    # Start by verifying that gvisor itself works
+    output = gvisor.succeed(
+        "${pkgs.gvisor}/bin/runsc -alsologtostderr do ${pkgs.coreutils}/bin/echo hello world"
+    )
+    assert output.strip() == "hello world"
+
+    # Also test the Docker runtime
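+    # an empty tar piped into "docker import" yields a minimal scratch image;
+    # the bind mounts below provide the actual binaries at run time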
+    gvisor.succeed("tar cv --files-from /dev/null | docker import - scratchimg")
+    gvisor.succeed(
+        "docker run -d --name=sleeping --runtime=runsc -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+    )
+    gvisor.succeed("docker ps | grep sleeping")
+    gvisor.succeed("docker stop sleeping")
+  '';
+})
+
diff --git a/nixpkgs/nixos/tests/hadoop/hdfs.nix b/nixpkgs/nixos/tests/hadoop/hdfs.nix
new file mode 100644
index 000000000000..85aaab34b158
--- /dev/null
+++ b/nixpkgs/nixos/tests/hadoop/hdfs.nix
@@ -0,0 +1,54 @@
+import ../make-test-python.nix ({...}: {
+  nodes = {
+    namenode = {pkgs, ...}: {
+      services.hadoop = {
+        package = pkgs.hadoop_3_1;
+        hdfs.namenode.enabled = true;
+        coreSite = {
+          "fs.defaultFS" = "hdfs://namenode:8020";
+        };
+        hdfsSite = {
+          "dfs.replication" = 1;
+          "dfs.namenode.rpc-bind-host" = "0.0.0.0";
+          "dfs.namenode.http-bind-host" = "0.0.0.0";
+        };
+      };
+      networking.firewall.allowedTCPPorts = [
+        9870 # namenode.http-address
+        8020 # namenode.rpc-address
+      ];
+    };
+    datanode = {pkgs, ...}: {
+      services.hadoop = {
+        package = pkgs.hadoop_3_1;
+        hdfs.datanode.enabled = true;
+        coreSite = {
+          "fs.defaultFS" = "hdfs://namenode:8020";
+        };
+      };
+      networking.firewall.allowedTCPPorts = [
+        9864 # datanode.http.address
+        9866 # datanode.address
+        9867 # datanode.ipc.address
+      ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    namenode.wait_for_unit("hdfs-namenode")
+    namenode.wait_for_unit("network.target")
+    namenode.wait_for_open_port(8020)
+    namenode.wait_for_open_port(9870)
+
+    datanode.wait_for_unit("hdfs-datanode")
+    datanode.wait_for_unit("network.target")
+    datanode.wait_for_open_port(9864)
+    datanode.wait_for_open_port(9866)
+    datanode.wait_for_open_port(9867)
+
+    namenode.succeed("curl http://namenode:9870")
+    datanode.succeed("curl http://datanode:9864")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hadoop/yarn.nix b/nixpkgs/nixos/tests/hadoop/yarn.nix
new file mode 100644
index 000000000000..2264ecaff155
--- /dev/null
+++ b/nixpkgs/nixos/tests/hadoop/yarn.nix
@@ -0,0 +1,46 @@
+import ../make-test-python.nix ({...}: {
+  nodes = {
+    resourcemanager = {pkgs, ...}: {
+      services.hadoop.package = pkgs.hadoop_3_1;
+      services.hadoop.yarn.resourcemanager.enabled = true;
+      services.hadoop.yarnSite = {
+        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
+      };
+      networking.firewall.allowedTCPPorts = [
+        8088 # resourcemanager.webapp.address
+        8031 # resourcemanager.resource-tracker.address
+      ];
+    };
+    nodemanager = {pkgs, ...}: {
+      services.hadoop.package = pkgs.hadoop_3_1;
+      services.hadoop.yarn.nodemanager.enabled = true;
+      services.hadoop.yarnSite = {
+        "yarn.resourcemanager.hostname" = "resourcemanager";
+        "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
+        "yarn.nodemanager.address" = "0.0.0.0:8041";
+      };
+      networking.firewall.allowedTCPPorts = [
+        8042 # nodemanager.webapp.address
+        8041 # nodemanager.address
+      ];
+    };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    resourcemanager.wait_for_unit("yarn-resourcemanager")
+    resourcemanager.wait_for_unit("network.target")
+    resourcemanager.wait_for_open_port(8031)
+    resourcemanager.wait_for_open_port(8088)
+
+    nodemanager.wait_for_unit("yarn-nodemanager")
+    nodemanager.wait_for_unit("network.target")
+    nodemanager.wait_for_open_port(8042)
+    nodemanager.wait_for_open_port(8041)
+
+    resourcemanager.succeed("curl http://localhost:8088")
+    nodemanager.succeed("curl http://localhost:8042")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/haka.nix b/nixpkgs/nixos/tests/haka.nix
new file mode 100644
index 000000000000..3ca19cb0971c
--- /dev/null
+++ b/nixpkgs/nixos/tests/haka.nix
@@ -0,0 +1,24 @@
+# This test runs haka and probes it with hakactl
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "haka";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ tvestelind ];
+  };
+
+  nodes = {
+    haka =
+      { ... }:
+        {
+          services.haka.enable = true;
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    haka.wait_for_unit("haka.service")
+    haka.succeed("hakactl status")
+    haka.succeed("hakactl stop")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/handbrake.nix b/nixpkgs/nixos/tests/handbrake.nix
new file mode 100644
index 000000000000..e5fb6b269b19
--- /dev/null
+++ b/nixpkgs/nixos/tests/handbrake.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  # Download Big Buck Bunny example, licensed under CC Attribution 3.0.
+  testMkv = pkgs.fetchurl {
+    url = "https://github.com/Matroska-Org/matroska-test-files/blob/cf0792be144ac470c4b8052cfe19bb691993e3a2/test_files/test1.mkv?raw=true";
+    sha256 = "1hfxbbgxwfkzv85pvpvx55a72qsd0hxjbm9hkl5r3590zw4s75h9";
+  };
+in {
+  name = "handbrake";
+
+  meta = {
+    maintainers = with pkgs.stdenv.lib.maintainers; [ danieldk ];
+  };
+
+  machine = { pkgs, ... }: {
+    environment.systemPackages = with pkgs; [ handbrake ];
+  };
+
+  testScript = ''
+    # Test MP4 and MKV transcoding. Since this is a short clip, transcoding typically
+    # only takes a few seconds.
+    start_all()
+
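+    # HandBrakeCLI flags: -e x264 selects the H.264 encoder, -q 20 sets
+    # constant quality 20, and -B 160 sets the audio bitrate to 160 kbit/s.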
+    machine.succeed(
+        "HandBrakeCLI -i ${testMkv} -o test.mp4 -e x264 -q 20 -B 160"
+    )
+    machine.succeed(
+        "HandBrakeCLI -i ${testMkv} -o test.mkv -e x264 -q 20 -B 160"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/haproxy.nix b/nixpkgs/nixos/tests/haproxy.nix
new file mode 100644
index 000000000000..79f34b07faf4
--- /dev/null
+++ b/nixpkgs/nixos/tests/haproxy.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ pkgs, ...}: {
+  name = "haproxy";
+  nodes = {
+    machine = { ... }: {
+      imports = [ ../modules/profiles/minimal.nix ];
+      services.haproxy = {
+        enable = true;
+        config = ''
+          defaults
+            timeout connect 10s
+
+          backend http_server
+            mode http
+            server httpd [::1]:8000
+
+          frontend http
+            bind *:80
+            mode http
+            option http-use-htx
+            http-request use-service prometheus-exporter if { path /metrics }
+            use_backend http_server
+        '';
+      };
+      services.httpd = {
+        enable = true;
+        virtualHosts.localhost = {
+          documentRoot = pkgs.writeTextDir "index.txt" "We are all good!";
+          adminAddr = "notme@yourhost.local";
+          listen = [{
+            ip = "::1";
+            port = 8000;
+          }];
+        };
+      };
+    };
+  };
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("haproxy.service")
+    machine.wait_for_unit("httpd.service")
+    assert "We are all good!" in machine.succeed("curl -k http://localhost:80/index.txt")
+    assert "haproxy_process_pool_allocated_bytes" in machine.succeed(
+        "curl -k http://localhost:80/metrics"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hardened.nix b/nixpkgs/nixos/tests/hardened.nix
new file mode 100644
index 000000000000..5ed0dfcf9ab8
--- /dev/null
+++ b/nixpkgs/nixos/tests/hardened.nix
@@ -0,0 +1,130 @@
+import ./make-test.nix ({ pkgs, latestKernel ? false, ... } : {
+  name = "hardened";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ joachifm ];
+  };
+
+  machine =
+    { lib, pkgs, config, ... }:
+    with lib;
+    { users.users.alice = { isNormalUser = true; extraGroups = [ "proc" ]; };
+      users.users.sybil = { isNormalUser = true; group = "wheel"; };
+      imports = [ ../modules/profiles/hardened.nix ];
+      boot.kernelPackages =
+        lib.mkIf latestKernel pkgs.linuxPackages_latest_hardened;
+      environment.memoryAllocator.provider = "graphene-hardened";
+      nix.useSandbox = false;
+      virtualisation.emptyDiskImages = [ 4096 ];
+      boot.initrd.postDeviceCommands = ''
+        ${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb
+      '';
+      fileSystems = lib.mkVMOverride {
+        "/efi" = {
+          device = "/dev/disk/by-label/EFISYS";
+          fsType = "vfat";
+          options = [ "noauto" ];
+        };
+      };
+      boot.extraModulePackages =
+        optional (versionOlder config.boot.kernelPackages.kernel.version "5.6")
+          config.boot.kernelPackages.wireguard;
+      boot.kernelModules = [ "wireguard" ];
+    };
+
+  testScript =
+    let
+      hardened-malloc-tests = pkgs.stdenv.mkDerivation {
+        name = "hardened-malloc-tests-${pkgs.graphene-hardened-malloc.version}";
+        src = pkgs.graphene-hardened-malloc.src;
+        buildPhase = ''
+          cd test/simple-memory-corruption
+          make -j4
+        '';
+
+        installPhase = ''
+          find . -type f -executable -exec install -Dt $out/bin '{}' +
+        '';
+      };
+    in
+    ''
+      $machine->waitForUnit("multi-user.target");
+
+      subtest "apparmor-loaded", sub {
+          $machine->succeed("systemctl status apparmor.service");
+      };
+
+      # AppArmor securityfs
+      subtest "apparmor-securityfs", sub {
+          $machine->succeed("mountpoint -q /sys/kernel/security");
+          $machine->succeed("cat /sys/kernel/security/apparmor/profiles");
+      };
+
+      # Test loading out-of-tree modules
+      subtest "extra-module-packages", sub {
+          $machine->succeed("grep -Fq wireguard /proc/modules");
+      };
+
+      # Test hidepid
+      subtest "hidepid", sub {
+          $machine->succeed("grep -Fq hidepid=2 /proc/mounts");
+          # cannot use pgrep -u here, it segfaults when access to process info is denied
+          $machine->succeed("[ `su - sybil -c 'ps --no-headers --user root | wc -l'` = 0 ]");
+          $machine->succeed("[ `su - alice -c 'ps --no-headers --user root | wc -l'` != 0 ]");
+      };
+
+      # Test kernel module hardening
+      subtest "lock-modules", sub {
+          # note: this had better be a module we normally wouldn't load ...
+          $machine->fail("modprobe dccp");
+      };
+
+      # Test userns
+      subtest "userns", sub {
+          $machine->succeed("unshare --user true");
+          $machine->fail("su -l alice -c 'unshare --user true'");
+      };
+
+      # Test dmesg restriction
+      subtest "dmesg", sub {
+          $machine->fail("su -l alice -c dmesg");
+      };
+
+      # Test access to kcore
+      subtest "kcore", sub {
+          $machine->fail("cat /proc/kcore");
+      };
+
+      # Test deferred mount
+      subtest "mount", sub {
+        $machine->fail("mountpoint -q /efi"); # was deferred
+        $machine->execute("mkdir -p /efi");
+        $machine->succeed("mount /dev/disk/by-label/EFISYS /efi");
+        $machine->succeed("mountpoint -q /efi"); # now mounted
+      };
+
+      # Test Nix dæmon usage
+      subtest "nix-daemon", sub {
+        $machine->fail("su -l nobody -s /bin/sh -c 'nix ping-store'");
+        $machine->succeed("su -l alice -c 'nix ping-store'") =~ "OK";
+      };
+
+      # Test kernel image protection
+      subtest "kernelimage", sub {
+        $machine->fail("systemctl hibernate");
+        $machine->fail("systemctl kexec");
+      };
+
+      # Test hardened memory allocator
+      sub runMallocTestProg {
+          my ($progName, $errorText) = @_;
+          my $text = "fatal allocator error: " . $errorText;
+          $machine->fail("${hardened-malloc-tests}/bin/" . $progName) =~ $text;
+      };
+
+      subtest "hardenedmalloc", sub {
+        runMallocTestProg("double_free_large", "invalid free");
+        runMallocTestProg("unaligned_free_small", "invalid unaligned free");
+        runMallocTestProg("write_after_free_small", "detected write after free");
+      };
+    '';
+})
diff --git a/nixpkgs/nixos/tests/hibernate.nix b/nixpkgs/nixos/tests/hibernate.nix
new file mode 100644
index 000000000000..8251c6e7ef85
--- /dev/null
+++ b/nixpkgs/nixos/tests/hibernate.nix
@@ -0,0 +1,44 @@
+# Test whether hibernation from partition works.
+
+import ./make-test-python.nix (pkgs: {
+  name = "hibernate";
+
+  nodes = {
+    machine = { config, lib, pkgs, ... }: with lib; {
+      virtualisation.emptyDiskImages = [ config.virtualisation.memorySize ];
+
+      systemd.services.backdoor.conflicts = [ "sleep.target" ];
+
+      swapDevices = mkOverride 0 [ { device = "/dev/vdb"; } ];
+
+      networking.firewall.allowedTCPPorts = [ 4444 ];
+
+      systemd.services.listener.serviceConfig.ExecStart = "${pkgs.netcat}/bin/nc -l 4444 -k";
+    };
+
+    probe = { pkgs, ...}: {
+      environment.systemPackages = [ pkgs.netcat ];
+    };
+  };
+
+  # 9P doesn't support reconnecting to the virtio transport after hibernation,
+  # so the machine just hangs on any Nix store access.
+  # To work around this, we run a daemon on the machine that listens on a TCP
+  # port, and connect to it from the probe as the actual test.
+
+  testScript =
+    ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("mkswap /dev/vdb")
+      machine.succeed("swapon -a")
+      machine.start_job("listener")
+      machine.wait_for_open_port(4444)
+      machine.succeed("systemctl hibernate &")
+      machine.wait_for_shutdown()
+      probe.wait_for_unit("multi-user.target")
+      machine.start()
+      probe.wait_until_succeeds("echo test | nc machine 4444 -N")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/hitch/default.nix b/nixpkgs/nixos/tests/hitch/default.nix
new file mode 100644
index 000000000000..904d12619d70
--- /dev/null
+++ b/nixpkgs/nixos/tests/hitch/default.nix
@@ -0,0 +1,33 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+{
+  name = "hitch";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ jflanglois ];
+  };
+  machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.hitch = {
+      enable = true;
+      backend = "[127.0.0.1]:80";
+      pem-files = [
+        ./example.pem
+      ];
+    };
+
+    services.httpd = {
+      enable = true;
+      virtualHosts.localhost.documentRoot = ./example;
+      adminAddr = "noone@testing.nowhere";
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_unit("hitch.service")
+      machine.wait_for_open_port(443)
+      assert "We are all good!" in machine.succeed("curl -k https://localhost:443/index.txt")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/hitch/example.pem b/nixpkgs/nixos/tests/hitch/example.pem
new file mode 100644
index 000000000000..fde6f3cbd19a
--- /dev/null
+++ b/nixpkgs/nixos/tests/hitch/example.pem
@@ -0,0 +1,53 @@
+-----BEGIN CERTIFICATE-----
+MIIEKTCCAxGgAwIBAgIJAIFAWQXSZ7lIMA0GCSqGSIb3DQEBCwUAMIGqMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMUmVkd29vZCBD
+aXR5MRkwFwYDVQQKDBBUZXN0aW5nIDEyMyBJbmMuMRQwEgYDVQQLDAtJVCBTZXJ2
+aWNlczEYMBYGA1UEAwwPdGVzdGluZy5ub3doZXJlMSQwIgYJKoZIhvcNAQkBFhVu
+b29uZUB0ZXN0aW5nLm5vd2hlcmUwHhcNMTgwNDIzMDcxMTI5WhcNMTkwNDIzMDcx
+MTI5WjCBqjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFTATBgNV
+BAcMDFJlZHdvb2QgQ2l0eTEZMBcGA1UECgwQVGVzdGluZyAxMjMgSW5jLjEUMBIG
+A1UECwwLSVQgU2VydmljZXMxGDAWBgNVBAMMD3Rlc3Rpbmcubm93aGVyZTEkMCIG
+CSqGSIb3DQEJARYVbm9vbmVAdGVzdGluZy5ub3doZXJlMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAxQq6AA9o/QErMbQwfgDF4mqXcvglRTwPr2zPE6Rv
+1g0ncRBSMM8iKbPapHM6qHNfg2e1fU2SFqzD6HkyZqHHLCgLzkdzswEcEjsMqiUP
+OR++5g4CWoQrdTi31itzYzCjnQ45BrAMrLEhBQgDTNwrEE+Tit0gpOGggtj/ktLk
+OD8BKa640lkmWEUGF18fd3rYTUC4hwM5qhAVXTe21vj9ZWsgprpQKdN61v0dCUap
+C5eAgvZ8Re+Cd0Id674hK4cJ4SekqfHKv/jLyIg3Vsdc9nkhmiC4O6KH5f1Zzq2i
+E4Kd5mnJDFxfSzIErKWmbhriLWsj3KEJ983AGLJ9hxQTAwIDAQABo1AwTjAdBgNV
+HQ4EFgQU76Mm6DP/BePJRQUNrJ9z038zjocwHwYDVR0jBBgwFoAU76Mm6DP/BePJ
+RQUNrJ9z038zjocwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAAZzt
+VdPaUqrvDAh5rMYqzYMJ3tj6daNYoX6CbTFoevK5J5D4FESM0D/FMKgpNiVz39kB
+8Cjaw5rPHMHY61rHz7JRDK1sWXsonwzCF21BK7Tx0G1CIfLpYHWYb/FfdWGROx+O
+hPgKuoMRWQB+txozkZp5BqWJmk5MOyFCDEXhMOmrfsJq0IYU6QaH3Lsf1oJRy4yU
+afFrT9o3DLOyYLG/j/HXijCu8DVjZVa4aboum79ecYzPjjGF1posrFUnvQiuAeYy
+t7cuHNUB8gW9lWR5J7tP8fzFWtIcyT2oRL8u3H+fXf0i4bW73wtOBOoeULBzBNE7
+6rphcSrQunSZQIc+hg==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFCroAD2j9ASsx
+tDB+AMXiapdy+CVFPA+vbM8TpG/WDSdxEFIwzyIps9qkczqoc1+DZ7V9TZIWrMPo
+eTJmoccsKAvOR3OzARwSOwyqJQ85H77mDgJahCt1OLfWK3NjMKOdDjkGsAyssSEF
+CANM3CsQT5OK3SCk4aCC2P+S0uQ4PwEprrjSWSZYRQYXXx93ethNQLiHAzmqEBVd
+N7bW+P1layCmulAp03rW/R0JRqkLl4CC9nxF74J3Qh3rviErhwnhJ6Sp8cq/+MvI
+iDdWx1z2eSGaILg7oofl/VnOraITgp3mackMXF9LMgSspaZuGuItayPcoQn3zcAY
+sn2HFBMDAgMBAAECggEAcaR8HijFHpab+PC5vxJnDuz3KEHiDQpU6ZJR5DxEnCm+
+A8GsBaaRR4gJpCspO5o/DiS0Ue55QUanPt8XqIXJv7fhBznCiw0qyYDxDviMzR94
+FGskBFySS+tIa+dnh1+4HY7kaO0Egl0udB5o+N1KoP+kUsSyXSYcUxsgW+fx5FW9
+22Ya3HNWnWxMCSfSGGlTFXGj2whf25SkL25dM9iblO4ZOx4MX8kaXij7TaYy8hMM
+Vf6/OMnXqtPKho+ctZZVKZkE9PxdS4f/pnp5EsdoOZwNBtfQ1WqVLWd3DlGWhnsH
+7L8ZSP2HkoI4Pd1wtkpOKZc+yM2bFXWa8WY4TcmpUQKBgQD33HxGdtmtZehrexSA
+/ZwWJlMslUsNz4Ivv6s7J4WCRhdh94+r9TWQP/yHdT9Ry5bvn84I5ZLUdp+aA962
+mvjz+GIglkCGpA7HU/hqurB1O63pj2cIDB8qhV21zjVIoqXcQ7IBJ+tqD79nF8vm
+h3KfuHUhuu1rayGepbtIyNhLdwKBgQDLgw4TJBg/QB8RzYECk78QnfZpCExsQA/z
+YJpc+dF2/nsid5R2u9jWzfmgHM2Jjo2/+ofRUaTqcFYU0K57CqmQkOLIzsbNQoYt
+e2NOANNVHiZLuzTZC2r3BrrkNbo3YvQzhAesUA5lS6LfrxBLUKiwo2LU9NlmJs3b
+UPVFYI0/1QKBgCswxIcS1sOcam+wNtZzWuuRKhUuvrFdY3YmlBPuwxj8Vb7AgMya
+IgdM3xhLmgkKzPZchm6OcpOLSCxyWDDBuHfq5E6BYCUWGW0qeLNAbNdA2wFD99Qz
+KIskSjwP/sD1dql3MmF5L1CABf5U6zb0i0jBv8ds50o8lNMsVgJM3UPpAoGBAL1+
+nzllb4pdi1CJWKnspoizfQCZsIdPM0r71V/jYY36MO+MBtpz2NlSWzAiAaQm74gl
+oBdgfT2qMg0Zro11BSRONEykdOolGkj5TiMQk7b65s+3VeMPRZ8UTis2d9kgs5/Q
+PVDODkl1nwfGu1ZVmW04BUujXVZHpYCkJm1eFMetAoGAImE7gWj+qRMhpbtCCGCg
+z06gDKvMrF6S+GJsvUoSyM8oUtfdPodI6gWAC65NfYkIiqbpCaEVNzfui73f5Lnz
+p5X1IbzhuH5UZs/k5A3OR2PPDbPs3lqEw7YJdBdLVRmO1o824uaXaJJwkL/1C+lq
+8dh1wV3CnynNmZApkz4vpzQ=
+-----END PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/hitch/example/index.txt b/nixpkgs/nixos/tests/hitch/example/index.txt
new file mode 100644
index 000000000000..0478b1c26351
--- /dev/null
+++ b/nixpkgs/nixos/tests/hitch/example/index.txt
@@ -0,0 +1 @@
+We are all good!
diff --git a/nixpkgs/nixos/tests/hocker-fetchdocker/default.nix b/nixpkgs/nixos/tests/hocker-fetchdocker/default.nix
new file mode 100644
index 000000000000..4f30f01e4032
--- /dev/null
+++ b/nixpkgs/nixos/tests/hocker-fetchdocker/default.nix
@@ -0,0 +1,15 @@
+import ../make-test.nix ({ pkgs, ...} : {
+  name = "test-hocker-fetchdocker";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ixmatus ];
+  };
+
+  machine = import ./machine.nix;
+
+  testScript = ''
+    startAll;
+
+    $machine->waitForUnit("sockets.target");
+    $machine->waitUntilSucceeds("docker run registry-1.docker.io/v2/library/hello-world:latest");
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hocker-fetchdocker/hello-world-container.nix b/nixpkgs/nixos/tests/hocker-fetchdocker/hello-world-container.nix
new file mode 100644
index 000000000000..a127875264e9
--- /dev/null
+++ b/nixpkgs/nixos/tests/hocker-fetchdocker/hello-world-container.nix
@@ -0,0 +1,19 @@
+{ fetchDockerConfig, fetchDockerLayer, fetchdocker }:
+fetchdocker rec {
+    name = "hello-world";
+    registry = "https://registry-1.docker.io/v2/";
+    repository = "library";
+    imageName = "hello-world";
+    tag = "latest";
+    imageConfig = fetchDockerConfig {
+      inherit tag registry repository imageName;
+      sha256 = "1ivbd23hyindkahzfw4kahgzi6ibzz2ablmgsz6340vc6qr1gagj";
+    };
+    imageLayers = let
+      layer0 = fetchDockerLayer {
+        inherit registry repository imageName;
+        layerDigest = "ca4f61b1923c10e9eb81228bd46bee1dfba02b9c7dac1844527a734752688ede";
+        sha256 = "1plfd194fwvsa921ib3xkhms1yqxxrmx92r2h7myj41wjaqn2kya";
+      };
+      in [ layer0 ];
+  }
diff --git a/nixpkgs/nixos/tests/hocker-fetchdocker/machine.nix b/nixpkgs/nixos/tests/hocker-fetchdocker/machine.nix
new file mode 100644
index 000000000000..885adebe1498
--- /dev/null
+++ b/nixpkgs/nixos/tests/hocker-fetchdocker/machine.nix
@@ -0,0 +1,26 @@
+{ pkgs, ... }:
+{ nixpkgs.config.packageOverrides = pkgs': {
+    hello-world-container = pkgs'.callPackage ./hello-world-container.nix { };
+  };
+
+  virtualisation.docker = {
+    enable  = true;
+    package = pkgs.docker;
+  };
+
+  systemd.services.docker-load-fetchdocker-image = {
+    description = "Docker load hello-world-container";
+    wantedBy    = [ "multi-user.target" ];
+    wants       = [ "docker.service" ];
+    after       = [ "docker.service" ];
+
+    script = ''
+      ${pkgs.hello-world-container}/compositeImage.sh | ${pkgs.docker}/bin/docker load
+    '';
+
+    serviceConfig = {
+      Type = "oneshot";
+    };
+  };
+}
+
diff --git a/nixpkgs/nixos/tests/home-assistant.nix b/nixpkgs/nixos/tests/home-assistant.nix
new file mode 100644
index 000000000000..80dca43f1f3d
--- /dev/null
+++ b/nixpkgs/nixos/tests/home-assistant.nix
@@ -0,0 +1,111 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  configDir = "/var/lib/foobar";
+  apiPassword = "some_secret";
+  mqttPassword = "another_secret";
+  hassCli = "hass-cli --server http://hass:8123 --password '${apiPassword}'";
+in {
+  name = "home-assistant";
+  meta = with pkgs.stdenv.lib; {
+    maintainers = with maintainers; [ dotlambda ];
+  };
+
+  nodes = {
+    hass =
+      { pkgs, ... }:
+      {
+        environment.systemPackages = with pkgs; [
+          mosquitto home-assistant-cli
+        ];
+        services.home-assistant = {
+          inherit configDir;
+          enable = true;
+          package = pkgs.home-assistant.override {
+            extraPackages = ps: with ps; [ hbmqtt ];
+          };
+          config = {
+            homeassistant = {
+              name = "Home";
+              time_zone = "UTC";
+              latitude = "0.0";
+              longitude = "0.0";
+              elevation = 0;
+              auth_providers = [
+                {
+                  type = "legacy_api_password";
+                  api_password = apiPassword;
+                }
+              ];
+            };
+            frontend = { };
+            mqtt = { # Use hbmqtt as broker
+              password = mqttPassword;
+            };
+            binary_sensor = [
+              {
+                platform = "mqtt";
+                state_topic = "home-assistant/test";
+                payload_on = "let_there_be_light";
+                payload_off = "off";
+              }
+            ];
+          };
+          lovelaceConfig = {
+            title = "My Awesome Home";
+            views = [ {
+              title = "Example";
+              cards = [ {
+                type = "markdown";
+                title = "Lovelace";
+                content = "Welcome to your **Lovelace UI**.";
+              } ];
+            } ];
+          };
+          lovelaceConfigWritable = true;
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    hass.wait_for_unit("home-assistant.service")
+    with subtest("Check that YAML configuration file is in place"):
+        hass.succeed("test -L ${configDir}/configuration.yaml")
+    with subtest("lovelace config is copied because lovelaceConfigWritable = true"):
+        hass.succeed("test -f ${configDir}/ui-lovelace.yaml")
+    with subtest("Check that Home Assistant's web interface and API can be reached"):
+        hass.wait_for_open_port(8123)
+        hass.succeed("curl --fail http://localhost:8123/states")
+        assert "API running" in hass.succeed(
+            "curl --fail -H 'x-ha-access: ${apiPassword}' http://localhost:8123/api/"
+        )
+    with subtest("Toggle a binary sensor using MQTT"):
+        assert '"state": "off"' in hass.succeed(
+            "curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}'"
+        )
+        hass.wait_until_succeeds(
+            "mosquitto_pub -V mqttv311 -t home-assistant/test -u homeassistant -P '${mqttPassword}' -m let_there_be_light"
+        )
+        assert '"state": "on"' in hass.succeed(
+            "curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}'"
+        )
+    with subtest("Toggle a binary sensor using hass-cli"):
+        assert '"state": "on"' in hass.succeed(
+            "${hassCli} --output json state get binary_sensor.mqtt_binary_sensor"
+        )
+        hass.succeed(
+            "${hassCli} state edit binary_sensor.mqtt_binary_sensor --json='{\"state\": \"off\"}'"
+        )
+        assert '"state": "off"' in hass.succeed(
+            "curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}'"
+        )
+    with subtest("Print log to ease debugging"):
+        output_log = hass.succeed("cat ${configDir}/home-assistant.log")
+        print("\n### home-assistant.log ###\n")
+        print(output_log + "\n")
+
+    with subtest("Check that no errors were logged"):
+        assert "ERROR" not in output_log
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hound.nix b/nixpkgs/nixos/tests/hound.nix
new file mode 100644
index 000000000000..27c65abdf27c
--- /dev/null
+++ b/nixpkgs/nixos/tests/hound.nix
@@ -0,0 +1,59 @@
+# Test whether `houndd` indexes nixpkgs
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "hound";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ grahamc ];
+  };
+  machine = { pkgs, ... }: {
+    services.hound = {
+      enable = true;
+      config = ''
+        {
+          "max-concurrent-indexers": 1,
+          "dbpath": "/var/lib/hound/data",
+          "repos": {
+            "nix": {
+              "url": "file:///var/lib/hound/my-git"
+            }
+          }
+        }
+      '';
+    };
+
+    systemd.services.houndseed = {
+      description = "seed hound with a git repo";
+      requiredBy = [ "hound.service" ];
+      before = [ "hound.service" ];
+
+      serviceConfig = {
+        User = "hound";
+        Group = "hound";
+        WorkingDirectory = "/var/lib/hound";
+      };
+      path = [ pkgs.git ];
+      script = ''
+        git config --global user.email "you@example.com"
+        git config --global user.name "Your Name"
+        git init my-git --bare
+        git init my-git-clone
+        cd my-git-clone
+        echo 'hi nix!' > hello
+        git add hello
+        git commit -m "hello there :)"
+        git remote add origin /var/lib/hound/my-git
+        git push origin master
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("network.target")
+    machine.wait_for_unit("hound.service")
+    machine.wait_for_open_port(6080)
+    machine.wait_until_succeeds(
+        "curl http://127.0.0.1:6080/api/v1/search\?stats\=fosho\&repos\=\*\&rng=%3A20\&q\=hi\&files\=\&i=nope | grep 'Filename' | grep 'hello'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hydra/common.nix b/nixpkgs/nixos/tests/hydra/common.nix
new file mode 100644
index 000000000000..f612717dc968
--- /dev/null
+++ b/nixpkgs/nixos/tests/hydra/common.nix
@@ -0,0 +1,47 @@
+{ system, ... }:
+{
+  baseConfig = { pkgs, ... }: let
+    trivialJob = pkgs.writeTextDir "trivial.nix" ''
+     { trivial = builtins.derivation {
+         name = "trivial";
+         system = "${system}";
+         builder = "/bin/sh";
+         allowSubstitutes = false;
+         preferLocalBuild = true;
+         args = ["-c" "echo success > $out; exit 0"];
+       };
+     }
+    '';
+
+    createTrivialProject = pkgs.stdenv.mkDerivation {
+      name = "create-trivial-project";
+      dontUnpack = true;
+      buildInputs = [ pkgs.makeWrapper ];
+      installPhase = "install -m755 -D ${./create-trivial-project.sh} $out/bin/create-trivial-project.sh";
+      postFixup = ''
+        wrapProgram "$out/bin/create-trivial-project.sh" --prefix PATH ":" ${pkgs.stdenv.lib.makeBinPath [ pkgs.curl ]} --set EXPR_PATH ${trivialJob}
+      '';
+    };
+  in {
+    virtualisation.memorySize = 2048;
+    time.timeZone = "UTC";
+    environment.systemPackages = [ createTrivialProject pkgs.jq ];
+    services.hydra = {
+      enable = true;
+      # Hydra needs these settings to start up, so we add something harmless.
+      hydraURL = "example.com";
+      notificationSender = "example@example.com";
+      extraConfig = ''
+        email_notification = 1
+      '';
+    };
+    services.postfix.enable = true;
+    nix = {
+      buildMachines = [{
+        hostName = "localhost";
+        systems = [ system ];
+      }];
+      binaryCaches = [];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/hydra/create-trivial-project.sh b/nixpkgs/nixos/tests/hydra/create-trivial-project.sh
new file mode 100755
index 000000000000..5aae2d5bf90d
--- /dev/null
+++ b/nixpkgs/nixos/tests/hydra/create-trivial-project.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+#
+# This script creates a project and a jobset with an input of type
+# "path". That local path is a directory containing a Nix expression
+# that defines a job.
+# The EXPR_PATH environment variable must be set to that local path.
+
+set -e
+
+URL=http://localhost:3000
+USERNAME="admin"
+PASSWORD="admin"
+PROJECT_NAME="trivial"
+JOBSET_NAME="trivial"
+EXPR_PATH=${EXPR_PATH:-}
+
+if [ -z "$EXPR_PATH" ]; then
+   echo "Environment variable EXPR_PATH must be set"
+   exit 1
+fi
+
+mycurl() {
+  curl --referer $URL -H "Accept: application/json" -H "Content-Type: application/json" "$@"
+}
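+# example usage (illustrative): mycurl -X GET $URL/project/$PROJECT_NAME -b hydra-cookie.txt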
+
+cat >data.json <<EOF
+{ "username": "$USERNAME", "password": "$PASSWORD" }
+EOF
+mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
+
+cat >data.json <<EOF
+{
+  "displayname":"Trivial",
+  "enabled":"1",
+  "visible":"1"
+}
+EOF
+mycurl --silent -X PUT $URL/project/$PROJECT_NAME -d @data.json -b hydra-cookie.txt
+
+cat >data.json <<EOF
+{
+  "description": "Trivial",
+  "checkinterval": "60",
+  "enabled": "1",
+  "visible": "1",
+  "keepnr": "1",
+  "enableemail": true,
+  "emailoverride": "hydra@localhost",
+  "nixexprinput": "trivial",
+  "nixexprpath": "trivial.nix",
+  "inputs": {
+    "trivial": {
+      "value": "$EXPR_PATH",
+      "type": "path"
+    }
+  }
+}
+EOF
+mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME -d @data.json -b hydra-cookie.txt
diff --git a/nixpkgs/nixos/tests/hydra/db-migration.nix b/nixpkgs/nixos/tests/hydra/db-migration.nix
new file mode 100644
index 000000000000..cf74acfd67aa
--- /dev/null
+++ b/nixpkgs/nixos/tests/hydra/db-migration.nix
@@ -0,0 +1,92 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+, ...
+}:
+
+let inherit (import ./common.nix { inherit system; }) baseConfig; in
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+{ mig = makeTest {
+    name = "hydra-db-migration";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ ma27 ];
+    };
+
+    nodes = {
+      original = { pkgs, lib, ... }: {
+        imports = [ baseConfig ];
+
+        # An older version of Hydra from before the database schema change,
+        # for testing the migration.
+        services.hydra.package = pkgs.hydra-migration.overrideAttrs (old: {
+          inherit (old) pname;
+          version = "2020-02-06";
+          src = pkgs.fetchFromGitHub {
+            owner = "NixOS";
+            repo = "hydra";
+            rev = "2b4f14963b16b21ebfcd6b6bfa7832842e9b2afc";
+            sha256 = "16q0cffcsfx5pqd91n9k19850c1nbh4vvbd9h8yi64ihn7v8bick";
+          };
+        });
+      };
+
+      migration_phase1 = { pkgs, lib, ... }: {
+        imports = [ baseConfig ];
+        services.hydra.package = pkgs.hydra-migration;
+      };
+
+      finished = { pkgs, lib, ... }: {
+        imports = [ baseConfig ];
+        services.hydra.package = pkgs.hydra-unstable;
+      };
+    };
+
+    testScript = { nodes, ... }: let
+      next = nodes.migration_phase1.config.system.build.toplevel;
+      finished = nodes.finished.config.system.build.toplevel;
+    in ''
+      original.start()
+      original.wait_for_unit("multi-user.target")
+      original.wait_for_unit("postgresql.service")
+      original.wait_for_unit("hydra-init.service")
+      original.require_unit_state("hydra-queue-runner.service")
+      original.require_unit_state("hydra-evaluator.service")
+      original.require_unit_state("hydra-notify.service")
+      original.succeed("hydra-create-user admin --role admin --password admin")
+      original.wait_for_open_port(3000)
+      original.succeed("create-trivial-project.sh")
+      original.wait_until_succeeds(
+          'curl -L -s http://localhost:3000/build/1 -H "Accept: application/json" |  jq .buildstatus | xargs test 0 -eq'
+      )
+
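+      # inspect the schema of the "jobs" table: before the migration the
+      # jobset_id column must not exist yet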
+      out = original.succeed("su -l postgres -c 'psql -d hydra <<< \"\\d+ jobs\" -A'")
+      assert "jobset_id" not in out
+
+      original.succeed(
+          "${next}/bin/switch-to-configuration test >&2"
+      )
+      original.wait_for_unit("hydra-init.service")
+
+      out = original.succeed("su -l postgres -c 'psql -d hydra <<< \"\\d+ jobs\" -A'")
+      assert "jobset_id|integer|||" in out
+
+      original.succeed("hydra-backfill-ids")
+
+      original.succeed(
+          "${finished}/bin/switch-to-configuration test >&2"
+      )
+      original.wait_for_unit("hydra-init.service")
+
+      out = original.succeed("su -l postgres -c 'psql -d hydra <<< \"\\d+ jobs\" -A'")
+      assert "jobset_id|integer||not null|" in out
+
+      original.wait_until_succeeds(
+          'curl -L -s http://localhost:3000/build/1 -H "Accept: application/json" |  jq .buildstatus | xargs test 0 -eq'
+      )
+
+      original.shutdown()
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/hydra/default.nix b/nixpkgs/nixos/tests/hydra/default.nix
new file mode 100644
index 000000000000..2336e4033d6d
--- /dev/null
+++ b/nixpkgs/nixos/tests/hydra/default.nix
@@ -0,0 +1,59 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+
+  inherit (import ./common.nix { inherit system; }) baseConfig;
+
+  hydraPkgs = {
+    inherit (pkgs) hydra-migration hydra-unstable;
+  };
+
+  makeHydraTest = with pkgs.lib; name: package: makeTest {
+    name = "hydra-${name}";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ pstn lewo ma27 ];
+    };
+
+    machine = { pkgs, lib, ... }: {
+      imports = [ baseConfig ];
+      services.hydra = { inherit package; };
+    };
+
+    testScript = ''
+      # let the system boot up
+      machine.wait_for_unit("multi-user.target")
+      # test whether the database is running
+      machine.wait_for_unit("postgresql.service")
+      # test whether the actual hydra daemons are running
+      machine.wait_for_unit("hydra-init.service")
+      machine.require_unit_state("hydra-queue-runner.service")
+      machine.require_unit_state("hydra-evaluator.service")
+      machine.require_unit_state("hydra-notify.service")
+
+      machine.succeed("hydra-create-user admin --role admin --password admin")
+
+      # create a project with a trivial job
+      machine.wait_for_open_port(3000)
+
+      # make sure the build has been built successfully
+      machine.succeed("create-trivial-project.sh")
+
+      machine.wait_until_succeeds(
+          'curl -L -s http://localhost:3000/build/1 -H "Accept: application/json" |  jq .buildstatus | xargs test 0 -eq'
+      )
+
+      machine.wait_until_succeeds(
+          'journalctl -eu hydra-notify.service -o cat | grep -q "sending mail notification to hydra@localhost"'
+      )
+    '';
+  };
+
+in
+
+mapAttrs makeHydraTest hydraPkgs
diff --git a/nixpkgs/nixos/tests/i3wm.nix b/nixpkgs/nixos/tests/i3wm.nix
new file mode 100644
index 000000000000..b527aa706ad2
--- /dev/null
+++ b/nixpkgs/nixos/tests/i3wm.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "i3wm";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ aszlig ];
+  };
+
+  machine = { lib, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = lib.mkForce "none+i3";
+    services.xserver.windowManager.i3.enable = true;
+  };
+
+  testScript = { ... }: ''
+    with subtest("ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("/home/alice/.Xauthority")
+        machine.succeed("xauth merge ~alice/.Xauthority")
+
+    with subtest("ensure we get first configuration window"):
+        machine.wait_for_window(r".*?first configuration.*?")
+        machine.sleep(2)
+        machine.screenshot("started")
+
+    with subtest("ensure we generate and save a config"):
+        # press return to indicate we want to generate a new config
+        machine.send_key("\n")
+        machine.sleep(2)
+        machine.screenshot("preconfig")
+        # press alt then return to indicate we want to use alt as our Mod key
+        machine.send_key("alt")
+        machine.send_key("\n")
+        machine.sleep(2)
+        # make sure the config file is created before we continue
+        machine.wait_for_file("/home/alice/.config/i3/config")
+        machine.screenshot("postconfig")
+        machine.sleep(2)
+
+    with subtest("ensure we can open a new terminal"):
+        machine.send_key("alt-ret")
+        machine.sleep(2)
+        machine.wait_for_window(r"alice.*?machine")
+        machine.sleep(2)
+        machine.screenshot("terminal")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/icingaweb2.nix b/nixpkgs/nixos/tests/icingaweb2.nix
new file mode 100644
index 000000000000..2f65604539c1
--- /dev/null
+++ b/nixpkgs/nixos/tests/icingaweb2.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "icingaweb2";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ das_j ];
+  };
+
+  nodes = {
+    icingaweb2 = { config, pkgs, ... }: {
+      services.icingaweb2 = {
+        enable = true;
+
+        modulePackages = with pkgs.icingaweb2Modules; {
+          particles = theme-particles;
+          spring = theme-spring;
+        };
+
+        modules = {
+          doc.enable = true;
+          migrate.enable = true;
+          setup.enable = true;
+          test.enable = true;
+          translation.enable = true;
+        };
+
+        generalConfig = {
+          global = {
+            module_path = "${pkgs.icingaweb2}/modules";
+          };
+        };
+
+        authentications = {
+          icingaweb = {
+            backend = "external";
+          };
+        };
+
+        groupBackends = {
+          icingaweb = {
+            backend = "db";
+            resource = "icingaweb_db";
+          };
+        };
+
+        resources = {
+          # Not used, so no DB server needed
+          icingaweb_db = {
+            type = "db";
+            db = "mysql";
+            host = "localhost";
+            username = "icingaweb2";
+            password = "icingaweb2";
+            dbname = "icingaweb2";
+          };
+        };
+
+        roles = {
+          Administrators = {
+            users = "*";
+            permissions = "*";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    icingaweb2.wait_for_unit("multi-user.target")
+    icingaweb2.succeed("curl -sSf http://icingaweb2/authentication/login")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/iftop.nix b/nixpkgs/nixos/tests/iftop.nix
new file mode 100644
index 000000000000..8a161027c2ad
--- /dev/null
+++ b/nixpkgs/nixos/tests/iftop.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+with lib;
+
+{
+  name = "iftop";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ ma27 ];
+
+  nodes = {
+    withIftop = {
+      imports = [ ./common/user-account.nix ];
+      programs.iftop.enable = true;
+    };
+    withoutIftop = {
+      imports = [ ./common/user-account.nix ];
+      environment.systemPackages = [ pkgs.iftop ];
+    };
+  };
+
+  testScript = ''
+    with subtest("machine with iftop enabled"):
+        withIftop.wait_for_unit("default.target")
+        # limit to eth1 (eth0 is the test driver's control interface)
+        # and don't try name lookups
+        withIftop.succeed("su -l alice -c 'iftop -t -s 1 -n -i eth1'")
+
+    with subtest("machine without iftop"):
+        withoutIftop.wait_for_unit("default.target")
+        # check that iftop is installed but that user alice lacks the capabilities to run it
+        withoutIftop.succeed("iftop -t -s 1 -n -i eth1")
+        withoutIftop.fail("su -l alice -c 'iftop -t -s 1 -n -i eth1'")
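+        # (Presumably programs.iftop.enable installs a capability wrapper for
+        # iftop, e.g. granting cap_net_raw, which is why the same command
+        # succeeds for alice on withIftop but fails here.)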
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ihatemoney.nix b/nixpkgs/nixos/tests/ihatemoney.nix
new file mode 100644
index 000000000000..7df0ea0b691f
--- /dev/null
+++ b/nixpkgs/nixos/tests/ihatemoney.nix
@@ -0,0 +1,55 @@
+let
+  f = backend: import ./make-test-python.nix ({ pkgs, ... }: {
+    name = "ihatemoney-${backend}";
+    machine = { lib, ... }: {
+      services.ihatemoney = {
+        enable = true;
+        enablePublicProjectCreation = true;
+        inherit backend;
+        uwsgiConfig = {
+          http = ":8000";
+        };
+      };
+      boot.cleanTmpDir = true;
+      # ihatemoney needs a local SMTP server, otherwise project creation just crashes
+      services.opensmtpd = {
+        enable = true;
+        serverConfiguration = ''
+          listen on lo
+          action foo relay
+          match from any for any action foo
+        '';
+      };
+    };
+    testScript = ''
+      machine.wait_for_open_port(8000)
+      machine.wait_for_unit("uwsgi.service")
+
+      assert '"yay"' in machine.succeed(
+          "curl -X POST http://localhost:8000/api/projects -d 'name=yay&id=yay&password=yay&contact_email=yay\@example.com'"
+      )
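+      # stat format: %U:%G is owner:group, %Y the mtime in epoch seconds;
+      # "___" is just a separator so the two values can be split apart.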
+      owner, timestamp = machine.succeed(
+          "stat --printf %U:%G___%Y /var/lib/ihatemoney/secret_key"
+      ).split("___")
+      assert "ihatemoney:ihatemoney" == owner
+
+      with subtest("Restart machine and service"):
+          machine.shutdown()
+          machine.start()
+          machine.wait_for_open_port(8000)
+          machine.wait_for_unit("uwsgi.service")
+
+      with subtest("check that the database is really persistent"):
+          machine.succeed("curl --basic -u yay:yay http://localhost:8000/api/projects/yay")
+
+      with subtest("check that the secret key is really persistent"):
+          timestamp2 = machine.succeed("stat --printf %Y /var/lib/ihatemoney/secret_key")
+          assert timestamp == timestamp2
+
+      assert "ihatemoney" in machine.succeed("curl http://localhost:8000")
+    '';
+  });
+in {
+  ihatemoney-sqlite = f "sqlite";
+  ihatemoney-postgresql = f "postgresql";
+}
diff --git a/nixpkgs/nixos/tests/incron.nix b/nixpkgs/nixos/tests/incron.nix
new file mode 100644
index 000000000000..b22ee4c9a037
--- /dev/null
+++ b/nixpkgs/nixos/tests/incron.nix
@@ -0,0 +1,52 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+{
+  name = "incron";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  machine =
+    { ... }:
+    { services.incron.enable = true;
+      services.incron.extraPackages = [ pkgs.coreutils ];
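+      # incron table wildcards used below: $@ = watched path,
+      # $# = event-related file name, $% = event flags (textual)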
+      services.incron.systab = ''
+        /test IN_CREATE,IN_MODIFY,IN_CLOSE_WRITE,IN_MOVED_FROM,IN_MOVED_TO echo "$@/$# $%" >> /root/incron.log
+      '';
+
+      # ensure the directory to be monitored exists before incron is started
+      system.activationScripts.incronTest = ''
+        mkdir /test
+      '';
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("incron.service")
+
+    machine.succeed("test -d /test")
+    # create some activity for incron to monitor
+    machine.succeed("touch /test/file")
+    machine.succeed("echo foo >> /test/file")
+    machine.succeed("mv /test/file /root")
+    machine.succeed("mv /root/file /test")
+
+    machine.sleep(1)
+
+    # touch /test/file
+    machine.succeed("grep '/test/file IN_CREATE' /root/incron.log")
+
+    # echo foo >> /test/file
+    machine.succeed("grep '/test/file IN_MODIFY' /root/incron.log")
+    machine.succeed("grep '/test/file IN_CLOSE_WRITE' /root/incron.log")
+
+    # mv /test/file /root
+    machine.succeed("grep '/test/file IN_MOVED_FROM' /root/incron.log")
+
+    # mv /root/file /test
+    machine.succeed("grep '/test/file IN_MOVED_TO' /root/incron.log")
+
+    # ensure something unexpected is not present
+    machine.fail("grep 'IN_OPEN' /root/incron.log")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/influxdb.nix b/nixpkgs/nixos/tests/influxdb.nix
new file mode 100644
index 000000000000..04ef80461010
--- /dev/null
+++ b/nixpkgs/nixos/tests/influxdb.nix
@@ -0,0 +1,40 @@
+# This test runs InfluxDB and checks that the server is up and answering queries.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "influxdb";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = {
+    one = { ... }: {
+      services.influxdb.enable = true;
+      environment.systemPackages = [ pkgs.httpie ];
+    };
+  };
+
+  testScript = ''
+    import shlex
+
+    start_all()
+
+    one.wait_for_unit("influxdb.service")
+
+    # create database
+    one.succeed(
+        "curl -XPOST http://localhost:8086/query --data-urlencode 'q=CREATE DATABASE test'"
+    )
+
+    # write some points and run simple query
+    out = one.succeed(
+        "curl -XPOST 'http://localhost:8086/write?db=test' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'"
+    )
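+    # 1434055562000000000 is nanoseconds since the epoch, i.e.
+    # 2015-06-11T20:46:02Z, which the query below must return.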
+
+    qv = "SELECT value FROM cpu_load_short WHERE region='us-west'"
+    cmd = f'curl -GET "http://localhost:8086/query?db=test" --data-urlencode {shlex.quote("q="+ qv)}'
+    out = one.succeed(cmd)
+
+    assert "2015-06-11T20:46:02Z" in out
+    assert "0.64" in out
+  '';
+})
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/default.nix b/nixpkgs/nixos/tests/initrd-network-ssh/default.nix
new file mode 100644
index 000000000000..017de6882081
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/default.nix
@@ -0,0 +1,75 @@
+import ../make-test-python.nix ({ lib, ... }:
+
+{
+  name = "initrd-network-ssh";
+  meta = with lib.maintainers; {
+    maintainers = [ willibutz emily ];
+  };
+
+  nodes = with lib; {
+    server =
+      { config, ... }:
+      {
+        boot.kernelParams = [
+          "ip=${config.networking.primaryIPAddress}:::255.255.255.0::eth1:none"
+        ];
+        boot.initrd.network = {
+          enable = true;
+          ssh = {
+            enable = true;
+            authorizedKeys = [ (readFile ./id_ed25519.pub) ];
+            port = 22;
+            hostKeys = [ ./ssh_host_ed25519_key ];
+          };
+        };
+        boot.initrd.preLVMCommands = ''
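+          # Stay in the initrd until the client has connected over SSH and
+          # created /fnord; then power the server off.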
+          while true; do
+            if [ -f fnord ]; then
+              poweroff
+            fi
+            sleep 1
+          done
+        '';
+      };
+
+    client =
+      { config, ... }:
+      {
+        environment.etc = {
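+          # Pins the initrd SSH host key: the entry is "server,<ip> <pubkey>",
+          # with the IP extracted from the generated networking.extraHosts.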
+          knownHosts = {
+            text = concatStrings [
+              "server,"
+              "${toString (head (splitString " " (
+                toString (elemAt (splitString "\n" config.networking.extraHosts) 2)
+              )))} "
+              "${readFile ./ssh_host_ed25519_key.pub}"
+            ];
+          };
+          sshKey = {
+            source = ./id_ed25519;
+            mode = "0600";
+          };
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    client.wait_for_unit("network.target")
+
+
+    def ssh_is_up(_) -> bool:
+        status, _ = client.execute("nc -z server 22")
+        return status == 0
+
+
+    with client.nested("waiting for SSH server to come up"):
+        retry(ssh_is_up)
+
+
+    client.succeed(
+        "ssh -i /etc/sshKey -o UserKnownHostsFile=/etc/knownHosts server 'touch /fnord'"
+    )
+    client.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/generate-keys.nix b/nixpkgs/nixos/tests/initrd-network-ssh/generate-keys.nix
new file mode 100644
index 000000000000..3d7978890ab0
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/generate-keys.nix
@@ -0,0 +1,10 @@
+with import ../../.. {};
+
+runCommand "gen-keys" {
+    buildInputs = [ openssh ];
+  }
+  ''
+    mkdir $out
+    ssh-keygen -q -t ed25519 -N "" -f $out/ssh_host_ed25519_key
+    ssh-keygen -q -t ed25519 -N "" -f $out/id_ed25519
+  ''
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519 b/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519
new file mode 100644
index 000000000000..f914b3f712fc
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACAVcX+32Yqig25RxRA8bel/f604wV0p/63um+Oku/3vfwAAAJi/AJZMvwCW
+TAAAAAtzc2gtZWQyNTUxOQAAACAVcX+32Yqig25RxRA8bel/f604wV0p/63um+Oku/3vfw
+AAAEAPLjQusjrB90Lk3996G3AbtTeK+XweNgxaegYnml/A/RVxf7fZiqKDblHFEDxt6X9/
+rTjBXSn/re6b46S7/e9/AAAAEG5peGJsZEBsb2NhbGhvc3QBAgMEBQ==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519.pub b/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519.pub
new file mode 100644
index 000000000000..40de4a8ac602
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBVxf7fZiqKDblHFEDxt6X9/rTjBXSn/re6b46S7/e9/ nixbld@localhost
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key b/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key
new file mode 100644
index 000000000000..f1e29459b7a3
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDP9Mz6qlxdQqA4omrgbOlVsxSGONCJstjW9zqquajlIAAAAJg0WGFGNFhh
+RgAAAAtzc2gtZWQyNTUxOQAAACDP9Mz6qlxdQqA4omrgbOlVsxSGONCJstjW9zqquajlIA
+AAAEA0Hjs7LfFPdTf3ThGx6GNKvX0ItgzgXs91Z3oGIaF6S8/0zPqqXF1CoDiiauBs6VWz
+FIY40Imy2Nb3Oqq5qOUgAAAAEG5peGJsZEBsb2NhbGhvc3QBAgMEBQ==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key.pub b/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key.pub
new file mode 100644
index 000000000000..3aa1587e1dce
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM/0zPqqXF1CoDiiauBs6VWzFIY40Imy2Nb3Oqq5qOUg nixbld@localhost
diff --git a/nixpkgs/nixos/tests/initrd-network.nix b/nixpkgs/nixos/tests/initrd-network.nix
new file mode 100644
index 000000000000..9c35b7305768
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "initrd-network";
+
+  meta.maintainers = [ pkgs.stdenv.lib.maintainers.eelco ];
+
+  machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+    boot.initrd.network.enable = true;
+    boot.initrd.network.postCommands =
+      ''
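+        # 10.0.2.15 is the guest address and 10.0.2.2 the host gateway in
+        # QEMU's default user-mode network; both checks must pass in the initrd.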
+        ip addr show
+        ip route show
+        ip addr | grep 10.0.2.15 || exit 1
+        ping -c1 10.0.2.2 || exit 1
+      '';
+    # Check if cleanup was done correctly
+    boot.initrd.postMountCommands = lib.mkAfter
+      ''
+        ip addr show
+        ip route show
+        ip addr | grep 10.0.2.15 && exit 1
+        ping -c1 10.0.2.2 && exit 1
+      '';
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("ip addr show >&2")
+      machine.succeed("ip route show >&2")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/installed-tests/colord.nix b/nixpkgs/nixos/tests/installed-tests/colord.nix
new file mode 100644
index 000000000000..77e6b917fe68
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/colord.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.colord;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/default.nix b/nixpkgs/nixos/tests/installed-tests/default.nix
new file mode 100644
index 000000000000..b6bdfea22770
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/default.nix
@@ -0,0 +1,99 @@
+# NixOS tests that run the installed tests of software packages using
+# gnome-desktop-testing-runner.
+# See https://wiki.gnome.org/Initiatives/GnomeGoals/InstalledTests
+
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; }
+}:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+
+  callInstalledTest = pkgs.newScope { inherit makeInstalledTest; };
+
+  makeInstalledTest =
+    { # Package to test. Needs to have an installedTests output
+      tested
+
+      # Config to inject into machine
+    , testConfig ? {}
+
+      # Test script snippet to inject before gnome-desktop-testing-runner begins.
+      # This is useful for any extra setup the environment may need beforehand.
+    , preTestScript ? ""
+
+      # Does test need X11?
+    , withX11 ? false
+
+      # Extra flags to pass to gnome-desktop-testing-runner.
+    , testRunnerFlags ? ""
+
+      # Extra attributes to pass to makeTest.
+      # They will be recursively merged into the attrset created by this function.
+    , ...
+    }@args:
+    makeTest
+      (recursiveUpdate
+        rec {
+          name = tested.name;
+
+          meta = {
+            maintainers = tested.meta.maintainers;
+          };
+
+          machine = { ... }: {
+            imports = [
+              testConfig
+            ] ++ optional withX11 ../common/x11.nix;
+
+            environment.systemPackages = with pkgs; [ gnome-desktop-testing ];
+
+          };
+
+          testScript =
+            optionalString withX11 ''
+              machine.wait_for_x()
+            '' +
+            optionalString (preTestScript != "") ''
+              ${preTestScript}
+            '' +
+            ''
+              machine.succeed(
+                  "gnome-desktop-testing-runner ${testRunnerFlags} -d '${tested.installedTests}/share'"
+              )
+            '';
+        }
+
+        (removeAttrs args [
+          "tested"
+          "testConfig"
+          "preTestScript"
+          "withX11"
+          "testRunnerFlags"
+        ])
+      );
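+
+  # A minimal (hypothetical) consumer of makeInstalledTest would be a file
+  # like ./mypkg.nix containing:
+  #
+  #   { pkgs, makeInstalledTest, ... }:
+  #
+  #   makeInstalledTest {
+  #     tested = pkgs.mypkg;              # must have an installedTests output
+  #     withX11 = true;                   # only if the tests need a display
+  #     testRunnerFlags = "--timeout 600";
+  #   }
+  #
+  # which would then be registered below via `callInstalledTest ./mypkg.nix {}`.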
+
+in
+
+{
+  colord = callInstalledTest ./colord.nix {};
+  flatpak = callInstalledTest ./flatpak.nix {};
+  flatpak-builder = callInstalledTest ./flatpak-builder.nix {};
+  fwupd = callInstalledTest ./fwupd.nix {};
+  gcab = callInstalledTest ./gcab.nix {};
+  gdk-pixbuf = callInstalledTest ./gdk-pixbuf.nix {};
+  gjs = callInstalledTest ./gjs.nix {};
+  glib-networking = callInstalledTest ./glib-networking.nix {};
+  gnome-photos = callInstalledTest ./gnome-photos.nix {};
+  graphene = callInstalledTest ./graphene.nix {};
+  ibus = callInstalledTest ./ibus.nix {};
+  libgdata = callInstalledTest ./libgdata.nix {};
+  glib-testing = callInstalledTest ./glib-testing.nix {};
+  libjcat = callInstalledTest ./libjcat.nix {};
+  libxmlb = callInstalledTest ./libxmlb.nix {};
+  malcontent = callInstalledTest ./malcontent.nix {};
+  ostree = callInstalledTest ./ostree.nix {};
+  xdg-desktop-portal = callInstalledTest ./xdg-desktop-portal.nix {};
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/flatpak-builder.nix b/nixpkgs/nixos/tests/installed-tests/flatpak-builder.nix
new file mode 100644
index 000000000000..31b9f2b258fd
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/flatpak-builder.nix
@@ -0,0 +1,14 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.flatpak-builder;
+
+  testConfig = {
+    services.flatpak.enable = true;
+    xdg.portal.enable = true;
+    environment.systemPackages = with pkgs; [ flatpak-builder ] ++ flatpak-builder.installedTestsDependencies;
+    virtualisation.diskSize = 2048;
+  };
+
+  testRunnerFlags = "--timeout 3600";
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/flatpak.nix b/nixpkgs/nixos/tests/installed-tests/flatpak.nix
new file mode 100644
index 000000000000..091c99326629
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/flatpak.nix
@@ -0,0 +1,19 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.flatpak;
+  withX11 = true;
+
+  testConfig = {
+    services.xserver.desktopManager.gnome3.enable = true; # TODO: figure out minimal environment where the tests work
+    # common/x11.nix enables the auto display manager (lightdm)
+    services.xserver.displayManager.gdm.enable = false;
+    services.gnome3.core-utilities.enable = false;
+    services.flatpak.enable = true;
+    environment.systemPackages = with pkgs; [ gnupg ostree python2 ];
+    virtualisation.memorySize = 2047;
+    virtualisation.diskSize = 1024;
+  };
+
+  testRunnerFlags = "--timeout 3600";
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/fwupd.nix b/nixpkgs/nixos/tests/installed-tests/fwupd.nix
new file mode 100644
index 000000000000..6a0ceb57dda4
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/fwupd.nix
@@ -0,0 +1,12 @@
+{ pkgs, lib, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.fwupd;
+
+  testConfig = {
+    services.fwupd.enable = true;
+    services.fwupd.blacklistPlugins = lib.mkForce []; # don't blacklist test plugin
+    services.fwupd.enableTestRemote = true;
+    virtualisation.memorySize = 768;
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/gcab.nix b/nixpkgs/nixos/tests/installed-tests/gcab.nix
new file mode 100644
index 000000000000..b24cc2e01267
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/gcab.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.gcab;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/gdk-pixbuf.nix b/nixpkgs/nixos/tests/installed-tests/gdk-pixbuf.nix
new file mode 100644
index 000000000000..3d0011a427a4
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/gdk-pixbuf.nix
@@ -0,0 +1,13 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.gdk-pixbuf;
+
+  testConfig = {
+    # Tests allocate a lot of memory trying to exploit a CVE
+    # but qemu-system-i386 has a 2047M memory limit
+    virtualisation.memorySize = if pkgs.stdenv.isi686 then 2047 else 4096;
+  };
+
+  testRunnerFlags = "--timeout 1800";
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/gjs.nix b/nixpkgs/nixos/tests/installed-tests/gjs.nix
new file mode 100644
index 000000000000..1656e9de171b
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/gjs.nix
@@ -0,0 +1,6 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.gjs;
+  withX11 = true;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/glib-networking.nix b/nixpkgs/nixos/tests/installed-tests/glib-networking.nix
new file mode 100644
index 000000000000..b58d4df21fca
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/glib-networking.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.glib-networking;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/glib-testing.nix b/nixpkgs/nixos/tests/installed-tests/glib-testing.nix
new file mode 100644
index 000000000000..7a06cf792bdd
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/glib-testing.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.glib-testing;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/gnome-photos.nix b/nixpkgs/nixos/tests/installed-tests/gnome-photos.nix
new file mode 100644
index 000000000000..05e7ccb65ad5
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/gnome-photos.nix
@@ -0,0 +1,35 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.gnome-photos;
+
+  withX11 = true;
+
+  testConfig = {
+    programs.dconf.enable = true;
+    services.gnome3.at-spi2-core.enable = true; # needed for dogtail
+    environment.systemPackages = with pkgs; [
+      # gsettings tool with access to gsettings-desktop-schemas
+      (stdenv.mkDerivation {
+        name = "desktop-gsettings";
+        dontUnpack = true;
+        nativeBuildInputs = [ glib wrapGAppsHook ];
+        buildInputs = [ gsettings-desktop-schemas ];
+        installPhase = ''
+          runHook preInstall
+          mkdir -p $out/bin
+          ln -s ${glib.bin}/bin/gsettings $out/bin/desktop-gsettings
+          runHook postInstall
+        '';
+      })
+    ];
+    services.dbus.packages = with pkgs; [ gnome-photos ];
+  };
+
+  preTestScript = ''
+    # dogtail needs accessibility enabled
+    machine.succeed(
+        "desktop-gsettings set org.gnome.desktop.interface toolkit-accessibility true 2>&1"
+    )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/graphene.nix b/nixpkgs/nixos/tests/installed-tests/graphene.nix
new file mode 100644
index 000000000000..e43339abd88c
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/graphene.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.graphene;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/ibus.nix b/nixpkgs/nixos/tests/installed-tests/ibus.nix
new file mode 100644
index 000000000000..af54b612b507
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/ibus.nix
@@ -0,0 +1,20 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.ibus;
+
+  testConfig = {
+    i18n.inputMethod.enabled = "ibus";
+  };
+
+  preTestScript = ''
+    # ibus ships ibus-desktop-testing-runner, but it tries to manage the
+    # desktop session, so we just spawn ibus-daemon ourselves
+    machine.succeed("ibus-daemon --daemonize --verbose")
+  '';
+
+  withX11 = true;
+
+  # TODO: ibus-daemon is currently crashing (cause unclear);
+  # maybe run ibus as a systemd service that auto-restarts?
+  meta.broken = true;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/libgdata.nix b/nixpkgs/nixos/tests/installed-tests/libgdata.nix
new file mode 100644
index 000000000000..f11a7bc1bc51
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/libgdata.nix
@@ -0,0 +1,11 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.libgdata;
+
+  testConfig = {
+    # Without TLS support the tests bail out early:
+    #   GLib-GIO-DEBUG: _g_io_module_get_default: Found default implementation dummy (GDummyTlsBackend) for ‘gio-tls-backend’
+    #   Bail out! libgdata:ERROR:../gdata/tests/common.c:134:gdata_test_init: assertion failed (child_error == NULL): TLS support is not available (g-tls-error-quark, 0)
+    services.gnome3.glib-networking.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/libjcat.nix b/nixpkgs/nixos/tests/installed-tests/libjcat.nix
new file mode 100644
index 000000000000..41493a730890
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/libjcat.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.libjcat;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/libxmlb.nix b/nixpkgs/nixos/tests/installed-tests/libxmlb.nix
new file mode 100644
index 000000000000..af2bbe9c35e2
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/libxmlb.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.libxmlb;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/malcontent.nix b/nixpkgs/nixos/tests/installed-tests/malcontent.nix
new file mode 100644
index 000000000000..d4e214c41988
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/malcontent.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.malcontent;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/ostree.nix b/nixpkgs/nixos/tests/installed-tests/ostree.nix
new file mode 100644
index 000000000000..eef7cace54cc
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/ostree.nix
@@ -0,0 +1,23 @@
+{ pkgs, lib, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.ostree;
+
+  # TODO: Wrap/patch the tests directly in the package
+  testConfig = {
+    environment.systemPackages = with pkgs; [
+      (python3.withPackages (p: with p; [ pyyaml ]))
+      gnupg
+      ostree
+    ];
+
+    # for GJS tests
+    environment.variables.GI_TYPELIB_PATH = lib.makeSearchPath "lib/girepository-1.0" (with pkgs; [
+      gtk3
+      pango.out
+      ostree
+      gdk-pixbuf
+      atk
+    ]);
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/xdg-desktop-portal.nix b/nixpkgs/nixos/tests/installed-tests/xdg-desktop-portal.nix
new file mode 100644
index 000000000000..90529d37ee0f
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/xdg-desktop-portal.nix
@@ -0,0 +1,9 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.xdg-desktop-portal;
+
+  # Ton of breakage.
+  # https://github.com/flatpak/xdg-desktop-portal/pull/428
+  meta.broken = true;
+}
diff --git a/nixpkgs/nixos/tests/installer.nix b/nixpkgs/nixos/tests/installer.nix
new file mode 100644
index 000000000000..c5abd458ec9a
--- /dev/null
+++ b/nixpkgs/nixos/tests/installer.nix
@@ -0,0 +1,773 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+
+  # The configuration to install.
+  makeConfig = { bootLoader, grubVersion, grubDevice, grubIdentifier, grubUseEfi
+               , extraConfig, forceGrubReinstallCount ? 0
+               }:
+    pkgs.writeText "configuration.nix" ''
+      { config, lib, pkgs, modulesPath, ... }:
+
+      { imports =
+          [ ./hardware-configuration.nix
+            <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          ];
+
+        # To ensure that we can rebuild the grub configuration during nixos-rebuild
+        system.extraDependencies = with pkgs; [ stdenvNoCC ];
+
+        ${optionalString (bootLoader == "grub") ''
+          boot.loader.grub.version = ${toString grubVersion};
+          ${optionalString (grubVersion == 1) ''
+            boot.loader.grub.splashImage = null;
+          ''}
+
+          boot.loader.grub.extraConfig = "serial; terminal_output.serial";
+          ${if grubUseEfi then ''
+            boot.loader.grub.device = "nodev";
+            boot.loader.grub.efiSupport = true;
+            boot.loader.grub.efiInstallAsRemovable = true; # XXX: needed for OVMF?
+          '' else ''
+            boot.loader.grub.device = "${grubDevice}";
+            boot.loader.grub.fsIdentifier = "${grubIdentifier}";
+          ''}
+
+          boot.loader.grub.configurationLimit = 100 + ${toString forceGrubReinstallCount};
+        ''}
+
+        ${optionalString (bootLoader == "systemd-boot") ''
+          boot.loader.systemd-boot.enable = true;
+        ''}
+
+        users.users.alice = {
+          isNormalUser = true;
+          home = "/home/alice";
+          description = "Alice Foobar";
+        };
+
+        hardware.enableAllFirmware = lib.mkForce false;
+
+        ${replaceChars ["\n"] ["\n  "] extraConfig}
+      }
+    '';
+
+
+  # The test script boots a NixOS VM, installs NixOS on an empty hard
+  # disk, and then reboots from the hard disk.  It's parameterized with
+  # a test script fragment `createPartitions', which must create
+  # partitions and filesystems.
+  testScriptFun = { bootLoader, createPartitions, grubVersion, grubDevice, grubUseEfi
+                  , grubIdentifier, preBootCommands, extraConfig
+                  , testSpecialisationConfig
+                  }:
+    let iface = if grubVersion == 1 then "ide" else "virtio";
+        isEfi = bootLoader == "systemd-boot" || (bootLoader == "grub" && grubUseEfi);
+        bios  = if pkgs.stdenv.isAarch64 then "QEMU_EFI.fd" else "OVMF.fd";
+    in if !isEfi && !(pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) then
+      throw "Non-EFI boot methods are only supported on i686 / x86_64"
+    else ''
+      def assemble_qemu_flags():
+          flags = "-cpu host"
+          ${if system == "x86_64-linux"
+            then ''flags += " -m 768"''
+            else ''flags += " -m 512 -enable-kvm -machine virt,gic-version=host"''
+          }
+          return flags
+
+
+      qemu_flags = {"qemuFlags": assemble_qemu_flags()}
+
+      hd_flags = {
+          "hdaInterface": "${iface}",
+          "hda": "vm-state-machine/machine.qcow2",
+      }
+      ${optionalString isEfi ''
+        hd_flags.update(
+            bios="${pkgs.OVMF.fd}/FV/${bios}"
+        )''
+      }
+      default_flags = {**hd_flags, **qemu_flags}
+
+
+      def create_machine_named(name):
+          return create_machine({**default_flags, "name": name})
+
+
+      machine.start()
+
+      with subtest("Assert readiness of login prompt"):
+          machine.succeed("echo hello")
+
+      with subtest("Wait for hard disks to appear in /dev"):
+          machine.succeed("udevadm settle")
+
+      ${createPartitions}
+
+      with subtest("Create the NixOS configuration"):
+          machine.succeed("nixos-generate-config --root /mnt")
+          machine.succeed("cat /mnt/etc/nixos/hardware-configuration.nix >&2")
+          machine.copy_from_host(
+              "${ makeConfig {
+                    inherit bootLoader grubVersion grubDevice grubIdentifier
+                            grubUseEfi extraConfig;
+                  }
+              }",
+              "/mnt/etc/nixos/configuration.nix",
+          )
+
+      with subtest("Perform the installation"):
+          machine.succeed("nixos-install < /dev/null >&2")
+
+      with subtest("Do it again to make sure it's idempotent"):
+          machine.succeed("nixos-install < /dev/null >&2")
+
+      with subtest("Shutdown system after installation"):
+          machine.succeed("umount /mnt/boot || true")
+          machine.succeed("umount /mnt")
+          machine.succeed("sync")
+          machine.shutdown()
+
+      # Now see if we can boot the installation.
+      machine = create_machine_named("boot-after-install")
+
+      # For example to enter LUKS passphrase.
+      ${preBootCommands}
+
+      with subtest("Assert that /boot get mounted"):
+          machine.wait_for_unit("local-fs.target")
+          ${if bootLoader == "grub"
+              then ''machine.succeed("test -e /boot/grub")''
+              else ''machine.succeed("test -e /boot/loader/loader.conf")''
+          }
+
+      with subtest("Check whether /root has correct permissions"):
+          assert "700" in machine.succeed("stat -c '%a' /root")
+
+      with subtest("Assert swap device got activated"):
+          # see https://bugs.freedesktop.org/show_bug.cgi?id=86930
+          machine.wait_for_unit("swap.target")
+          machine.succeed("cat /proc/swaps | grep -q /dev")
+
+      with subtest("Check that the store is in good shape"):
+          machine.succeed("nix-store --verify --check-contents >&2")
+
+      with subtest("Check whether the channel works"):
+          machine.succeed("nix-env -iA nixos.procps >&2")
+          assert ".nix-profile" in machine.succeed("type -tP ps | tee /dev/stderr")
+
+      with subtest(
+          "Check that the daemon works, and that non-root users can run builds "
+          "(this will build a new profile generation through the daemon)"
+      ):
+          machine.succeed("su alice -l -c 'nix-env -iA nixos.procps' >&2")
+
+      with subtest("Configure system with writable Nix store on next boot"):
+          # we're not using copy_from_host here because the installer image
+          # doesn't know about the host-guest sharing mechanism.
+          machine.copy_from_host_via_shell(
+              "${ makeConfig {
+                    inherit bootLoader grubVersion grubDevice grubIdentifier
+                            grubUseEfi extraConfig;
+                    forceGrubReinstallCount = 1;
+                  }
+              }",
+              "/etc/nixos/configuration.nix",
+          )
+
+      with subtest("Check whether nixos-rebuild works"):
+          machine.succeed("nixos-rebuild switch >&2")
+
+      with subtest("Test nixos-option"):
+          kernel_modules = machine.succeed("nixos-option boot.initrd.kernelModules")
+          assert "virtio_console" in kernel_modules
+          assert "List of modules" in kernel_modules
+          assert "qemu-guest.nix" in kernel_modules
+
+      machine.shutdown()
+
+      # Check whether a writable store build works
+      machine = create_machine_named("rebuild-switch")
+      ${preBootCommands}
+      machine.wait_for_unit("multi-user.target")
+
+      # we're not using copy_from_host here because the installer image
+      # doesn't know about the host-guest sharing mechanism.
+      machine.copy_from_host_via_shell(
+          "${ makeConfig {
+                inherit bootLoader grubVersion grubDevice grubIdentifier
+                grubUseEfi extraConfig;
+                forceGrubReinstallCount = 2;
+              }
+          }",
+          "/etc/nixos/configuration.nix",
+      )
+      machine.succeed("nixos-rebuild boot >&2")
+      machine.shutdown()
+
+      # And just to be sure, check that the machine still boots after
+      # "nixos-rebuild switch".
+      machine = create_machine_named("boot-after-rebuild-switch")
+      ${preBootCommands}
+      machine.wait_for_unit("network.target")
+      machine.shutdown()
+
+      # Tests for validating clone configuration entries in grub menu
+    ''
+    + optionalString testSpecialisationConfig ''
+      # Reboot Machine
+      machine = create_machine_named("clone-default-config")
+      ${preBootCommands}
+      machine.wait_for_unit("multi-user.target")
+
+      with subtest("Booted configuration name should be 'Home'"):
+          # This is not the name that shows in the grub menu.
+          # The default configuration is always shown as "Default"
+          machine.succeed("cat /run/booted-system/configuration-name >&2")
+          assert "Home" in machine.succeed("cat /run/booted-system/configuration-name")
+
+      with subtest("We should **not** find a file named /etc/gitconfig"):
+          machine.fail("test -e /etc/gitconfig")
+
+      with subtest("Set grub to boot the second configuration"):
+          machine.succeed("grub-reboot 1")
+
+      machine.shutdown()
+
+      # Reboot Machine
+      machine = create_machine_named("clone-alternate-config")
+      ${preBootCommands}
+
+      machine.wait_for_unit("multi-user.target")
+      with subtest("Booted configuration name should be Work"):
+          machine.succeed("cat /run/booted-system/configuration-name >&2")
+          assert "Work" in machine.succeed("cat /run/booted-system/configuration-name")
+
+      with subtest("We should find a file named /etc/gitconfig"):
+          machine.succeed("test -e /etc/gitconfig")
+
+      machine.shutdown()
+    '';
+
+
+  makeInstallerTest = name:
+    { createPartitions, preBootCommands ? "", extraConfig ? ""
+    , extraInstallerConfig ? {}
+    , bootLoader ? "grub" # either "grub" or "systemd-boot"
+    , grubVersion ? 2, grubDevice ? "/dev/vda", grubIdentifier ? "uuid", grubUseEfi ? false
+    , enableOCR ? false, meta ? {}
+    , testSpecialisationConfig ? false
+    }:
+    makeTest {
+      inherit enableOCR;
+      name = "installer-" + name;
+      meta = with pkgs.stdenv.lib.maintainers; {
+        # put global maintainers here; individual maintainers go into the makeInstallerTest function call
+        maintainers = (meta.maintainers or []);
+      };
+      nodes = {
+
+        # The configuration of the machine used to run "nixos-install".
+        machine = { pkgs, ... }: {
+          imports = [
+            ../modules/profiles/installation-device.nix
+            ../modules/profiles/base.nix
+            extraInstallerConfig
+          ];
+
+          virtualisation.diskSize = 8 * 1024;
+          virtualisation.memorySize = 1024;
+
+          # Use a small /dev/vdb as the root disk for the
+          # installer. This ensures the target disk (/dev/vda) is
+          # the same during and after installation.
+          virtualisation.emptyDiskImages = [ 512 ];
+          virtualisation.bootDevice =
+            if grubVersion == 1 then "/dev/sdb" else "/dev/vdb";
+          virtualisation.qemu.diskInterface =
+            if grubVersion == 1 then "scsi" else "virtio";
+
+          boot.loader.systemd-boot.enable = mkIf (bootLoader == "systemd-boot") true;
+
+          hardware.enableAllFirmware = mkForce false;
+
+          # The test cannot access the network, so any packages we
+          # need must be included in the VM.
+          system.extraDependencies = with pkgs; [
+            desktop-file-utils
+            docbook5
+            docbook_xsl_ns
+            libxml2.bin
+            libxslt.bin
+            nixos-artwork.wallpapers.simple-dark-gray-bottom
+            ntp
+            perlPackages.ListCompare
+            perlPackages.XMLLibXML
+            shared-mime-info
+            sudo
+            texinfo
+            unionfs-fuse
+            xorg.lndir
+
+            # add curl so that rather than seeing the test attempt to download
+            # curl's tarball, we see what it's trying to download
+            curl
+          ]
+          ++ optional (bootLoader == "grub" && grubVersion == 1) pkgs.grub
+          ++ optionals (bootLoader == "grub" && grubVersion == 2) [
+            pkgs.grub2
+            pkgs.grub2_efi
+          ];
+
+          nix.binaryCaches = mkForce [ ];
+          nix.extraOptions = ''
+            hashed-mirrors =
+            connect-timeout = 1
+          '';
+        };
+
+      };
+
+      testScript = testScriptFun {
+        inherit bootLoader createPartitions preBootCommands
+                grubVersion grubDevice grubIdentifier grubUseEfi extraConfig
+                testSpecialisationConfig;
+      };
+    };
+
+    makeLuksRootTest = name: luksFormatOpts: makeInstallerTest name {
+      createPartitions = ''
+        machine.succeed(
+            "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+            + " mkpart primary ext2 1M 50MB"  # /boot
+            + " mkpart primary linux-swap 50M 1024M"
+            + " mkpart primary 1024M -1s",  # LUKS
+            "udevadm settle",
+            "mkswap /dev/vda2 -L swap",
+            "swapon -L swap",
+            "modprobe dm_mod dm_crypt",
+            "echo -n supersecret | cryptsetup luksFormat ${luksFormatOpts} -q /dev/vda3 -",
+            "echo -n supersecret | cryptsetup luksOpen --key-file - /dev/vda3 cryptroot",
+            "mkfs.ext3 -L nixos /dev/mapper/cryptroot",
+            "mount LABEL=nixos /mnt",
+            "mkfs.ext3 -L boot /dev/vda1",
+            "mkdir -p /mnt/boot",
+            "mount LABEL=boot /mnt/boot",
+        )
+      '';
+      extraConfig = ''
+        boot.kernelParams = lib.mkAfter [ "console=tty0" ];
+      '';
+      enableOCR = true;
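+      # OCR is required so that wait_for_text below can detect the LUKS
+      # passphrase prompt on the console.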
+      preBootCommands = ''
+        machine.start()
+        machine.wait_for_text("Passphrase for")
+        machine.send_chars("supersecret\n")
+      '';
+    };
+
+  # The (almost) simplest partitioning scheme: a swap partition and
+  # one big filesystem partition.
+  simple-test-config = {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary linux-swap 1M 1024M"
+          + " mkpart primary ext2 1024M -1s",
+          "udevadm settle",
+          "mkswap /dev/vda1 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda2",
+          "mount LABEL=nixos /mnt",
+      )
+    '';
+  };
+
+  simple-uefi-grub-config = {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel gpt"
+          + " mkpart ESP fat32 1M 50MiB"  # /boot
+          + " set 1 boot on"
+          + " mkpart primary linux-swap 50MiB 1024MiB"
+          + " mkpart primary ext2 1024MiB -1MiB",  # /
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+          "mkfs.vfat -n BOOT /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=BOOT /mnt/boot",
+      )
+    '';
+    bootLoader = "grub";
+    grubUseEfi = true;
+  };
+
+  specialisation-test-extraconfig = {
+    extraConfig = ''
+      environment.systemPackages = [ pkgs.grub2 ];
+      boot.loader.grub.configurationName = "Home";
+      specialisation.work.configuration = {
+        boot.loader.grub.configurationName = lib.mkForce "Work";
+
+        environment.etc = {
+          "gitconfig".text = "
+            [core]
+              gitproxy = none for work.com
+              ";
+        };
+      };
+    '';
+    testSpecialisationConfig = true;
+  };
+
+
+in {
+
+  # !!! `parted mkpart' seems to silently create overlapping partitions.
+
+
+  # The (almost) simplest partitioning scheme: a swap partition and
+  # one big filesystem partition.
+  simple = makeInstallerTest "simple" simple-test-config;
+
+  # Test cloned configurations with the simple grub configuration
+  simpleSpecialised = makeInstallerTest "simpleSpecialised" (simple-test-config // specialisation-test-extraconfig);
+
+  # Simple GPT/UEFI configuration using systemd-boot with 3 partitions: ESP, swap & root filesystem
+  simpleUefiSystemdBoot = makeInstallerTest "simpleUefiSystemdBoot" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel gpt"
+          + " mkpart ESP fat32 1M 50MiB"  # /boot
+          + " set 1 boot on"
+          + " mkpart primary linux-swap 50MiB 1024MiB"
+          + " mkpart primary ext2 1024MiB -1MiB",  # /
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+          "mkfs.vfat -n BOOT /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=BOOT /mnt/boot",
+      )
+    '';
+    bootLoader = "systemd-boot";
+  };
+
+  simpleUefiGrub = makeInstallerTest "simpleUefiGrub" simple-uefi-grub-config;
+
+  # Test cloned configurations with the uefi grub configuration
+  simpleUefiGrubSpecialisation = makeInstallerTest "simpleUefiGrubSpecialisation" (simple-uefi-grub-config // specialisation-test-extraconfig);
+
+  # Same as the previous, but now with a separate /boot partition.
+  separateBoot = makeInstallerTest "separateBoot" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary ext2 1M 50MB"  # /boot
+          + " mkpart primary linux-swap 50MB 1024M"
+          + " mkpart primary ext2 1024M -1s",  # /
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+      )
+    '';
+  };
+
+  # Same as the previous, but with fat32 /boot.
+  separateBootFat = makeInstallerTest "separateBootFat" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary ext2 1M 50MB"  # /boot
+          + " mkpart primary linux-swap 50MB 1024M"
+          + " mkpart primary ext2 1024M -1s",  # /
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+          "mkfs.vfat -n BOOT /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=BOOT /mnt/boot",
+      )
+    '';
+  };
+
+  # zfs on / with swap
+  zfsroot = makeInstallerTest "zfs-root" {
+    extraInstallerConfig = {
+      boot.supportedFilesystems = [ "zfs" ];
+    };
+
+    extraConfig = ''
+      boot.supportedFilesystems = [ "zfs" ];
+
+      # Using by-uuid overrides the default of by-id, and is unique
+      # to the qemu disks, as they don't produce by-id paths for
+      # some reason.
+      boot.zfs.devNodes = "/dev/disk/by-uuid/";
+      networking.hostId = "00000000";
+    '';
+
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary linux-swap 1M 1024M"
+          + " mkpart primary 1024M -1s",
+          "udevadm settle",
+          "mkswap /dev/vda1 -L swap",
+          "swapon -L swap",
+          "zpool create rpool /dev/vda2",
+          "zfs create -o mountpoint=legacy rpool/root",
+          "mount -t zfs rpool/root /mnt",
+          "udevadm settle",
+      )
+    '';
+  };
+
+  # Create two physical LVM partitions combined into one volume group
+  # that contains the logical swap and root partitions.
+  lvm = makeInstallerTest "lvm" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary 1M 2048M"  # PV1
+          + " set 1 lvm on"
+          + " mkpart primary 2048M -1s"  # PV2
+          + " set 2 lvm on",
+          "udevadm settle",
+          "pvcreate /dev/vda1 /dev/vda2",
+          "vgcreate MyVolGroup /dev/vda1 /dev/vda2",
+          "lvcreate --size 1G --name swap MyVolGroup",
+          "lvcreate --size 2G --name nixos MyVolGroup",
+          "mkswap -f /dev/MyVolGroup/swap -L swap",
+          "swapon -L swap",
+          "mkfs.xfs -L nixos /dev/MyVolGroup/nixos",
+          "mount LABEL=nixos /mnt",
+      )
+    '';
+  };
+
+  # Boot off an encrypted root partition with the default LUKS header format
+  luksroot = makeLuksRootTest "luksroot-format1" "";
+
+  # Boot off an encrypted root partition with LUKS1 format
+  luksroot-format1 = makeLuksRootTest "luksroot-format1" "--type=LUKS1";
+
+  # Boot off an encrypted root partition with LUKS2 format
+  luksroot-format2 = makeLuksRootTest "luksroot-format2" "--type=LUKS2";
+
+  # Test whether an encrypted filesystem can be opened with a keyfile.
+  # Checks for a regression where cryptsetup was missing from the initrd
+  # when no LUKS device without a keyfile was configured.
+  encryptedFSWithKeyfile = makeInstallerTest "encryptedFSWithKeyfile" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary ext2 1M 50MB"  # /boot
+          + " mkpart primary linux-swap 50M 1024M"
+          + " mkpart primary 1024M 1280M"  # LUKS with keyfile
+          + " mkpart primary 1280M -1s",
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda4",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+          "modprobe dm_mod dm_crypt",
+          "echo -n supersecret > /mnt/keyfile",
+          "cryptsetup luksFormat -q /dev/vda3 --key-file /mnt/keyfile",
+          "cryptsetup luksOpen --key-file /mnt/keyfile /dev/vda3 crypt",
+          "mkfs.ext3 -L test /dev/mapper/crypt",
+          "cryptsetup luksClose crypt",
+          "mkdir -p /mnt/test",
+      )
+    '';
+    extraConfig = ''
+      fileSystems."/test" = {
+        device = "/dev/disk/by-label/test";
+        fsType = "ext3";
+        encrypted.enable = true;
+        encrypted.blkDev = "/dev/vda3";
+        encrypted.label = "crypt";
+        encrypted.keyFile = "/mnt-root/keyfile";
+      };
+    '';
+  };
+
+  swraid = makeInstallerTest "swraid" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda --"
+          + " mklabel msdos"
+          + " mkpart primary ext2 1M 100MB"  # /boot
+          + " mkpart extended 100M -1s"
+          + " mkpart logical 102M 2102M"  # md0 (root), first device
+          + " mkpart logical 2103M 4103M"  # md0 (root), second device
+          + " mkpart logical 4104M 4360M"  # md1 (swap), first device
+          + " mkpart logical 4361M 4617M",  # md1 (swap), second device
+          "udevadm settle",
+          "ls -l /dev/vda* >&2",
+          "cat /proc/partitions >&2",
+          "udevadm control --stop-exec-queue",
+          "mdadm --create --force /dev/md0 --metadata 1.2 --level=raid1 "
+          + "--raid-devices=2 /dev/vda5 /dev/vda6",
+          "mdadm --create --force /dev/md1 --metadata 1.2 --level=raid1 "
+          + "--raid-devices=2 /dev/vda7 /dev/vda8",
+          "udevadm control --start-exec-queue",
+          "udevadm settle",
+          "mkswap -f /dev/md1 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/md0",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+          "udevadm settle",
+      )
+    '';
+    preBootCommands = ''
+      machine.start()
+      machine.fail("dmesg | grep 'immediate safe mode'")
+    '';
+  };
+
+  # Test a basic install using GRUB 1.
+  grub1 = makeInstallerTest "grub1" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/sda parted --script /dev/sda -- mklabel msdos"
+          + " mkpart primary linux-swap 1M 1024M"
+          + " mkpart primary ext2 1024M -1s",
+          "udevadm settle",
+          "mkswap /dev/sda1 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/sda2",
+          "mount LABEL=nixos /mnt",
+          "mkdir -p /mnt/tmp",
+      )
+    '';
+    grubVersion = 1;
+    grubDevice = "/dev/sda";
+  };
+
+  # Test using labels to identify volumes in grub
+  simpleLabels = makeInstallerTest "simpleLabels" {
+    createPartitions = ''
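+      # sgdisk -t type codes: ef02 = BIOS boot partition, 8200 = Linux swap,
+      # 8300 = Linux filesystem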
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext4 -L root /dev/vda3",
+          "mount LABEL=root /mnt",
+      )
+    '';
+    grubIdentifier = "label";
+  };
+
+  # Test using the provided disk name within grub
+  # TODO: Fix udev so the symlinks are unneeded in /dev/disks
+  simpleProvided = makeInstallerTest "simpleProvided" {
+    createPartitions = ''
+      uuid = "$(blkid -s UUID -o value /dev/vda2)"
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+100M -n 3:0:+1G -N 4 -t 1:ef02 -t 2:8300 "
+          + "-t 3:8200 -t 4:8300 -c 2:boot -c 4:root /dev/vda",
+          "mkswap /dev/vda3 -L swap",
+          "swapon -L swap",
+          "mkfs.ext4 -L boot /dev/vda2",
+          "mkfs.ext4 -L root /dev/vda4",
+      )
+      machine.execute(f"ln -s ../../vda2 /dev/disk/by-uuid/{uuid}")
+      machine.execute("ln -s ../../vda4 /dev/disk/by-label/root")
+      machine.succeed(
+          "mount /dev/disk/by-label/root /mnt",
+          "mkdir /mnt/boot",
+          f"mount /dev/disk/by-uuid/{uuid} /mnt/boot",
+      )
+    '';
+    grubIdentifier = "provided";
+  };
+
+  # Simple btrfs grub testing
+  btrfsSimple = makeInstallerTest "btrfsSimple" {
+    createPartitions = ''
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.btrfs -L root /dev/vda3",
+          "mount LABEL=root /mnt",
+      )
+    '';
+  };
+
+  # Test to see if we can detect /boot and /nix on subvolumes
+  btrfsSubvols = makeInstallerTest "btrfsSubvols" {
+    createPartitions = ''
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.btrfs -L root /dev/vda3",
+          "btrfs device scan",
+          "mount LABEL=root /mnt",
+          "btrfs subvol create /mnt/boot",
+          "btrfs subvol create /mnt/nixos",
+          "btrfs subvol create /mnt/nixos/default",
+          "umount /mnt",
+          "mount -o defaults,subvol=nixos/default LABEL=root /mnt",
+          "mkdir /mnt/boot",
+          "mount -o defaults,subvol=boot LABEL=root /mnt/boot",
+      )
+    '';
+  };
+
+  # Test to see if we can detect default and aux subvolumes correctly
+  btrfsSubvolDefault = makeInstallerTest "btrfsSubvolDefault" {
+    createPartitions = ''
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.btrfs -L root /dev/vda3",
+          "btrfs device scan",
+          "mount LABEL=root /mnt",
+          "btrfs subvol create /mnt/badpath",
+          "btrfs subvol create /mnt/badpath/boot",
+          "btrfs subvol create /mnt/nixos",
+          "btrfs subvol set-default "
+          + "$(btrfs subvol list /mnt | grep 'nixos' | awk '{print \$2}') /mnt",
+          "umount /mnt",
+          "mount -o defaults LABEL=root /mnt",
+          "mkdir -p /mnt/badpath/boot",  # Help ensure the detection mechanism
+          # is actually looking up subvolumes
+          "mkdir /mnt/boot",
+          "mount -o defaults,subvol=badpath/boot LABEL=root /mnt/boot",
+      )
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/iodine.nix b/nixpkgs/nixos/tests/iodine.nix
new file mode 100644
index 000000000000..41fb2e7778d4
--- /dev/null
+++ b/nixpkgs/nixos/tests/iodine.nix
@@ -0,0 +1,64 @@
+import ./make-test-python.nix (
+  { pkgs, ... }: let
+    domain = "whatever.example.com";
+    password = "false;foo;exit;withspecialcharacters";
+  in
+    {
+      name = "iodine";
+      nodes = {
+        server =
+          { ... }:
+
+            {
+              networking.firewall = {
+                allowedUDPPorts = [ 53 ];
+                trustedInterfaces = [ "dns0" ];
+              };
+              boot.kernel.sysctl = {
+                "net.ipv4.ip_forward" = 1;
+                "net.ipv6.ip_forward" = 1;
+              };
+
+              services.iodine.server = {
+                enable = true;
+                ip = "10.53.53.1/24";
+                passwordFile = "${builtins.toFile "password" password}";
+                inherit domain;
+              };
+
+              # test resource: accessible only via tunnel
+              services.openssh = {
+                enable = true;
+                openFirewall = false;
+              };
+            };
+
+        client =
+          { ... }: {
+            services.iodine.clients.testClient = {
+              # test that ProtectHome is "read-only"
+              passwordFile = "/root/pw";
+              relay = "server";
+              server = domain;
+            };
+            systemd.tmpfiles.rules = [
+              "f /root/pw 0666 root root - ${password}"
+            ];
+            environment.systemPackages = [
+              pkgs.nagiosPluginsOfficial
+            ];
+          };
+
+      };
+
+      testScript = ''
+        start_all()
+
+        server.wait_for_unit("sshd")
+        server.wait_for_unit("iodined")
+        client.wait_for_unit("iodine-testClient")
+
+        client.succeed("check_ssh -H 10.53.53.1")
+      '';
+    }
+)
diff --git a/nixpkgs/nixos/tests/ipfs.nix b/nixpkgs/nixos/tests/ipfs.nix
new file mode 100644
index 000000000000..3cff7e99ff88
--- /dev/null
+++ b/nixpkgs/nixos/tests/ipfs.nix
@@ -0,0 +1,55 @@
+
+import ./make-test.nix ({ pkgs, ...} : {
+  name = "ipfs";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mguentner ];
+  };
+
+  nodes = {
+    adder =
+      { ... }:
+      {
+        services.ipfs = {
+          enable = true;
+          defaultMode = "norouting";
+          gatewayAddress = "/ip4/127.0.0.1/tcp/2323";
+          apiAddress = "/ip4/127.0.0.1/tcp/2324";
+        };
+        networking.firewall.allowedTCPPorts = [ 4001 ];
+      };
+    getter =
+      { ... }:
+      {
+        services.ipfs = {
+          enable = true;
+          defaultMode = "norouting";
+          autoMount = true;
+        };
+        networking.firewall.allowedTCPPorts = [ 4001 ];
+      };
+  };
+
+  testScript = ''
+    startAll;
+    $adder->waitForUnit("ipfs-norouting");
+    $getter->waitForUnit("ipfs-norouting");
+
+    # wait until api is available
+    $adder->waitUntilSucceeds("ipfs --api /ip4/127.0.0.1/tcp/2324 id");
+    my $addrId = $adder->succeed("ipfs --api /ip4/127.0.0.1/tcp/2324 id -f=\"<id>\"");
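+    # "ip -o -4 addr show dev eth1" prints a single line such as
+    # "2: eth1    inet 192.168.1.1/24 brd ..." (example output), so after
+    # splitting on spaces and slashes, field 3 is the bare IPv4 address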
+    my $addrIp = (split /[ \/]+/, $adder->succeed("ip -o -4 addr show dev eth1"))[3];
+
+    $adder->mustSucceed("[ -n \"\$(ipfs --api /ip4/127.0.0.1/tcp/2324 config Addresses.Gateway | grep /ip4/127.0.0.1/tcp/2323)\" ]");
+
+    # wait until api is available
+    $getter->waitUntilSucceeds("ipfs --api /ip4/127.0.0.1/tcp/5001 id");
+    my $ipfsHash = $adder->mustSucceed("echo fnord | ipfs --api /ip4/127.0.0.1/tcp/2324 add | cut -d' ' -f2");
+    chomp($ipfsHash);
+
+    $adder->mustSucceed("[ -n \"\$(echo fnord | ipfs --api /ip4/127.0.0.1/tcp/2324 add | grep added)\" ]");
+
+    $getter->mustSucceed("ipfs --api /ip4/127.0.0.1/tcp/5001 swarm connect /ip4/$addrIp/tcp/4001/ipfs/$addrId");
+    $getter->mustSucceed("[ -n \"\$(ipfs --api /ip4/127.0.0.1/tcp/5001 cat /ipfs/$ipfsHash | grep fnord)\" ]");
+    $getter->mustSucceed("[ -n \"$(cat /ipfs/$ipfsHash | grep fnord)\" ]");
+    '';
+})
diff --git a/nixpkgs/nixos/tests/ipv6.nix b/nixpkgs/nixos/tests/ipv6.nix
new file mode 100644
index 000000000000..ba464b57447b
--- /dev/null
+++ b/nixpkgs/nixos/tests/ipv6.nix
@@ -0,0 +1,91 @@
+# Test of IPv6 functionality in NixOS, including whether router
+# solicitation/advertisement using radvd works.
+
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "ipv6";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes =
+    # Remove the interface configuration provided by makeTest so that the
+    # interfaces are all configured implicitly
+    { client = { ... }: { networking.interfaces = lib.mkForce {}; };
+
+      server =
+        { ... }:
+        { services.httpd.enable = true;
+          services.httpd.adminAddr = "foo@example.org";
+          networking.firewall.allowedTCPPorts = [ 80 ];
+        };
+
+      router =
+        { ... }:
+        { services.radvd.enable = true;
+          services.radvd.config =
+            ''
+              interface eth1 {
+                AdvSendAdvert on;
+                # ULA prefix (RFC 4193).
+                prefix fd60:cc69:b537:1::/64 { };
+              };
+            '';
+        };
+    };
+
+  testScript =
+    ''
+      import re
+
+      # Start the router first so that it responds to router solicitations.
+      router.wait_for_unit("radvd")
+
+      start_all()
+
+      client.wait_for_unit("network.target")
+      server.wait_for_unit("network.target")
+      server.wait_for_unit("httpd.service")
+
+      # Wait until the given interface has a non-tentative address of
+      # the desired scope (i.e. has completed Duplicate Address
+      # Detection).
+      def wait_for_address(machine, iface, scope, temporary=False):
+          temporary_flag = "temporary" if temporary else "-temporary"
+          cmd = f"ip -o -6 addr show dev {iface} scope {scope} -tentative {temporary_flag}"
+
+          machine.wait_until_succeeds(f"[ `{cmd} | wc -l` -eq 1 ]")
+          output = machine.succeed(cmd)
+          ip = re.search(r"inet6 ([0-9a-f:]{2,})/", output).group(1)
+
+          if temporary:
+              scope = scope + " temporary"
+          machine.log(f"{scope} address on {iface} is {ip}")
+          return ip
+
+
+      with subtest("Loopback address can be pinged"):
+          client.succeed("ping -c 1 ::1 >&2")
+          client.fail("ping -c 1 ::2 >&2")
+
+      with subtest("Local link addresses can be obtained and pinged"):
+          client_ip = wait_for_address(client, "eth1", "link")
+          server_ip = wait_for_address(server, "eth1", "link")
+          client.succeed(f"ping -c 1 {client_ip}%eth1 >&2")
+          client.succeed(f"ping -c 1 {server_ip}%eth1 >&2")
+
+      with subtest("Global addresses can be obtained, pinged, and reached via http"):
+          client_ip = wait_for_address(client, "eth1", "global")
+          server_ip = wait_for_address(server, "eth1", "global")
+          client.succeed(f"ping -c 1 {client_ip} >&2")
+          client.succeed(f"ping -c 1 {server_ip} >&2")
+          client.succeed(f"curl --fail -g http://[{server_ip}]")
+          client.fail(f"curl --fail -g http://[{client_ip}]")
+
+      with subtest("Privacy extensions: Global temporary address can be obtained and pinged"):
+          ip = wait_for_address(client, "eth1", "global", temporary=True)
+          # Default route should have "src <temporary address>" in it
+          client.succeed(f"ip r g ::2 | grep {ip}")
+
+      # TODO: test reachability of a machine on another network.
+    '';
+})
diff --git a/nixpkgs/nixos/tests/jackett.nix b/nixpkgs/nixos/tests/jackett.nix
new file mode 100644
index 000000000000..0a706c99b999
--- /dev/null
+++ b/nixpkgs/nixos/tests/jackett.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "jackett";
+  meta.maintainers = with maintainers; [ etu ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.jackett.enable = true; };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("jackett.service")
+    machine.wait_for_open_port(9117)
+    machine.succeed("curl --fail http://localhost:9117/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jellyfin.nix b/nixpkgs/nixos/tests/jellyfin.nix
new file mode 100644
index 000000000000..65360624d487
--- /dev/null
+++ b/nixpkgs/nixos/tests/jellyfin.nix
@@ -0,0 +1,16 @@
+import ./make-test-python.nix ({ lib, ...}:
+
+{
+  name = "jellyfin";
+  meta.maintainers = with lib.maintainers; [ minijackson ];
+
+  machine =
+    { ... }:
+    { services.jellyfin.enable = true; };
+
+  testScript = ''
+    machine.wait_for_unit("jellyfin.service")
+    machine.wait_for_open_port(8096)
+    machine.succeed("curl --fail http://localhost:8096/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jenkins.nix b/nixpkgs/nixos/tests/jenkins.nix
new file mode 100644
index 000000000000..cd64ff512878
--- /dev/null
+++ b/nixpkgs/nixos/tests/jenkins.nix
@@ -0,0 +1,49 @@
+# verifies:
+#   1. the jenkins service starts on the master node
+#   2. the jenkins user can be extended on both master and slave
+#   3. the jenkins service is not started on the slave node
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "jenkins";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ bjornfor coconnor domenkozar eelco ];
+  };
+
+  nodes = {
+
+    master =
+      { ... }:
+      { services.jenkins.enable = true;
+
+        # should have no effect
+        services.jenkinsSlave.enable = true;
+
+        users.users.jenkins.extraGroups = [ "users" ];
+
+        systemd.services.jenkins.serviceConfig.TimeoutStartSec = "6min";
+      };
+
+    slave =
+      { ... }:
+      { services.jenkinsSlave.enable = true;
+
+        users.users.jenkins.extraGroups = [ "users" ];
+      };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    master.wait_for_unit("jenkins")
+
+    assert "Authentication required" in master.succeed("curl http://localhost:8080")
+
+    for host in master, slave:
+        groups = host.succeed("sudo -u jenkins groups")
+        assert "jenkins" in groups
+        assert "users" in groups
+
+    slave.fail("systemctl is-enabled jenkins.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jirafeau.nix b/nixpkgs/nixos/tests/jirafeau.nix
new file mode 100644
index 000000000000..0f5af7f718a4
--- /dev/null
+++ b/nixpkgs/nixos/tests/jirafeau.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "jirafeau";
+  meta.maintainers = with maintainers; [ davidtwco ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.jirafeau = {
+      enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("phpfpm-jirafeau.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_open_port(80)
+    machine.succeed("curl -sSfL http://localhost/ | grep 'Jirafeau'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/k3s.nix b/nixpkgs/nixos/tests/k3s.nix
new file mode 100644
index 000000000000..5bda6f493f0e
--- /dev/null
+++ b/nixpkgs/nixos/tests/k3s.nix
@@ -0,0 +1,78 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  # A suitable k3s pause image, also used for the test pod
+  pauseImage = pkgs.dockerTools.buildImage {
+    name = "test.local/pause";
+    tag = "local";
+    contents = with pkgs; [ tini coreutils busybox ];
+    config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+  };
+  testPodYaml = pkgs.writeText "test.yml" ''
+    # Don't use the default service account because there's a race where it may
+    # not be created yet; make our own instead.
+    apiVersion: v1
+    kind: ServiceAccount
+    metadata:
+      name: test
+    ---
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: test
+    spec:
+      serviceAccountName: test
+      containers:
+      - name: test
+        image: test.local/pause:local
+        imagePullPolicy: Never
+        command: ["sh", "-c", "sleep inf"]
+  '';
+in
+{
+  name = "k3s";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ euank ];
+  };
+
+  nodes = {
+    k3s =
+      { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.k3s pkgs.gzip ];
+
+        # k3s uses enough resources that the default VM fails.
+        virtualisation.memorySize = pkgs.lib.mkDefault 1536;
+        virtualisation.diskSize = pkgs.lib.mkDefault 4096;
+
+        services.k3s.enable = true;
+        services.k3s.role = "server";
+        services.k3s.package = pkgs.k3s;
+        # Slightly reduce resource usage
+        services.k3s.extraFlags = "--no-deploy coredns,servicelb,traefik,local-storage,metrics-server --pause-image test.local/pause:local";
+
+        users.users = {
+          noprivs = {
+            isNormalUser = true;
+            description = "Can't access k3s by default";
+            password = "*";
+          };
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    k3s.wait_for_unit("k3s")
+    k3s.succeed("k3s kubectl cluster-info")
+    k3s.fail("sudo -u noprivs k3s kubectl cluster-info")
+    # k3s.succeed("k3s check-config") # fails with the current nixos kernel config, uncomment once this passes
+
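+    # dockerTools.buildImage produces a gzip-compressed image tarball, so it
+    # is decompressed here before being handed to containerd's image importer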
+    k3s.succeed(
+        "zcat ${pauseImage} | k3s ctr image import -"
+    )
+
+    k3s.succeed("k3s kubectl apply -f ${testPodYaml}")
+    k3s.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kafka.nix b/nixpkgs/nixos/tests/kafka.nix
new file mode 100644
index 000000000000..d29c802b47b4
--- /dev/null
+++ b/nixpkgs/nixos/tests/kafka.nix
@@ -0,0 +1,93 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with pkgs.lib;
+
+let
+  makeKafkaTest = name: kafkaPackage: (import ./make-test-python.nix ({
+    inherit name;
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ nequissimus ];
+    };
+
+    nodes = {
+      zookeeper1 = { ... }: {
+        services.zookeeper = {
+          enable = true;
+        };
+
+        networking.firewall.allowedTCPPorts = [ 2181 ];
+        virtualisation.memorySize = 1024;
+      };
+      kafka = { ... }: {
+        services.apache-kafka = {
+          enable = true;
+          extraProperties = ''
+            offsets.topic.replication.factor = 1
+            zookeeper.session.timeout.ms = 600000
+          '';
+          package = kafkaPackage;
+          zookeeper = "zookeeper1:2181";
+          # These are the default options, but UseCompressedOops doesn't work with a 32-bit JVM
+          jvmOptions = [
+            "-server" "-Xmx1G" "-Xms1G" "-XX:+UseParNewGC" "-XX:+UseConcMarkSweepGC" "-XX:+CMSClassUnloadingEnabled"
+            "-XX:+CMSScavengeBeforeRemark" "-XX:+DisableExplicitGC" "-Djava.awt.headless=true" "-Djava.net.preferIPv4Stack=true"
+          ] ++ optionals (! pkgs.stdenv.isi686 ) [ "-XX:+UseCompressedOops" ];
+        };
+
+        networking.firewall.allowedTCPPorts = [ 9092 ];
+        # i686 tests: qemu-system-i386 can emulate at most 2047 MB RAM (not 2048)
+        virtualisation.memorySize = 2047;
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      zookeeper1.wait_for_unit("default.target")
+      zookeeper1.wait_for_unit("zookeeper.service")
+      zookeeper1.wait_for_open_port(2181)
+
+      kafka.wait_for_unit("default.target")
+      kafka.wait_for_unit("apache-kafka.service")
+      kafka.wait_for_open_port(9092)
+
+      kafka.wait_until_succeeds(
+          "${kafkaPackage}/bin/kafka-topics.sh --create "
+          + "--zookeeper zookeeper1:2181 --partitions 1 "
+          + "--replication-factor 1 --topic testtopic"
+      )
+      kafka.succeed(
+          "echo 'test 1' | "
+          + "${kafkaPackage}/bin/kafka-console-producer.sh "
+          + "--broker-list localhost:9092 --topic testtopic"
+      )
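+      # Kafka 0.9's console consumer still reads offsets via ZooKeeper;
+      # later releases connect to a broker directly via --bootstrap-server.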
+    '' + (if name == "kafka_0_9" then ''
+      assert "test 1" in kafka.succeed(
+          "${kafkaPackage}/bin/kafka-console-consumer.sh "
+          + "--zookeeper zookeeper1:2181 --topic testtopic "
+          + "--from-beginning --max-messages 1"
+      )
+    '' else ''
+      assert "test 1" in kafka.succeed(
+          "${kafkaPackage}/bin/kafka-console-consumer.sh "
+          + "--bootstrap-server localhost:9092 --topic testtopic "
+          + "--from-beginning --max-messages 1"
+      )
+    '');
+  }) { inherit system; });
+
+in with pkgs; {
+  kafka_0_9  = makeKafkaTest "kafka_0_9"  apacheKafka_0_9;
+  kafka_0_10 = makeKafkaTest "kafka_0_10" apacheKafka_0_10;
+  kafka_0_11 = makeKafkaTest "kafka_0_11" apacheKafka_0_11;
+  kafka_1_0  = makeKafkaTest "kafka_1_0"  apacheKafka_1_0;
+  kafka_1_1  = makeKafkaTest "kafka_1_1"  apacheKafka_1_1;
+  kafka_2_0  = makeKafkaTest "kafka_2_0"  apacheKafka_2_0;
+  kafka_2_1  = makeKafkaTest "kafka_2_1"  apacheKafka_2_1;
+  kafka_2_2  = makeKafkaTest "kafka_2_2"  apacheKafka_2_2;
+  kafka_2_3  = makeKafkaTest "kafka_2_3"  apacheKafka_2_3;
+  kafka_2_4  = makeKafkaTest "kafka_2_4"  apacheKafka_2_4;
+}
diff --git a/nixpkgs/nixos/tests/keepalived.nix b/nixpkgs/nixos/tests/keepalived.nix
new file mode 100644
index 000000000000..d0bf9d465200
--- /dev/null
+++ b/nixpkgs/nixos/tests/keepalived.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "keepalived";
+
+  nodes = {
+    node1 = { pkgs, ... }: {
+      networking.firewall.extraCommands = "iptables -A INPUT -p vrrp -j ACCEPT";
+      services.keepalived.enable = true;
+      services.keepalived.vrrpInstances.test = {
+        interface = "eth1";
+        state = "MASTER";
+        priority = 50;
+        virtualIps = [{ addr = "192.168.1.200"; }];
+        virtualRouterId = 1;
+      };
+      environment.systemPackages = [ pkgs.tcpdump ];
+    };
+    node2 = { pkgs, ... }: {
+      networking.firewall.extraCommands = "iptables -A INPUT -p vrrp -j ACCEPT";
+      services.keepalived.enable = true;
+      services.keepalived.vrrpInstances.test = {
+        interface = "eth1";
+        state = "MASTER";
+        priority = 100;
+        virtualIps = [{ addr = "192.168.1.200"; }];
+        virtualRouterId = 1;
+      };
+      environment.systemPackages = [ pkgs.tcpdump ];
+    };
+  };
+
+  testScript = ''
+    # wait for boot time delay to pass
+    for node in [node1, node2]:
+        node.wait_until_succeeds(
+            "systemctl show -p LastTriggerUSecMonotonic keepalived-boot-delay.timer | grep -vq 'LastTriggerUSecMonotonic=0'"
+        )
+        node.wait_for_unit("keepalived")
+    node2.wait_until_succeeds("ip addr show dev eth1 | grep -q 192.168.1.200")
+    node1.fail("ip addr show dev eth1 | grep -q 192.168.1.200")
+    node1.succeed("ping -c1 192.168.1.200")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kerberos/default.nix b/nixpkgs/nixos/tests/kerberos/default.nix
new file mode 100644
index 000000000000..f2f1a438918c
--- /dev/null
+++ b/nixpkgs/nixos/tests/kerberos/default.nix
@@ -0,0 +1,7 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}:
+{
+  mit = import ./mit.nix { inherit system pkgs; };
+  heimdal = import ./heimdal.nix { inherit system pkgs; };
+}
diff --git a/nixpkgs/nixos/tests/kerberos/heimdal.nix b/nixpkgs/nixos/tests/kerberos/heimdal.nix
new file mode 100644
index 000000000000..8abae667d043
--- /dev/null
+++ b/nixpkgs/nixos/tests/kerberos/heimdal.nix
@@ -0,0 +1,42 @@
+import ../make-test-python.nix ({pkgs, ...}: {
+  name = "kerberos_server-heimdal";
+  machine = { config, lib, pkgs, ... }:
+  { services.kerberos_server =
+    { enable = true;
+      realms = {
+        "FOO.BAR".acl = [{principal = "admin"; access = ["add" "cpw"];}];
+      };
+    };
+    krb5 = {
+      enable = true;
+      kerberos = pkgs.heimdalFull;
+      libdefaults = {
+        default_realm = "FOO.BAR";
+      };
+      realms = {
+        "FOO.BAR" = {
+          admin_server = "machine";
+          kdc = "machine";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.succeed(
+        "kadmin -l init --realm-max-ticket-life='8 day' --realm-max-renewable-life='10 day' FOO.BAR",
+        "systemctl restart kadmind.service kdc.service",
+    )
+
+    for unit in ["kadmind", "kdc", "kpasswdd"]:
+        machine.wait_for_unit(f"{unit}.service")
+
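+    # bootstrap: create the admin principal locally, export its keytab, then
+    # use that keytab for an authenticated kadmin session that creates and
+    # exports "alice", who finally obtains a ticket with kinit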
+    machine.succeed(
+        "kadmin -l add --password=admin_pw --use-defaults admin",
+        "kadmin -l ext_keytab --keytab=admin.keytab admin",
+        "kadmin -p admin -K admin.keytab add --password=alice_pw --use-defaults alice",
+        "kadmin -l ext_keytab --keytab=alice.keytab alice",
+        "kinit -kt alice.keytab alice",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kerberos/mit.nix b/nixpkgs/nixos/tests/kerberos/mit.nix
new file mode 100644
index 000000000000..93b4020d4994
--- /dev/null
+++ b/nixpkgs/nixos/tests/kerberos/mit.nix
@@ -0,0 +1,41 @@
+import ../make-test-python.nix ({pkgs, ...}: {
+  name = "kerberos_server-mit";
+  machine = { config, lib, pkgs, ... }:
+  { services.kerberos_server =
+    { enable = true;
+      realms = {
+        "FOO.BAR".acl = [{principal = "admin"; access = ["add" "cpw"];}];
+      };
+    };
+    krb5 = {
+      enable = true;
+      kerberos = pkgs.krb5Full;
+      libdefaults = {
+        default_realm = "FOO.BAR";
+      };
+      realms = {
+        "FOO.BAR" = {
+          admin_server = "machine";
+          kdc = "machine";
+        };
+      };
+    };
+    users.extraUsers.alice = { isNormalUser = true; };
+  };
+
+  testScript = ''
+    machine.succeed(
+        "kdb5_util create -s -r FOO.BAR -P master_key",
+        "systemctl restart kadmind.service kdc.service",
+    )
+
+    for unit in ["kadmind", "kdc"]:
+        machine.wait_for_unit(f"{unit}.service")
+
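+    # the admin principal is created locally via kadmin.local, then used over
+    # the network (password authentication) to create "alice", who obtains a
+    # ticket with kinit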
+    machine.succeed(
+        "kadmin.local add_principal -pw admin_pw admin",
+        "kadmin -p admin -w admin_pw addprinc -pw alice_pw alice",
+        "echo alice_pw | sudo -u alice kinit",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kernel-latest.nix b/nixpkgs/nixos/tests/kernel-latest.nix
new file mode 100644
index 000000000000..f09d0926d223
--- /dev/null
+++ b/nixpkgs/nixos/tests/kernel-latest.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "kernel-latest";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  machine = { pkgs, ... }:
+    {
+      boot.kernelPackages = pkgs.linuxPackages_latest;
+    };
+
+  testScript =
+    ''
+      assert "Linux" in machine.succeed("uname -s")
+      assert "${pkgs.linuxPackages_latest.kernel.version}" in machine.succeed("uname -a")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/kernel-lts.nix b/nixpkgs/nixos/tests/kernel-lts.nix
new file mode 100644
index 000000000000..bad706d63c03
--- /dev/null
+++ b/nixpkgs/nixos/tests/kernel-lts.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "kernel-lts";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  machine = { pkgs, ... }:
+    {
+      boot.kernelPackages = pkgs.linuxPackages;
+    };
+
+  testScript =
+    ''
+      assert "Linux" in machine.succeed("uname -s")
+      assert "${pkgs.linuxPackages.kernel.version}" in machine.succeed("uname -a")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/kernel-testing.nix b/nixpkgs/nixos/tests/kernel-testing.nix
new file mode 100644
index 000000000000..b7e10ebd5bd1
--- /dev/null
+++ b/nixpkgs/nixos/tests/kernel-testing.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "kernel-testing";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  machine = { pkgs, ... }:
+    {
+      boot.kernelPackages = pkgs.linuxPackages_testing;
+    };
+
+  testScript =
+    ''
+      assert "Linux" in machine.succeed("uname -s")
+      assert "${pkgs.linuxPackages_testing.kernel.modDirVersion}" in machine.succeed("uname -a")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/kexec.nix b/nixpkgs/nixos/tests/kexec.nix
new file mode 100644
index 000000000000..ec0cd9796b0e
--- /dev/null
+++ b/nixpkgs/nixos/tests/kexec.nix
@@ -0,0 +1,25 @@
+# Test whether fast reboots via kexec work.
+
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "kexec";
+  meta = with lib.maintainers; {
+    maintainers = [ eelco ];
+    # Currently hangs forever; last output is:
+    #     machine # [   10.239914] dhcpcd[707]: eth0: adding default route via fe80::2
+    #     machine: waiting for the VM to finish booting
+    #     machine # Cannot find the ESP partition mount point.
+    #     machine # [   28.681197] nscd[692]: 692 checking for monitored file `/etc/netgroup': No such file or directory
+    broken = true;
+  };
+
+  machine = { ... }:
+    { virtualisation.vlans = [ ]; };
+
+  testScript =
+    ''
+      machine.wait_for_unit("multi-user.target")
+      machine.execute("systemctl kexec &")
+      machine.connected = False
+      machine.wait_for_unit("multi-user.target")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/keymap.nix b/nixpkgs/nixos/tests/keymap.nix
new file mode 100644
index 000000000000..09d5d2a6c9e1
--- /dev/null
+++ b/nixpkgs/nixos/tests/keymap.nix
@@ -0,0 +1,175 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  readyFile  = "/tmp/readerReady";
+  resultFile = "/tmp/readerResult";
+
+  testReader = pkgs.writeScript "test-input-reader" ''
+    rm -f ${resultFile} ${resultFile}.tmp
+    logger "testReader: START: Waiting for $1 characters, expecting '$2'."
+    touch ${readyFile}
+    read -r -N $1 chars
+    rm -f ${readyFile}
+
+    if [ "$chars" == "$2" ]; then
+      logger -s "testReader: PASS: Got '$2' as expected." 2>${resultFile}.tmp
+    else
+      logger -s "testReader: FAIL: Expected '$2' but got '$chars'." 2>${resultFile}.tmp
+    fi
+    # rename after the file is written to prevent a race condition
+    mv  ${resultFile}.tmp ${resultFile}
+  '';
+
+
+  mkKeyboardTest = layout: { extraConfig ? {}, tests }: with pkgs.lib; makeTest {
+    name = "keymap-${layout}";
+
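+    # priority 900 beats mkDefault (1000) but loses to the plain
+    # (priority 100) definitions in each layout's extraConfig below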
+    machine.console.keyMap = mkOverride 900 layout;
+    machine.services.xserver.desktopManager.xterm.enable = false;
+    machine.services.xserver.layout = mkOverride 900 layout;
+    machine.imports = [ ./common/x11.nix extraConfig ];
+
+    testScript = ''
+      import json
+      import shlex
+
+
+      def run_test_case(cmd, xorg_keymap, test_case_name, inputs, expected):
+          with subtest(test_case_name):
+              assert len(inputs) == len(expected)
+              machine.execute("rm -f ${readyFile} ${resultFile}")
+
+              # set up process that expects all the keys to be entered
+              machine.succeed(
+                  "{} {} {} {} &".format(
+                      cmd,
+                      "${testReader}",
+                      len(inputs),
+                      shlex.quote("".join(expected)),
+                  )
+              )
+
+              if xorg_keymap:
+                  # make sure the xterm window is open and has focus
+                  machine.wait_for_window("testterm")
+                  machine.wait_until_succeeds(
+                      "${pkgs.xdotool}/bin/xdotool search --sync --onlyvisible "
+                      "--class testterm windowfocus --sync"
+                  )
+
+              # wait for reader to be ready
+              machine.wait_for_file("${readyFile}")
+              machine.sleep(1)
+
+              # send all keys
+              for key in inputs:
+                  machine.send_key(key)
+
+              # wait for result and check
+              machine.wait_for_file("${resultFile}")
+              machine.succeed("grep -q 'PASS:' ${resultFile}")
+
+
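+      # the Nix-level test table is serialized to JSON at build time and
+      # loaded here, so the same data drives both keymap environments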
+      with open("${pkgs.writeText "tests.json" (builtins.toJSON tests)}") as json_file:
+          tests = json.load(json_file)
+
+      keymap_environments = {
+          "VT Keymap": "openvt -sw --",
+          "Xorg Keymap": "DISPLAY=:0 xterm -title testterm -class testterm -fullscreen -e",
+      }
+
+      machine.wait_for_x()
+
+      for keymap_env_name, command in keymap_environments.items():
+          with subtest(keymap_env_name):
+              for test_case_name, test_data in tests.items():
+                  run_test_case(
+                      command,
+                      keymap_env_name == "Xorg Keymap",
+                      test_case_name,
+                      test_data["qwerty"],
+                      test_data["expect"],
+                  )
+    '';
+  };
+
+in pkgs.lib.mapAttrs mkKeyboardTest {
+  azerty = {
+    tests = {
+      azqw.qwerty = [ "q" "w" ];
+      azqw.expect = [ "a" "z" ];
+      altgr.qwerty = [ "alt_r-2" "alt_r-3" "alt_r-4" "alt_r-5" "alt_r-6" ];
+      altgr.expect = [ "~"       "#"       "{"       "["       "|"       ];
+    };
+
+    extraConfig.console.keyMap = "azerty/fr";
+    extraConfig.services.xserver.layout = "fr";
+  };
+
+  colemak = {
+    tests = {
+      homerow.qwerty = [ "a" "s" "d" "f" "j" "k" "l" "semicolon" ];
+      homerow.expect = [ "a" "r" "s" "t" "n" "e" "i" "o"         ];
+    };
+
+    extraConfig.console.keyMap = "colemak/colemak";
+    extraConfig.services.xserver.layout = "us";
+    extraConfig.services.xserver.xkbVariant = "colemak";
+  };
+
+  dvorak = {
+    tests = {
+      homerow.qwerty = [ "a" "s" "d" "f" "j" "k" "l" "semicolon" ];
+      homerow.expect = [ "a" "o" "e" "u" "h" "t" "n" "s"         ];
+      symbols.qwerty = [ "q" "w" "e" "minus" "equal" ];
+      symbols.expect = [ "'" "," "." "["     "]"     ];
+    };
+  };
+
+  dvp = {
+    tests = {
+      homerow.qwerty = [ "a" "s" "d" "f" "j" "k" "l" "semicolon" ];
+      homerow.expect = [ "a" "o" "e" "u" "h" "t" "n" "s"         ];
+      numbers.qwerty = map (x: "shift-${x}")
+                       [ "1" "2" "3" "4" "5" "6" "7" "8" "9" "0" "minus" ];
+      numbers.expect = [ "%" "7" "5" "3" "1" "9" "0" "2" "4" "6" "8" ];
+      symbols.qwerty = [ "1" "2" "3" "4" "5" "6" "7" "8" "9" "0" "minus" ];
+      symbols.expect = [ "&" "[" "{" "}" "(" "=" "*" ")" "+" "]" "!" ];
+    };
+
+    extraConfig.services.xserver.layout = "us";
+    extraConfig.services.xserver.xkbVariant = "dvp";
+  };
+
+  neo = {
+    tests = {
+      layer1.qwerty = [ "f"           "j"                     ];
+      layer1.expect = [ "e"           "n"                     ];
+      layer2.qwerty = [ "shift-f"     "shift-j"     "shift-6" ];
+      layer2.expect = [ "E"           "N"           "$"       ];
+      layer3.qwerty = [ "caps_lock-d" "caps_lock-f"           ];
+      layer3.expect = [ "{"           "}"                     ];
+    };
+
+    extraConfig.services.xserver.layout = "de";
+    extraConfig.services.xserver.xkbVariant = "neo";
+  };
+
+  qwertz = {
+    tests = {
+      zy.qwerty = [ "z" "y" ];
+      zy.expect = [ "y" "z" ];
+      altgr.qwerty = map (x: "alt_r-${x}")
+                     [ "q" "less" "7" "8" "9" "0" ];
+      altgr.expect = [ "@" "|"    "{" "[" "]" "}" ];
+    };
+
+    extraConfig.console.keyMap = "de";
+    extraConfig.services.xserver.layout = "de";
+  };
+}
diff --git a/nixpkgs/nixos/tests/knot.nix b/nixpkgs/nixos/tests/knot.nix
new file mode 100644
index 000000000000..8bab917a351e
--- /dev/null
+++ b/nixpkgs/nixos/tests/knot.nix
@@ -0,0 +1,210 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+let
+  common = {
+    networking.firewall.enable = false;
+    networking.useDHCP = false;
+  };
+  exampleZone = pkgs.writeTextDir "example.com.zone" ''
+      @ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
+      @       NS      ns1
+      @       NS      ns2
+      ns1     A       192.168.0.1
+      ns1     AAAA    fd00::1
+      ns2     A       192.168.0.2
+      ns2     AAAA    fd00::2
+      www     A       192.0.2.1
+      www     AAAA    2001:DB8::1
+      sub     NS      ns.example.com.
+  '';
+  delegatedZone = pkgs.writeTextDir "sub.example.com.zone" ''
+      @ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
+      @       NS      ns1.example.com.
+      @       NS      ns2.example.com.
+      @       A       192.0.2.2
+      @       AAAA    2001:DB8::2
+  '';
+
+  knotZonesEnv = pkgs.buildEnv {
+    name = "knot-zones";
+    paths = [ exampleZone delegatedZone ];
+  };
+  # DO NOT USE pkgs.writeText IN PRODUCTION. This puts secrets in the Nix store!
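+  # A real deployment would generate the secret out-of-band, e.g. with Knot's
+  # keymgr (assuming it is on PATH): keymgr -t slave_key hmac-sha256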
+  tsigFile = pkgs.writeText "tsig.conf" ''
+    key:
+      - id: slave_key
+        algorithm: hmac-sha256
+        secret: zOYgOgnzx3TGe5J5I/0kxd7gTcxXhLYMEq3Ek3fY37s=
+  '';
+in {
+  name = "knot";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ hexa ];
+  };
+
+
+  nodes = {
+    master = { lib, ... }: {
+      imports = [ common ];
+      networking.interfaces.eth1 = {
+        ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.1"; prefixLength = 24; }
+        ];
+        ipv6.addresses = lib.mkForce [
+          { address = "fd00::1"; prefixLength = 64; }
+        ];
+      };
+      services.knot.enable = true;
+      services.knot.extraArgs = [ "-v" ];
+      services.knot.keyFiles = [ tsigFile ];
+      services.knot.extraConfig = ''
+        server:
+            listen: 0.0.0.0@53
+            listen: ::@53
+
+        acl:
+          - id: slave_acl
+            address: 192.168.0.2
+            key: slave_key
+            action: transfer
+
+        remote:
+          - id: slave
+            address: 192.168.0.2@53
+
+        template:
+          - id: default
+            storage: ${knotZonesEnv}
+            notify: [slave]
+            acl: [slave_acl]
+            dnssec-signing: on
+            # Input-only zone files
+            # https://www.knot-dns.cz/docs/2.8/html/operation.html#example-3
+            # prevents Knot from modifying the zone files, which are immutable store paths
+            zonefile-sync: -1
+            zonefile-load: difference
+            journal-content: changes
+            # move databases below the state directory, because they need to be writable
+            journal-db: /var/lib/knot/journal
+            kasp-db: /var/lib/knot/kasp
+            timer-db: /var/lib/knot/timer
+
+        zone:
+          - domain: example.com
+            file: example.com.zone
+
+          - domain: sub.example.com
+            file: sub.example.com.zone
+
+        log:
+          - target: syslog
+            any: info
+      '';
+    };
+
+    slave = { lib, ... }: {
+      imports = [ common ];
+      networking.interfaces.eth1 = {
+        ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.2"; prefixLength = 24; }
+        ];
+        ipv6.addresses = lib.mkForce [
+          { address = "fd00::2"; prefixLength = 64; }
+        ];
+      };
+      services.knot.enable = true;
+      services.knot.keyFiles = [ tsigFile ];
+      services.knot.extraArgs = [ "-v" ];
+      services.knot.extraConfig = ''
+        server:
+            listen: 0.0.0.0@53
+            listen: ::@53
+
+        acl:
+          - id: notify_from_master
+            address: 192.168.0.1
+            action: notify
+
+        remote:
+          - id: master
+            address: 192.168.0.1@53
+            key: slave_key
+
+        template:
+          - id: default
+            master: master
+            acl: [notify_from_master]
+            # zonefileless setup
+            # https://www.knot-dns.cz/docs/2.8/html/operation.html#example-2
+            zonefile-sync: -1
+            zonefile-load: none
+            journal-content: all
+            # move databases below the state directory, because they need to be writable
+            journal-db: /var/lib/knot/journal
+            kasp-db: /var/lib/knot/kasp
+            timer-db: /var/lib/knot/timer
+
+        zone:
+          - domain: example.com
+            file: example.com.zone
+
+          - domain: sub.example.com
+            file: sub.example.com.zone
+
+        log:
+          - target: syslog
+            any: info
+      '';
+    };
+    client = { lib, nodes, ... }: {
+      imports = [ common ];
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          { address = "192.168.0.3"; prefixLength = 24; }
+        ];
+        ipv6.addresses = [
+          { address = "fd00::3"; prefixLength = 64; }
+        ];
+      };
+      environment.systemPackages = [ pkgs.knot-dns ];
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    master4 = (lib.head nodes.master.config.networking.interfaces.eth1.ipv4.addresses).address;
+    master6 = (lib.head nodes.master.config.networking.interfaces.eth1.ipv6.addresses).address;
+
+    slave4 = (lib.head nodes.slave.config.networking.interfaces.eth1.ipv4.addresses).address;
+    slave6 = (lib.head nodes.slave.config.networking.interfaces.eth1.ipv6.addresses).address;
+  in ''
+    import re
+
+    start_all()
+
+    client.wait_for_unit("network.target")
+    master.wait_for_unit("knot.service")
+    slave.wait_for_unit("knot.service")
+
+
+    def test(host, query_type, query, pattern):
+        out = client.succeed(f"khost -t {query_type} {query} {host}").strip()
+        client.log(f"{host} replied with: {out}")
+        assert re.search(pattern, out), f'Did not match "{pattern}"'
+
+
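+    # the master signs the zones (dnssec-signing: on) and the slave serves the
+    # signed copies received via zone transfer, so the RRSIG/DNSKEY checks
+    # below must pass on all four addresses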
+    for host in ("${master4}", "${master6}", "${slave4}", "${slave6}"):
+        with subtest(f"Interrogate {host}"):
+            test(host, "SOA", "example.com", r"start of authority.*noc\.example\.com\.")
+            test(host, "A", "example.com", r"has no [^ ]+ record")
+            test(host, "AAAA", "example.com", r"has no [^ ]+ record")
+
+            test(host, "A", "www.example.com", r"address 192.0.2.1$")
+            test(host, "AAAA", "www.example.com", r"address 2001:db8::1$")
+
+            test(host, "NS", "sub.example.com", r"nameserver is ns\d\.example\.com.$")
+            test(host, "A", "sub.example.com", r"address 192.0.2.2$")
+            test(host, "AAAA", "sub.example.com", r"address 2001:db8::2$")
+
+            test(host, "RRSIG", "www.example.com", r"RR set signature is")
+            test(host, "DNSKEY", "example.com", r"DNSSEC key is")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/krb5/default.nix b/nixpkgs/nixos/tests/krb5/default.nix
new file mode 100644
index 000000000000..dd5b2f37202e
--- /dev/null
+++ b/nixpkgs/nixos/tests/krb5/default.nix
@@ -0,0 +1,5 @@
+{ system ? builtins.currentSystem }:
+{
+  example-config = import ./example-config.nix { inherit system; };
+  deprecated-config = import ./deprecated-config.nix { inherit system; };
+}
diff --git a/nixpkgs/nixos/tests/krb5/deprecated-config.nix b/nixpkgs/nixos/tests/krb5/deprecated-config.nix
new file mode 100644
index 000000000000..be6ebce9e051
--- /dev/null
+++ b/nixpkgs/nixos/tests/krb5/deprecated-config.nix
@@ -0,0 +1,50 @@
+# Verifies that the configuration suggested in deprecated example values
+# will result in the expected output.
+
+import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "krb5-with-deprecated-config";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eqyiel ];
+  };
+
+  machine =
+    { ... }: {
+      krb5 = {
+        enable = true;
+        defaultRealm = "ATHENA.MIT.EDU";
+        domainRealm = "athena.mit.edu";
+        kdc = "kerberos.mit.edu";
+        kerberosAdminServer = "kerberos.mit.edu";
+      };
+    };
+
+  testScript =
+    let snapshot = pkgs.writeText "krb5-with-deprecated-config.conf" ''
+      [libdefaults]
+        default_realm = ATHENA.MIT.EDU
+
+      [realms]
+        ATHENA.MIT.EDU = {
+          admin_server = kerberos.mit.edu
+          kdc = kerberos.mit.edu
+        }
+
+      [domain_realm]
+        .athena.mit.edu = ATHENA.MIT.EDU
+        athena.mit.edu = ATHENA.MIT.EDU
+
+      [capaths]
+
+
+      [appdefaults]
+
+
+      [plugins]
+
+    '';
+  in ''
+    machine.succeed(
+        "diff /etc/krb5.conf ${snapshot}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/krb5/example-config.nix b/nixpkgs/nixos/tests/krb5/example-config.nix
new file mode 100644
index 000000000000..be195b513935
--- /dev/null
+++ b/nixpkgs/nixos/tests/krb5/example-config.nix
@@ -0,0 +1,108 @@
+# Verifies that the configuration suggested in (non-deprecated) example values
+# will result in the expected output.
+
+import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "krb5-with-example-config";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eqyiel ];
+  };
+
+  machine =
+    { pkgs, ... }: {
+      krb5 = {
+        enable = true;
+        kerberos = pkgs.krb5Full;
+        libdefaults = {
+          default_realm = "ATHENA.MIT.EDU";
+        };
+        realms = {
+          "ATHENA.MIT.EDU" = {
+            admin_server = "athena.mit.edu";
+            kdc = "athena.mit.edu";
+          };
+        };
+        domain_realm = {
+          "example.com" = "EXAMPLE.COM";
+          ".example.com" = "EXAMPLE.COM";
+        };
+        capaths = {
+          "ATHENA.MIT.EDU" = {
+            "EXAMPLE.COM" = ".";
+          };
+          "EXAMPLE.COM" = {
+            "ATHENA.MIT.EDU" = ".";
+          };
+        };
+        appdefaults = {
+          pam = {
+            debug = false;
+            ticket_lifetime = 36000;
+            renew_lifetime = 36000;
+            max_timeout = 30;
+            timeout_shift = 2;
+            initial_timeout = 1;
+          };
+        };
+        plugins = {
+          ccselect = {
+            disable = "k5identity";
+          };
+        };
+        extraConfig = ''
+          [logging]
+            kdc          = SYSLOG:NOTICE
+            admin_server = SYSLOG:NOTICE
+            default      = SYSLOG:NOTICE
+        '';
+      };
+    };
+
+  testScript =
+    let snapshot = pkgs.writeText "krb5-with-example-config.conf" ''
+      [libdefaults]
+        default_realm = ATHENA.MIT.EDU
+
+      [realms]
+        ATHENA.MIT.EDU = {
+          admin_server = athena.mit.edu
+          kdc = athena.mit.edu
+        }
+
+      [domain_realm]
+        .example.com = EXAMPLE.COM
+        example.com = EXAMPLE.COM
+
+      [capaths]
+        ATHENA.MIT.EDU = {
+          EXAMPLE.COM = .
+        }
+        EXAMPLE.COM = {
+          ATHENA.MIT.EDU = .
+        }
+
+      [appdefaults]
+        pam = {
+          debug = false
+          initial_timeout = 1
+          max_timeout = 30
+          renew_lifetime = 36000
+          ticket_lifetime = 36000
+          timeout_shift = 2
+        }
+
+      [plugins]
+        ccselect = {
+          disable = k5identity
+        }
+
+      [logging]
+        kdc          = SYSLOG:NOTICE
+        admin_server = SYSLOG:NOTICE
+        default      = SYSLOG:NOTICE
+    '';
+  in ''
+    machine.succeed(
+        "diff /etc/krb5.conf ${snapshot}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kubernetes/base.nix b/nixpkgs/nixos/tests/kubernetes/base.nix
new file mode 100644
index 000000000000..adb736506895
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/base.nix
@@ -0,0 +1,111 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; }
+}:
+
+with import ../../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  mkKubernetesBaseTest =
+    { name, domain ? "my.zyx", test, machines
+    , pkgs ? import <nixpkgs> { inherit system; }
+    , extraConfiguration ? null }:
+    let
+      masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
+      master = machines.${masterName};
+      extraHosts = ''
+        ${master.ip}  etcd.${domain}
+        ${master.ip}  api.${domain}
+        ${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip}  ${machineName}.${domain}") (attrNames machines)}
+      '';
+      kubectl = with pkgs; runCommand "wrap-kubectl" { buildInputs = [ makeWrapper ]; } ''
+        mkdir -p $out/bin
+        makeWrapper ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl --set KUBECONFIG "/etc/kubernetes/cluster-admin.kubeconfig"
+      '';
+    in makeTest {
+      inherit name;
+
+      nodes = mapAttrs (machineName: machine:
+        { config, pkgs, lib, nodes, ... }:
+          mkMerge [
+            {
+              boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
+              virtualisation.memorySize = mkDefault 1536;
+              virtualisation.diskSize = mkDefault 4096;
+              networking = {
+                inherit domain extraHosts;
+                primaryIPAddress = mkForce machine.ip;
+
+                firewall = {
+                  allowedTCPPorts = [
+                    10250 # kubelet
+                  ];
+                  trustedInterfaces = ["docker0"];
+
+                  extraCommands = concatMapStrings  (node: ''
+                    iptables -A INPUT -s ${node.config.networking.primaryIPAddress} -j ACCEPT
+                  '') (attrValues nodes);
+                };
+              };
+              programs.bash.enableCompletion = true;
+              environment.systemPackages = [ kubectl ];
+              services.flannel.iface = "eth1";
+              services.kubernetes = {
+                addons.dashboard.enable = true;
+                proxy.hostname = "${masterName}.${domain}";
+
+                easyCerts = true;
+                inherit (machine) roles;
+                apiserver = {
+                  securePort = 443;
+                  advertiseAddress = master.ip;
+                };
+                masterAddress = "${masterName}.${config.networking.domain}";
+              };
+            }
+            (optionalAttrs (any (role: role == "master") machine.roles) {
+              networking.firewall.allowedTCPPorts = [
+                443 # kubernetes apiserver
+              ];
+            })
+            (optionalAttrs (machine ? extraConfiguration) (machine.extraConfiguration { inherit config pkgs lib nodes; }))
+            (optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
+          ]
+      ) machines;
+
+      testScript = ''
+        startAll;
+
+        ${test}
+      '';
+    };
+
+  mkKubernetesMultiNodeTest = attrs: mkKubernetesBaseTest ({
+    machines = {
+      machine1 = {
+        roles = ["master"];
+        ip = "192.168.1.1";
+      };
+      machine2 = {
+        roles = ["node"];
+        ip = "192.168.1.2";
+      };
+    };
+  } // attrs // {
+    name = "kubernetes-${attrs.name}-multinode";
+  });
+
+  mkKubernetesSingleNodeTest = attrs: mkKubernetesBaseTest ({
+    machines = {
+      machine1 = {
+        roles = ["master" "node"];
+        ip = "192.168.1.1";
+      };
+    };
+  } // attrs // {
+    name = "kubernetes-${attrs.name}-singlenode";
+  });
+in {
+  inherit mkKubernetesBaseTest mkKubernetesSingleNodeTest mkKubernetesMultiNodeTest;
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/default.nix b/nixpkgs/nixos/tests/kubernetes/default.nix
new file mode 100644
index 000000000000..a801759bf582
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/default.nix
@@ -0,0 +1,7 @@
+{ system ? builtins.currentSystem }:
+{
+  dns = import ./dns.nix { inherit system; };
+  # e2e = import ./e2e.nix { inherit system; };  # TODO: make it pass
+  # the following test(s) can be removed when e2e is working:
+  rbac = import ./rbac.nix { inherit system; };
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/dns.nix b/nixpkgs/nixos/tests/kubernetes/dns.nix
new file mode 100644
index 000000000000..638942e15407
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/dns.nix
@@ -0,0 +1,128 @@
+{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+  domain = "my.zyx";
+
+  redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "redis";
+    metadata.labels.name = "redis";
+    spec.containers = [{
+      name = "redis";
+      image = "redis";
+      args = ["--bind" "0.0.0.0"];
+      imagePullPolicy = "Never";
+      ports = [{
+        name = "redis-server";
+        containerPort = 6379;
+      }];
+    }];
+  });
+
+  redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
+    kind = "Service";
+    apiVersion = "v1";
+    metadata.name = "redis";
+    spec = {
+      ports = [{port = 6379; targetPort = 6379;}];
+      selector = {name = "redis";};
+    };
+  });
+
+  redisImage = pkgs.dockerTools.buildImage {
+    name = "redis";
+    tag = "latest";
+    contents = [ pkgs.redis pkgs.bind.host ];
+    config.Entrypoint = "/bin/redis-server";
+  };
+
+  probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "probe";
+    metadata.labels.name = "probe";
+    spec.containers = [{
+      name = "probe";
+      image = "probe";
+      args = [ "-f" ];
+      tty = true;
+      imagePullPolicy = "Never";
+    }];
+  });
+
+  probeImage = pkgs.dockerTools.buildImage {
+    name = "probe";
+    tag = "latest";
+    contents = [ pkgs.bind.host pkgs.busybox ];
+    config.Entrypoint = "/bin/tail";
+  };
+
+  extraConfiguration = { config, pkgs, ... }: {
+    environment.systemPackages = [ pkgs.bind.host ];
+    # virtualisation.docker.extraOptions = "--dns=${config.services.kubernetes.addons.dns.clusterIp}";
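+    # forward *.cluster.local queries to the cluster DNS service, so that
+    # `host` lookups work on the VM itself and not only inside pods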
+    services.dnsmasq.enable = true;
+    services.dnsmasq.servers = [
+      "/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
+    ];
+  };
+
+  base = {
+    name = "dns";
+    inherit domain extraConfiguration;
+  };
+
+  singleNodeTest = {
+    test = ''
+      # prepare machine1 for test
+      $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
+      $machine1->waitUntilSucceeds("docker load < ${redisImage}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
+      $machine1->waitUntilSucceeds("docker load < ${probeImage}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
+
+      # check if pods are running
+      $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
+      $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
+      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");
+
+      # check dns on host (dnsmasq)
+      $machine1->succeed("host redis.default.svc.cluster.local");
+
+      # check dns inside the container
+      $machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
+    '';
+  };
+
+  multiNodeTest = {
+    test = ''
+      # Node token exchange
+      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
+      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+
+      # prepare machines for test
+      $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
+      $machine2->waitUntilSucceeds("docker load < ${redisImage}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
+      $machine2->waitUntilSucceeds("docker load < ${probeImage}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
+
+      # check if pods are running
+      $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
+      $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
+      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");
+
+      # check dns on hosts (dnsmasq)
+      $machine1->succeed("host redis.default.svc.cluster.local");
+      $machine2->succeed("host redis.default.svc.cluster.local");
+
+      # check dns inside the container
+      $machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
+    '';
+  };
+in {
+  singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
+  multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/e2e.nix b/nixpkgs/nixos/tests/kubernetes/e2e.nix
new file mode 100644
index 000000000000..175d8413045e
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/e2e.nix
@@ -0,0 +1,40 @@
+{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+  domain = "my.zyx";
+  certs = import ./certs.nix { externalDomain = domain; kubelets = ["machine1" "machine2"]; };
+  kubeconfig = pkgs.writeText "kubeconfig.json" (builtins.toJSON {
+    apiVersion = "v1";
+    kind = "Config";
+    clusters = [{
+      name = "local";
+      cluster.certificate-authority = "${certs.master}/ca.pem";
+      cluster.server = "https://api.${domain}";
+    }];
+    users = [{
+      name = "kubelet";
+      user = {
+        client-certificate = "${certs.admin}/admin.pem";
+        client-key = "${certs.admin}/admin-key.pem";
+      };
+    }];
+    contexts = [{
+      context = {
+        cluster = "local";
+        user = "kubelet";
+      };
+      current-context = "kubelet-context";
+    }];
+  });
+
+  base = {
+    name = "e2e";
+    inherit domain certs;
+    test = ''
+      $machine1->succeed("e2e.test -kubeconfig ${kubeconfig} -provider local -ginkgo.focus '\\[Conformance\\]' -ginkgo.skip '\\[Flaky\\]|\\[Serial\\]'");
+    '';
+  };
+in {
+  singlenode = mkKubernetesSingleNodeTest base;
+  multinode = mkKubernetesMultiNodeTest base;
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/rbac.nix b/nixpkgs/nixos/tests/kubernetes/rbac.nix
new file mode 100644
index 000000000000..3ce7adcd0d71
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/rbac.nix
@@ -0,0 +1,140 @@
+{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+
+  roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
+    kind = "ServiceAccount";
+    apiVersion = "v1";
+    metadata = {
+      name = "read-only";
+      namespace = "default";
+    };
+  });
+
+  roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
+    apiVersion = "rbac.authorization.k8s.io/v1";
+    kind = "RoleBinding";
+    metadata = {
+      name = "read-pods";
+      namespace = "default";
+    };
+    roleRef = {
+      apiGroup = "rbac.authorization.k8s.io";
+      kind = "Role";
+      name = "pod-reader";
+    };
+    subjects = [{
+      kind = "ServiceAccount";
+      name = "read-only";
+      namespace = "default";
+    }];
+  });
+
+  roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
+    apiVersion = "rbac.authorization.k8s.io/v1";
+    kind = "Role";
+    metadata = {
+      name = "pod-reader";
+      namespace = "default";
+    };
+    rules = [{
+      apiGroups = [""];
+      resources = ["pods"];
+      verbs = ["get" "list" "watch"];
+    }];
+  });
+
+  kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "kubectl";
+    metadata.namespace = "default";
+    metadata.labels.name = "kubectl";
+    spec.serviceAccountName = "read-only";
+    spec.containers = [{
+      name = "kubectl";
+      image = "kubectl:latest";
+      command = ["/bin/tail" "-f"];
+      imagePullPolicy = "Never";
+      tty = true;
+    }];
+  });
+
+  kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "kubectl-2";
+    metadata.namespace = "default";
+    metadata.labels.name = "kubectl-2";
+    spec.serviceAccountName = "read-only";
+    spec.containers = [{
+      name = "kubectl-2";
+      image = "kubectl:latest";
+      command = ["/bin/tail" "-f"];
+      imagePullPolicy = "Never";
+      tty = true;
+    }];
+  });
+
+  kubectl = pkgs.runCommand "copy-kubectl" { buildInputs = [ pkgs.kubernetes ]; } ''
+    mkdir -p $out/bin
+    cp ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl
+  '';
+
+  kubectlImage = pkgs.dockerTools.buildImage {
+    name = "kubectl";
+    tag = "latest";
+    contents = [ kubectl pkgs.busybox kubectlPod2 ];
+    config.Entrypoint = "/bin/sh";
+  };
+
+  base = {
+    name = "rbac";
+  };
+
+  singlenode = base // {
+    test = ''
+      $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
+
+      $machine1->waitUntilSucceeds("docker load < ${kubectlImage}");
+
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
+
+      $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
+
+      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
+      $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
+      $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
+    '';
+  };
+
+  multinode = base // {
+    test = ''
+      # Node token exchange
+      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
+      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+
+      $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
+
+      $machine2->waitUntilSucceeds("docker load < ${kubectlImage}");
+
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
+
+      $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
+
+      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
+      $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
+      $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
+    '';
+  };
+
+in {
+  singlenode = mkKubernetesSingleNodeTest singlenode;
+  multinode = mkKubernetesMultiNodeTest multinode;
+}
diff --git a/nixpkgs/nixos/tests/ldap.nix b/nixpkgs/nixos/tests/ldap.nix
new file mode 100644
index 000000000000..74b002fc00ee
--- /dev/null
+++ b/nixpkgs/nixos/tests/ldap.nix
@@ -0,0 +1,405 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+let
+  unlines = lib.concatStringsSep "\n";
+  unlinesAttrs = f: as: unlines (lib.mapAttrsToList f as);
+
+  dbDomain = "example.com";
+  dbSuffix = "dc=example,dc=com";
+  dbAdminDn = "cn=admin,${dbSuffix}";
+  dbAdminPwd = "admin-password";
+  # NOTE: slappasswd -h "{SSHA}" -s '${dbAdminPwd}'
+  dbAdminPwdHash = "{SSHA}i7FopSzkFQMrHzDMB1vrtkI0rBnwouP8";
+  ldapUser = "test-ldap-user";
+  ldapUserId = 10000;
+  ldapUserPwd = "user-password";
+  # NOTE: slappasswd -h "{SSHA}" -s '${ldapUserPwd}'
+  ldapUserPwdHash = "{SSHA}v12XICMZNGT6r2KJ26rIkN8Vvvp4QX6i";
+  ldapGroup = "test-ldap-group";
+  ldapGroupId = 10000;
+
+  mkClient = useDaemon:
+    { lib, ... }:
+    {
+      virtualisation.memorySize = 256;
+      virtualisation.vlans = [ 1 ];
+      security.pam.services.su.rootOK = lib.mkForce false;
+      users.ldap.enable = true;
+      users.ldap.daemon = {
+        enable = useDaemon;
+        rootpwmoddn = "cn=admin,${dbSuffix}";
+        rootpwmodpwFile = "/etc/nslcd.rootpwmodpw";
+      };
+      users.ldap.loginPam = true;
+      users.ldap.nsswitch = true;
+      users.ldap.server = "ldap://server";
+      users.ldap.base = "ou=posix,${dbSuffix}";
+      users.ldap.bind = {
+        distinguishedName = "cn=admin,${dbSuffix}";
+        passwordFile = "/etc/ldap/bind.password";
+      };
+      # NOTE: passwords are stored in the clear in the Nix store, but this is a test.
+      environment.etc."ldap/bind.password".source = pkgs.writeText "password" dbAdminPwd;
+      environment.etc."nslcd.rootpwmodpw".source = pkgs.writeText "rootpwmodpw" dbAdminPwd;
+    };
+in
+
+{
+  name = "ldap";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ montag451 ];
+  };
+
+  nodes = {
+
+    server =
+      { pkgs, config, ... }:
+      let
+        inherit (config.services) openldap;
+
+        slapdConfig = pkgs.writeText "cn=config.ldif" (''
+          dn: cn=config
+          objectClass: olcGlobal
+          #olcPidFile: /run/slapd/slapd.pid
+          # List of arguments that were passed to the server
+          #olcArgsFile: /run/slapd/slapd.args
+          # Read slapd-config(5) for possible values
+          olcLogLevel: none
+          # The tool-threads parameter sets the number of CPUs
+          # used for indexing.
+          olcToolThreads: 1
+
+          dn: olcDatabase={-1}frontend,cn=config
+          objectClass: olcDatabaseConfig
+          objectClass: olcFrontendConfig
+          # The maximum number of entries that are returned for a search operation
+          olcSizeLimit: 500
+          # Allow unlimited access to the local connection from the local root user
+          olcAccess: to *
+            by dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth manage
+            by * break
+          # Allow unauthenticated read access for schema and base DN autodiscovery
+          olcAccess: to dn.exact=""
+            by * read
+          olcAccess: to dn.base="cn=Subschema"
+            by * read
+
+          dn: olcDatabase=config,cn=config
+          objectClass: olcDatabaseConfig
+          olcRootDN: cn=admin,cn=config
+          #olcRootPW:
+          # NOTE: for access to cn=config, the system root user can act as manager
+          # via the SASL EXTERNAL mechanism (-Y EXTERNAL) over the unix socket (-H ldapi://)
+          olcAccess: to *
+            by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" manage
+            by * break
+
+          dn: cn=schema,cn=config
+          objectClass: olcSchemaConfig
+
+          include: file://${pkgs.openldap}/etc/schema/core.ldif
+          include: file://${pkgs.openldap}/etc/schema/cosine.ldif
+          include: file://${pkgs.openldap}/etc/schema/nis.ldif
+          include: file://${pkgs.openldap}/etc/schema/inetorgperson.ldif
+
+          dn: cn=module{0},cn=config
+          objectClass: olcModuleList
+          # Where the dynamically loaded modules are stored
+          #olcModulePath: /usr/lib/ldap
+          olcModuleLoad: back_mdb
+
+          ''
+          + unlinesAttrs (olcSuffix: {conf, ...}:
+              "include: file://" + pkgs.writeText "config.ldif" conf
+            ) slapdDatabases
+          );
+
+        slapdDatabases = {
+          ${dbSuffix} = {
+            conf = ''
+              dn: olcBackend={1}mdb,cn=config
+              objectClass: olcBackendConfig
+
+              dn: olcDatabase={1}mdb,cn=config
+              olcSuffix: ${dbSuffix}
+              olcDbDirectory: ${openldap.dataDir}/${dbSuffix}
+              objectClass: olcDatabaseConfig
+              objectClass: olcMdbConfig
+              # NOTE: checkpoint the database periodically in case of system failure
+              # and to speed up slapd shutdown.
+              olcDbCheckpoint: 512 30
+              # Database max size is 1G
+              olcDbMaxSize: 1073741824
+              olcLastMod: TRUE
+              # NOTE: database superuser. Needed for syncrepl,
+              # and used to auth as admin through a TCP connection.
+              olcRootDN: cn=admin,${dbSuffix}
+              olcRootPW: ${dbAdminPwdHash}
+              #
+              olcDbIndex: objectClass eq
+              olcDbIndex: cn,uid eq
+              olcDbIndex: uidNumber,gidNumber eq
+              olcDbIndex: member,memberUid eq
+              #
+              olcAccess: to attrs=userPassword
+                by self write
+                by anonymous auth
+                by dn="cn=admin,${dbSuffix}" write
+                by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
+                by * none
+              olcAccess: to attrs=shadowLastChange
+                by self write
+                by dn="cn=admin,${dbSuffix}" write
+                by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
+                by * none
+              olcAccess: to dn.sub="ou=posix,${dbSuffix}"
+                by self read
+                by dn="cn=admin,${dbSuffix}" read
+                by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" read
+              olcAccess: to *
+                by self read
+                by * none
+            '';
+            data = ''
+              dn: ${dbSuffix}
+              objectClass: top
+              objectClass: dcObject
+              objectClass: organization
+              o: ${dbDomain}
+
+              dn: cn=admin,${dbSuffix}
+              objectClass: simpleSecurityObject
+              objectClass: organizationalRole
+              description: ${dbDomain} LDAP administrator
+              roleOccupant: ${dbSuffix}
+              userPassword: ${ldapUserPwdHash}
+
+              dn: ou=posix,${dbSuffix}
+              objectClass: top
+              objectClass: organizationalUnit
+
+              dn: ou=accounts,ou=posix,${dbSuffix}
+              objectClass: top
+              objectClass: organizationalUnit
+
+              dn: ou=groups,ou=posix,${dbSuffix}
+              objectClass: top
+              objectClass: organizationalUnit
+            ''
+            + lib.concatMapStrings posixAccount [
+              { uid=ldapUser; uidNumber=ldapUserId; gidNumber=ldapGroupId; userPassword=ldapUserPwdHash; }
+            ]
+            + lib.concatMapStrings posixGroup [
+              { gid=ldapGroup; gidNumber=ldapGroupId; members=[]; }
+            ];
+          };
+        };
+
+        # NOTE: create a user account using the posixAccount objectClass.
+        posixAccount =
+          { uid
+          , uidNumber ? null
+          , gidNumber ? null
+          , cn ? ""
+          , sn ? ""
+          , userPassword ? ""
+          , loginShell ? "/bin/sh"
+          }: ''
+
+            dn: uid=${uid},ou=accounts,ou=posix,${dbSuffix}
+            objectClass: person
+            objectClass: posixAccount
+            objectClass: shadowAccount
+            cn: ${cn}
+            gecos:
+            ${if gidNumber == null then "#" else "gidNumber: ${toString gidNumber}"}
+            homeDirectory: /home/${uid}
+            loginShell: ${loginShell}
+            sn: ${sn}
+            ${if uidNumber == null then "#" else "uidNumber: ${toString uidNumber}"}
+            ${if userPassword == "" then "#" else "userPassword: ${userPassword}"}
+          '';
+
+        # NOTE: create a group using the posixGroup objectClass.
+        posixGroup =
+          { gid
+          , gidNumber
+          , members
+          }: ''
+
+            dn: cn=${gid},ou=groups,ou=posix,${dbSuffix}
+            objectClass: top
+            objectClass: posixGroup
+            gidNumber: ${toString gidNumber}
+            ${lib.concatMapStrings (member: "memberUid: ${member}\n") members}
+          '';
+      in
+      {
+        virtualisation.memorySize = 256;
+        virtualisation.vlans = [ 1 ];
+        networking.firewall.allowedTCPPorts = [ 389 ];
+        services.openldap.enable = true;
+        services.openldap.dataDir = "/var/db/openldap";
+        services.openldap.configDir = "/var/db/slapd";
+        services.openldap.urlList = [
+          "ldap:///"
+          "ldapi:///"
+        ];
+        systemd.services.openldap = {
+          preStart = ''
+              set -e
+              # NOTE: slapd's config is always re-initialized.
+              rm -rf "${openldap.configDir}"/cn=config \
+                     "${openldap.configDir}"/cn=config.ldif
+              install -D -d -m 0700 -o "${openldap.user}" -g "${openldap.group}" "${openldap.configDir}"
+              # NOTE: olcDbDirectory must be created before adding the config.
+              '' +
+              unlinesAttrs (olcSuffix: {data, ...}: ''
+                # NOTE: database is always re-initialized.
+                rm -rf "${openldap.dataDir}/${olcSuffix}"
+                install -D -d -m 0700 -o "${openldap.user}" -g "${openldap.group}" \
+                 "${openldap.dataDir}/${olcSuffix}"
+                '') slapdDatabases
+              + ''
+              # NOTE: slapd is supposed to be stopped while in preStart,
+              #       hence slap* commands can safely be used.
+              umask 0077
+              ${pkgs.openldap}/bin/slapadd -n 0 \
+               -F "${openldap.configDir}" \
+               -l ${slapdConfig}
+              chown -R "${openldap.user}:${openldap.group}" "${openldap.configDir}"
+              # NOTE: slapadd(8): To populate the config database slapd-config(5),
+              #                   use -n 0 as it is always the first database.
+              #                   It must physically exist on the filesystem prior to this, however.
+            '' +
+            unlinesAttrs (olcSuffix: {data, ...}: ''
+              # NOTE: load database ${olcSuffix}
+              # (as root to avoid depending on sudo or chpst)
+              ${pkgs.openldap}/bin/slapadd \
+               -F "${openldap.configDir}" \
+               -l ${pkgs.writeText "data.ldif" data}
+              '' + ''
+              # NOTE: redundant with the default openldap preStart, but does no harm.
+              chown -R "${openldap.user}:${openldap.group}" \
+               "${openldap.dataDir}/${olcSuffix}"
+            '') slapdDatabases;
+        };
+      };
+
+    client1 = mkClient true; # use nss_pam_ldapd
+    client2 = mkClient false; # use nss_ldap and pam_ldap
+  };
+
+  testScript = ''
+    def expect_script(*commands):
+        script = ";".join(commands)
+        return f"${pkgs.expect}/bin/expect -c '{script}'"
+
+
+    server.start()
+    server.wait_for_unit("default.target")
+
+    with subtest("slapd: auth as database admin with SASL and check a POSIX account"):
+        server.succeed(
+            'test "$(ldapsearch -LLL -H ldapi:// -Y EXTERNAL '
+            + "-b 'uid=${ldapUser},ou=accounts,ou=posix,${dbSuffix}' "
+            + "-s base uidNumber | "
+            + "sed -ne 's/^uidNumber: \\(.*\\)/\\1/p')\" -eq ${toString ldapUserId}"
+        )
+
+    with subtest("slapd: auth as database admin with password and check a POSIX account"):
+        server.succeed(
+            "test \"$(ldapsearch -LLL -H ldap://server -D 'cn=admin,${dbSuffix}' "
+            + "-w '${dbAdminPwd}' -b 'uid=${ldapUser},ou=accounts,ou=posix,${dbSuffix}' "
+            + "-s base uidNumber | "
+            + "sed -ne 's/^uidNumber: \\(.*\\)/\\1/p')\" -eq ${toString ldapUserId}"
+        )
+
+    client1.start()
+    client1.wait_for_unit("default.target")
+
+    with subtest("password: su with password to a POSIX account"):
+        client1.succeed(
+            expect_script(
+                'spawn su "${ldapUser}"',
+                'expect "Password:"',
+                'send "${ldapUserPwd}\n"',
+                'expect "*"',
+                'send "whoami\n"',
+                'expect -ex "${ldapUser}" {exit}',
+                "exit 1",
+            )
+        )
+
+    with subtest("password: change password of a POSIX account as root"):
+        client1.succeed(
+            "chpasswd <<<'${ldapUser}:new-password'",
+            expect_script(
+                'spawn su "${ldapUser}"',
+                'expect "Password:"',
+                'send "new-password\n"',
+                'expect "*"',
+                'send "whoami\n"',
+                'expect -ex "${ldapUser}" {exit}',
+                "exit 1",
+            ),
+            "chpasswd <<<'${ldapUser}:${ldapUserPwd}'",
+        )
+
+    with subtest("password: change password of a POSIX account from itself"):
+        client1.succeed(
+            "chpasswd <<<'${ldapUser}:${ldapUserPwd}' ",
+            expect_script(
+                "spawn su --login ${ldapUser} -c passwd",
+                'expect "Password: "',
+                'send "${ldapUserPwd}\n"',
+                'expect "(current) UNIX password: "',
+                'send "${ldapUserPwd}\n"',
+                'expect "New password: "',
+                'send "new-password\n"',
+                'expect "Retype new password: "',
+                'send "new-password\n"',
+                'expect "passwd: password updated successfully" {exit}',
+                "exit 1",
+            ),
+            expect_script(
+                'spawn su "${ldapUser}"',
+                'expect "Password:"',
+                'send "${ldapUserPwd}\n"',
+                'expect "su: Authentication failure" {exit}',
+                "exit 1",
+            ),
+            expect_script(
+                'spawn su "${ldapUser}"',
+                'expect "Password:"',
+                'send "new-password\n"',
+                'expect "*"',
+                'send "whoami\n"',
+                'expect -ex "${ldapUser}" {exit}',
+                "exit 1",
+            ),
+            "chpasswd <<<'${ldapUser}:${ldapUserPwd}'",
+        )
+
+    client2.start()
+    client2.wait_for_unit("default.target")
+
+    with subtest("NSS"):
+        client1.succeed(
+            "test \"$(id -u    '${ldapUser}')\" -eq ${toString ldapUserId}",
+            "test \"$(id -u -n '${ldapUser}')\" =  '${ldapUser}'",
+            "test \"$(id -g    '${ldapUser}')\" -eq ${toString ldapGroupId}",
+            "test \"$(id -g -n '${ldapUser}')\" =  '${ldapGroup}'",
+            "test \"$(id -u    '${ldapUser}')\" -eq ${toString ldapUserId}",
+            "test \"$(id -u -n '${ldapUser}')\" =  '${ldapUser}'",
+            "test \"$(id -g    '${ldapUser}')\" -eq ${toString ldapGroupId}",
+            "test \"$(id -g -n '${ldapUser}')\" =  '${ldapGroup}'",
+        )
+
+    with subtest("PAM"):
+        client1.succeed(
+            "echo ${ldapUserPwd} | su -l '${ldapUser}' -c true",
+            "echo ${ldapUserPwd} | su -l '${ldapUser}' -c true",
+        )
+  '';
+})
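The {SSHA} hashes hard-coded above come from slappasswd, as the NOTE comments state. The scheme itself is base64(sha1(password ++ salt) ++ salt) with a 20-byte SHA-1 digest, so a hash can be re-checked without slappasswd — a minimal Python sketch, not used by the test:

    import base64
    import hashlib

    def check_ssha(password: str, ssha: str) -> bool:
        # The {SSHA} payload is sha1(password + salt) followed by the salt.
        raw = base64.b64decode(ssha[len("{SSHA}"):])
        digest, salt = raw[:20], raw[20:]
        return hashlib.sha1(password.encode() + salt).digest() == digest

    # Holds provided the NOTE above is accurate about how the hash was generated.
    assert check_ssha("user-password", "{SSHA}v12XICMZNGT6r2KJ26rIkN8Vvvp4QX6i")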
diff --git a/nixpkgs/nixos/tests/leaps.nix b/nixpkgs/nixos/tests/leaps.nix
new file mode 100644
index 000000000000..65b475d734ec
--- /dev/null
+++ b/nixpkgs/nixos/tests/leaps.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ pkgs,  ... }:
+
+{
+  name = "leaps";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ qknight ];
+  };
+
+  nodes =
+    { 
+      client = { };
+
+      server =
+        { services.leaps = {
+            enable = true;
+            port = 6666;
+            path = "/leaps/";
+          };
+          networking.firewall.enable = false;
+        };
+    };
+
+  testScript =
+    ''
+      start_all()
+      server.wait_for_open_port(6666)
+      client.wait_for_unit("network.target")
+      assert "leaps" in client.succeed(
+          "${pkgs.curl}/bin/curl http://server:6666/leaps/"
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/lidarr.nix b/nixpkgs/nixos/tests/lidarr.nix
new file mode 100644
index 000000000000..d3f83e5d9145
--- /dev/null
+++ b/nixpkgs/nixos/tests/lidarr.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "lidarr";
+  meta.maintainers = with maintainers; [ etu ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.lidarr.enable = true; };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("lidarr.service")
+    machine.wait_for_open_port("8686")
+    machine.succeed("curl --fail http://localhost:8686/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lightdm.nix b/nixpkgs/nixos/tests/lightdm.nix
new file mode 100644
index 000000000000..46c2ed7ccc59
--- /dev/null
+++ b/nixpkgs/nixos/tests/lightdm.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "lightdm";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ aszlig worldofpeace ];
+  };
+
+  machine = { ... }: {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.displayManager.lightdm.enable = true;
+    services.xserver.displayManager.defaultSession = "none+icewm";
+    services.xserver.windowManager.icewm.enable = true;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    start_all()
+    machine.wait_for_text("${user.description}")
+    machine.screenshot("lightdm")
+    machine.send_chars("${user.password}\n")
+    machine.wait_for_file("${user.home}/.Xauthority")
+    machine.succeed("xauth merge ${user.home}/.Xauthority")
+    machine.wait_for_window("^IceWM ")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/limesurvey.nix b/nixpkgs/nixos/tests/limesurvey.nix
new file mode 100644
index 000000000000..7228fcb83315
--- /dev/null
+++ b/nixpkgs/nixos/tests/limesurvey.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "limesurvey";
+  meta.maintainers = [ pkgs.stdenv.lib.maintainers.aanderse ];
+
+  machine = { ... }: {
+    services.limesurvey = {
+      enable = true;
+      virtualHost = {
+        hostName = "example.local";
+        adminAddr = "root@example.local";
+      };
+    };
+
+    # limesurvey won't work without a dot in the hostname
+    networking.hosts."127.0.0.1" = [ "example.local" ];
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("phpfpm-limesurvey.service")
+    assert "The following surveys are available" in machine.succeed(
+        "curl http://example.local/"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/login.nix b/nixpkgs/nixos/tests/login.nix
new file mode 100644
index 000000000000..d36c1a91be43
--- /dev/null
+++ b/nixpkgs/nixos/tests/login.nix
@@ -0,0 +1,59 @@
+import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
+
+{
+  name = "login";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  machine =
+    { pkgs, lib, ... }:
+    { boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest;
+      sound.enable = true; # needed for the getfacl test: /dev/snd/* exists without it, but udev doesn't manage the ACLs then
+    };
+
+  testScript = ''
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+      machine.screenshot("postboot")
+
+      with subtest("create user"):
+          machine.succeed("useradd -m alice")
+          machine.succeed("(echo foobar; echo foobar) | passwd alice")
+
+      with subtest("Check whether switching VTs works"):
+          machine.fail("pgrep -f 'agetty.*tty2'")
+          machine.send_key("alt-f2")
+          machine.wait_until_succeeds("[ $(fgconsole) = 2 ]")
+          machine.wait_for_unit("getty@tty2.service")
+          machine.wait_until_succeeds("pgrep -f 'agetty.*tty2'")
+
+      with subtest("Log in as alice on a virtual console"):
+          machine.wait_until_tty_matches(2, "login: ")
+          machine.send_chars("alice\n")
+          machine.wait_until_tty_matches(2, "login: alice")
+          machine.wait_until_succeeds("pgrep login")
+          machine.wait_until_tty_matches(2, "Password: ")
+          machine.send_chars("foobar\n")
+          machine.wait_until_succeeds("pgrep -u alice bash")
+          machine.send_chars("touch done\n")
+          machine.wait_for_file("/home/alice/done")
+
+      with subtest("Systemd gives and removes device ownership as needed"):
+          machine.succeed("getfacl /dev/snd/timer | grep -q alice")
+          machine.send_key("alt-f1")
+          machine.wait_until_succeeds("[ $(fgconsole) = 1 ]")
+          machine.fail("getfacl /dev/snd/timer | grep -q alice")
+          machine.succeed("chvt 2")
+          machine.wait_until_succeeds("getfacl /dev/snd/timer | grep -q alice")
+
+      with subtest("Virtual console logout"):
+          machine.send_chars("exit\n")
+          machine.wait_until_fails("pgrep -u alice bash")
+          machine.screenshot("mingetty")
+
+      with subtest("Check whether ctrl-alt-delete works"):
+          machine.send_key("ctrl-alt-delete")
+          machine.wait_for_shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/loki.nix b/nixpkgs/nixos/tests/loki.nix
new file mode 100644
index 000000000000..dbf1e8a650f5
--- /dev/null
+++ b/nixpkgs/nixos/tests/loki.nix
@@ -0,0 +1,39 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "loki";
+
+  meta = with lib.maintainers; {
+    maintainers = [ willibutz ];
+  };
+
+  machine = { ... }: {
+    services.loki = {
+      enable = true;
+      configFile = "${pkgs.grafana-loki.src}/cmd/loki/loki-local-config.yaml";
+    };
+    systemd.services.promtail = {
+      description = "Promtail service for Loki test";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.grafana-loki}/bin/promtail --config.file ${pkgs.grafana-loki.src}/cmd/promtail/promtail-local-config.yaml
+        '';
+        DynamicUser = true;
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("loki.service")
+    machine.wait_for_unit("promtail.service")
+    machine.wait_for_open_port(3100)
+    machine.wait_for_open_port(9080)
+    machine.succeed("echo 'Loki Ingestion Test' > /var/log/testlog")
+    machine.wait_until_succeeds(
+        "${pkgs.grafana-loki}/bin/logcli --addr='http://localhost:3100' query --no-labels '{job=\"varlogs\",filename=\"/var/log/testlog\"}' | grep -q 'Loki Ingestion Test'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lorri/builder.sh b/nixpkgs/nixos/tests/lorri/builder.sh
new file mode 100644
index 000000000000..b586b2bf7985
--- /dev/null
+++ b/nixpkgs/nixos/tests/lorri/builder.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+printf "%s" "${name:?}" > "${out:?}"
diff --git a/nixpkgs/nixos/tests/lorri/default.nix b/nixpkgs/nixos/tests/lorri/default.nix
new file mode 100644
index 000000000000..198171082d88
--- /dev/null
+++ b/nixpkgs/nixos/tests/lorri/default.nix
@@ -0,0 +1,26 @@
+import ../make-test-python.nix {
+  machine = { pkgs, ... }: {
+    imports = [ ../../modules/profiles/minimal.nix ];
+    environment.systemPackages = [ pkgs.lorri ];
+  };
+
+  testScript = ''
+    # Copy files over
+    machine.succeed(
+        "cp '${./fake-shell.nix}' shell.nix"
+    )
+    machine.succeed(
+        "cp '${./builder.sh}' builder.sh"
+    )
+
+    # Start the daemon and wait until it is ready
+    machine.execute("lorri daemon > lorri.stdout 2> lorri.stderr &")
+    machine.wait_until_succeeds("grep --fixed-strings 'ready' lorri.stdout")
+
+    # Ping the daemon
+    machine.succeed("lorri internal__ping shell.nix")
+
+    # Wait for the daemon to finish the build
+    machine.wait_until_succeeds("grep --fixed-strings 'Completed' lorri.stdout")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/lorri/fake-shell.nix b/nixpkgs/nixos/tests/lorri/fake-shell.nix
new file mode 100644
index 000000000000..9de9d247e542
--- /dev/null
+++ b/nixpkgs/nixos/tests/lorri/fake-shell.nix
@@ -0,0 +1,5 @@
+derivation {
+  system = builtins.currentSystem;
+  name = "fake-shell";
+  builder = ./builder.sh;
+}
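builder.sh writes $name into $out, so this derivation builds to a plain file containing the string fake-shell. A sanity check in the same driver style as the test above — a sketch; it assumes nix-build works inside the VM, which the test itself never relies on:

    # shell.nix and builder.sh were copied side by side, so ./builder.sh resolves.
    machine.succeed("nix-build shell.nix")
    assert "fake-shell" in machine.succeed("cat ./result")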
diff --git a/nixpkgs/nixos/tests/magic-wormhole-mailbox-server.nix b/nixpkgs/nixos/tests/magic-wormhole-mailbox-server.nix
new file mode 100644
index 000000000000..144a07e13492
--- /dev/null
+++ b/nixpkgs/nixos/tests/magic-wormhole-mailbox-server.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "magic-wormhole-mailbox-server";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mmahut ];
+  };
+
+  nodes = {
+    server = { ... }: {
+      networking.firewall.allowedTCPPorts = [ 4000 ];
+      services.magic-wormhole-mailbox-server.enable = true;
+    };
+
+    client_alice = { ... }: {
+      networking.firewall.enable = false;
+      environment.systemPackages = [ pkgs.magic-wormhole ];
+    };
+
+    client_bob = { ... }: {
+      environment.systemPackages = [ pkgs.magic-wormhole ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    # Start the wormhole relay server
+    server.wait_for_unit("magic-wormhole-mailbox-server.service")
+    server.wait_for_open_port(4000)
+
+    # Create a secret file and send it to Bob
+    client_alice.succeed("echo mysecret > secretfile")
+    client_alice.succeed("wormhole --relay-url=ws://server:4000/v1 send -0 secretfile &")
+
+    # Retrieve a secret file from Alice and check its content
+    client_bob.succeed("wormhole --relay-url=ws://server:4000/v1 receive -0 --accept-file")
+    client_bob.succeed("grep mysecret secretfile")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/magnetico.nix b/nixpkgs/nixos/tests/magnetico.nix
new file mode 100644
index 000000000000..6770d32358e8
--- /dev/null
+++ b/nixpkgs/nixos/tests/magnetico.nix
@@ -0,0 +1,40 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  port = 8081;
+in
+{
+  name = "magnetico";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    networking.firewall.allowedTCPPorts = [ 9000 ];
+
+    services.magnetico = {
+      enable = true;
+      crawler.port = 9000;
+      web.port = port;
+      web.credentials.user = "$2y$12$P88ZF6soFthiiAeXnz64aOWDsY3Dw7Yw8fZ6GtiqFNjknD70zDmNe";
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("magneticod")
+      machine.wait_for_unit("magneticow")
+      machine.succeed(
+          "${pkgs.curl}/bin/curl "
+          + "-u user:password http://localhost:${toString port}"
+      )
+      assert "Unauthorised." in machine.succeed(
+          "${pkgs.curl}/bin/curl "
+          + "-u user:wrongpwd http://localhost:${toString port}"
+      )
+      machine.shutdown()
+    '';
+})
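The web.credentials.user value above is an htpasswd-style bcrypt hash (cost factor 12) of the literal password "password" sent by the first curl. A replacement with the same $2y$ prefix and cost can be minted with Apache's htpasswd — a sketch, assuming that tool is available; it is not part of this test:

    # -B selects bcrypt, -C 12 matches the cost factor of the hash above,
    # -n prints to stdout instead of writing a file, -b takes the password inline.
    htpasswd -nbB -C 12 user password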
diff --git a/nixpkgs/nixos/tests/mailcatcher.nix b/nixpkgs/nixos/tests/mailcatcher.nix
new file mode 100644
index 000000000000..2ef38544fe0a
--- /dev/null
+++ b/nixpkgs/nixos/tests/mailcatcher.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "mailcatcher";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  machine =
+    { pkgs, ... }:
+    {
+      services.mailcatcher.enable = true;
+
+      services.ssmtp.enable = true;
+      services.ssmtp.hostName = "localhost:1025";
+
+      environment.systemPackages = [ pkgs.mailutils ];
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("mailcatcher.service")
+    machine.wait_for_open_port("1025")
+    machine.succeed(
+        'echo "this is the body of the email" | mail -s "subject" root@example.org'
+    )
+    assert "this is the body of the email" in machine.succeed(
+        "curl http://localhost:1080/messages/1.source"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/make-test-python.nix b/nixpkgs/nixos/tests/make-test-python.nix
new file mode 100644
index 000000000000..89897fe7e61b
--- /dev/null
+++ b/nixpkgs/nixos/tests/make-test-python.nix
@@ -0,0 +1,9 @@
+f: {
+  system ? builtins.currentSystem,
+  pkgs ? import ../.. { inherit system; config = {}; },
+  ...
+} @ args:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+makeTest (if pkgs.lib.isFunction f then f (args // { inherit pkgs; inherit (pkgs) lib; }) else f)
diff --git a/nixpkgs/nixos/tests/make-test.nix b/nixpkgs/nixos/tests/make-test.nix
new file mode 100644
index 000000000000..cee5da93454a
--- /dev/null
+++ b/nixpkgs/nixos/tests/make-test.nix
@@ -0,0 +1,9 @@
+f: {
+  system ? builtins.currentSystem,
+  pkgs ? import ../.. { inherit system; config = {}; },
+  ...
+} @ args:
+
+with import ../lib/testing.nix { inherit system pkgs; };
+
+makeTest (if pkgs.lib.isFunction f then f (args // { inherit pkgs; inherit (pkgs) lib; }) else f)
diff --git a/nixpkgs/nixos/tests/mathics.nix b/nixpkgs/nixos/tests/mathics.nix
new file mode 100644
index 000000000000..fcbeeb18a727
--- /dev/null
+++ b/nixpkgs/nixos/tests/mathics.nix
@@ -0,0 +1,20 @@
+import ./make-test.nix ({ pkgs, ... }: {
+  name = "mathics";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ benley ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.mathics.enable = true;
+      services.mathics.port = 8888;
+    };
+  };
+
+  testScript = ''
+    startAll;
+    $machine->waitForUnit("mathics.service");
+    $machine->waitForOpenPort(8888);
+    $machine->succeed("curl http://localhost:8888/");
+  '';
+})
diff --git a/nixpkgs/nixos/tests/matomo.nix b/nixpkgs/nixos/tests/matomo.nix
new file mode 100644
index 000000000000..2bea237c8bdd
--- /dev/null
+++ b/nixpkgs/nixos/tests/matomo.nix
@@ -0,0 +1,48 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../.. { inherit system config; } }:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  matomoTest = package:
+  makeTest {
+    machine = { config, pkgs, ... }: {
+      services.matomo = {
+        package = package;
+        enable = true;
+        nginx = {
+          forceSSL = false;
+          enableACME = false;
+        };
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mysql;
+      };
+      services.nginx.enable = true;
+    };
+
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("mysql.service")
+      machine.wait_for_unit("phpfpm-matomo.service")
+      machine.wait_for_unit("nginx.service")
+
+      # without the grep the command does not produce valid utf-8 for some reason
+      with subtest("welcome screen loads"):
+          machine.succeed(
+              "curl -sSfL http://localhost/ | grep '<title>Matomo[^<]*Installation'"
+          )
+    '';
+  };
+in {
+  matomo = matomoTest pkgs.matomo // {
+    name = "matomo";
+    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata ];
+  };
+  matomo-beta = matomoTest pkgs.matomo-beta // {
+    name = "matomo-beta";
+    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata ];
+  };
+}
diff --git a/nixpkgs/nixos/tests/matrix-synapse.nix b/nixpkgs/nixos/tests/matrix-synapse.nix
new file mode 100644
index 000000000000..f3623aa3c094
--- /dev/null
+++ b/nixpkgs/nixos/tests/matrix-synapse.nix
@@ -0,0 +1,90 @@
+import ./make-test-python.nix ({ pkgs, ... } : let
+
+
+  runWithOpenSSL = file: cmd: pkgs.runCommand file {
+    buildInputs = [ pkgs.openssl ];
+  } cmd;
+
+
+  ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
+  ca_pem = runWithOpenSSL "ca.pem" ''
+    openssl req \
+      -x509 -new -nodes -key ${ca_key} \
+      -days 10000 -out $out -subj "/CN=snakeoil-ca"
+  '';
+  key = runWithOpenSSL "matrix_key.pem" "openssl genrsa -out $out 2048";
+  csr = runWithOpenSSL "matrix.csr" ''
+    openssl req \
+       -new -key ${key} \
+       -out $out -subj "/CN=localhost" \
+  '';
+  cert = runWithOpenSSL "matrix_cert.pem" ''
+    openssl x509 \
+      -req -in ${csr} \
+      -CA ${ca_pem} -CAkey ${ca_key} \
+      -CAcreateserial -out $out \
+      -days 365
+  '';
+
+in {
+
+  name = "matrix-synapse";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ corngood ];
+  };
+
+  nodes = {
+    # Since 0.33.0, matrix-synapse doesn't allow underscores in server names
+    serverpostgres = { pkgs, ... }: {
+      services.matrix-synapse = {
+        enable = true;
+        database_type = "psycopg2";
+        tls_certificate_path = "${cert}";
+        tls_private_key_path = "${key}";
+        database_args = {
+          password = "synapse";
+        };
+      };
+      services.postgresql = {
+        enable = true;
+
+        # The database name and user are configured by the following options:
+        #   - services.matrix-synapse.database_name
+        #   - services.matrix-synapse.database_user
+        #
+        # The values used here represent the default values of the module.
+        initialScript = pkgs.writeText "synapse-init.sql" ''
+          CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
+          CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
+            TEMPLATE template0
+            LC_COLLATE = "C"
+            LC_CTYPE = "C";
+        '';
+      };
+    };
+
+    serversqlite = args: {
+      services.matrix-synapse = {
+        enable = true;
+        database_type = "sqlite3";
+        tls_certificate_path = "${cert}";
+        tls_private_key_path = "${key}";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    serverpostgres.wait_for_unit("matrix-synapse.service")
+    serverpostgres.wait_until_succeeds(
+        "curl -L --cacert ${ca_pem} https://localhost:8448/"
+    )
+    serverpostgres.require_unit_state("postgresql.service")
+    serversqlite.wait_for_unit("matrix-synapse.service")
+    serversqlite.wait_until_succeeds(
+        "curl -L --cacert ${ca_pem} https://localhost:8448/"
+    )
+    serversqlite.succeed("[ -e /var/lib/matrix-synapse/homeserver.db ]")
+  '';
+
+})
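The snakeoil CA and server certificate above are chained together by hand, so if the curl handshake assertions ever fail, the chain itself is worth ruling out first — a sketch with the same openssl, run outside the test against the two store paths built above:

    # Prints "matrix_cert.pem: OK" when the certificate really was signed by the CA.
    openssl verify -CAfile ca.pem matrix_cert.pem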
diff --git a/nixpkgs/nixos/tests/mediawiki.nix b/nixpkgs/nixos/tests/mediawiki.nix
new file mode 100644
index 000000000000..9468c1de8ccb
--- /dev/null
+++ b/nixpkgs/nixos/tests/mediawiki.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "mediawiki";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  machine =
+    { ... }:
+    { services.mediawiki.enable = true;
+      services.mediawiki.virtualHost.hostName = "localhost";
+      services.mediawiki.virtualHost.adminAddr = "root@example.com";
+      services.mediawiki.passwordFile = pkgs.writeText "password" "correcthorsebatterystaple";
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("phpfpm-mediawiki.service")
+
+    page = machine.succeed("curl -L http://localhost/")
+    assert "MediaWiki has been installed" in page
+  '';
+})
diff --git a/nixpkgs/nixos/tests/memcached.nix b/nixpkgs/nixos/tests/memcached.nix
new file mode 100644
index 000000000000..31f5627d25ce
--- /dev/null
+++ b/nixpkgs/nixos/tests/memcached.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "memcached";
+
+  machine = {
+    imports = [ ../modules/profiles/minimal.nix ];
+    services.memcached.enable = true;
+  };
+
+  testScript = let
+    testScript = pkgs.writers.writePython3 "test_memcache" {
+      libraries = with pkgs.python3Packages; [ memcached ];
+    } ''
+      import memcache
+      c = memcache.Client(['localhost:11211'])
+      c.set('key', 'value')
+      assert 'value' == c.get('key')
+    '';
+  in ''
+    machine.start()
+    machine.wait_for_unit("memcached.service")
+    machine.wait_for_open_port(11211)
+    machine.succeed("${testScript}")
+  '';
+})
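python-memcached's set also accepts a per-key expiry, which the smoke test above does not exercise; a hedged extension of the same embedded script (time is in seconds):

    # Entries stored with time=N are dropped by memcached after N seconds.
    c.set('ephemeral', 'value', time=60)
    assert c.get('ephemeral') == 'value'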
diff --git a/nixpkgs/nixos/tests/mesos.nix b/nixpkgs/nixos/tests/mesos.nix
new file mode 100644
index 000000000000..2e6dc0eda063
--- /dev/null
+++ b/nixpkgs/nixos/tests/mesos.nix
@@ -0,0 +1,92 @@
+import ./make-test.nix ({ pkgs, ...} : rec {
+  name = "mesos";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ offline kamilchm cstrahan ];
+  };
+
+  nodes = {
+    master = { ... }: {
+      networking.firewall.enable = false;
+      services.zookeeper.enable = true;
+      services.mesos.master = {
+          enable = true;
+          zk = "zk://master:2181/mesos";
+      };
+    };
+
+    slave = { ... }: {
+      networking.firewall.enable = false;
+      networking.nat.enable = true;
+      virtualisation.docker.enable = true;
+      services.mesos = {
+        slave = {
+          enable = true;
+          master = "master:5050";
+          dockerRegistry = registry;
+          executorEnvironmentVariables = {
+            PATH = "/run/current-system/sw/bin";
+          };
+        };
+      };
+    };
+  };
+
+  simpleDocker = pkgs.dockerTools.buildImage {
+    name = "echo";
+    tag = "latest";
+    contents = [ pkgs.stdenv.shellPackage pkgs.coreutils ];
+    config = {
+      Env = [
+        # When shell=true, mesos invokes "sh -c '<cmd>'", so make sure "sh" is
+        # on the PATH.
+        "PATH=${pkgs.stdenv.shellPackage}/bin:${pkgs.coreutils}/bin"
+      ];
+      Entrypoint = [ "echo" ];
+    };
+  };
+
+  registry = pkgs.runCommand "registry" { } ''
+    mkdir -p $out
+    cp ${simpleDocker} $out/echo:latest.tar
+  '';
+
+  testFramework = pkgs.pythonPackages.buildPythonPackage {
+    name = "mesos-tests";
+    propagatedBuildInputs = [ pkgs.mesos ];
+    catchConflicts = false;
+    src = ./mesos_test.py;
+    phases = [ "installPhase" "fixupPhase" ];
+    installPhase = ''
+      install -Dvm 0755 $src $out/bin/mesos_test.py
+
+      echo "done" > test.result
+      tar czf $out/test.tar.gz test.result
+    '';
+  };
+
+  testScript =
+    ''
+      startAll;
+      $master->waitForUnit("zookeeper.service");
+      $master->waitForUnit("mesos-master.service");
+      $slave->waitForUnit("docker.service");
+      $slave->waitForUnit("mesos-slave.service");
+      $master->waitForOpenPort(2181);
+      $master->waitForOpenPort(5050);
+      $slave->waitForOpenPort(5051);
+
+      # is slave registered?
+      $master->waitUntilSucceeds("curl -s --fail http://master:5050/master/slaves".
+                                 " | grep -q \"\\\"hostname\\\":\\\"slave\\\"\"");
+
+      # try to run docker image
+      $master->succeed("${pkgs.mesos}/bin/mesos-execute --master=master:5050".
+                       " --resources=\"cpus:0.1;mem:32\" --name=simple-docker".
+                       " --containerizer=mesos --docker_image=echo:latest".
+                       " --shell=true --command=\"echo done\" | grep -q TASK_FINISHED");
+
+      # simple command with .tar.gz uri
+      $master->succeed("${testFramework}/bin/mesos_test.py master ".
+                       "${testFramework}/test.tar.gz");
+    '';
+})
diff --git a/nixpkgs/nixos/tests/mesos_test.py b/nixpkgs/nixos/tests/mesos_test.py
new file mode 100644
index 000000000000..be8bb32e49a7
--- /dev/null
+++ b/nixpkgs/nixos/tests/mesos_test.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+import uuid
+import time
+import subprocess
+import os
+
+import sys
+
+from mesos.interface import Scheduler
+from mesos.native import MesosSchedulerDriver
+from mesos.interface import mesos_pb2
+
+def log(msg):
+    process = subprocess.Popen("systemd-cat", stdin=subprocess.PIPE)
+    (out,err) = process.communicate(msg)
+
+class NixosTestScheduler(Scheduler):
+    def __init__(self):
+        self.master_ip = sys.argv[1]
+        self.download_uri = sys.argv[2]
+
+    def resourceOffers(self, driver, offers):
+        log("XXX got resource offer")
+
+        offer = offers[0]
+        task = self.new_task(offer)
+        uri = task.command.uris.add()
+        uri.value = self.download_uri
+        task.command.value = "cat test.result"
+        driver.launchTasks(offer.id, [task])
+
+    def statusUpdate(self, driver, update):
+        log("XXX status update")
+        if update.state == mesos_pb2.TASK_FAILED:
+            log("XXX test task failed with message: " + update.message)
+            driver.stop()
+            sys.exit(1)
+        elif update.state == mesos_pb2.TASK_FINISHED:
+            driver.stop()
+            sys.exit(0)
+
+    def new_task(self, offer):
+        task = mesos_pb2.TaskInfo()
+        id = uuid.uuid4()
+        task.task_id.value = str(id)
+        task.slave_id.value = offer.slave_id.value
+        task.name = "task {}".format(str(id))
+
+        cpus = task.resources.add()
+        cpus.name = "cpus"
+        cpus.type = mesos_pb2.Value.SCALAR
+        cpus.scalar.value = 0.1
+
+        mem = task.resources.add()
+        mem.name = "mem"
+        mem.type = mesos_pb2.Value.SCALAR
+        mem.scalar.value = 32
+
+        return task
+
+if __name__ == '__main__':
+    log("XXX framework started")
+
+    framework = mesos_pb2.FrameworkInfo()
+    framework.user = "root"
+    framework.name = "nixos-test-framework"
+    driver = MesosSchedulerDriver(
+        NixosTestScheduler(),
+        framework,
+        sys.argv[1] + ":5050"
+    )
+    driver.run()
diff --git a/nixpkgs/nixos/tests/metabase.nix b/nixpkgs/nixos/tests/metabase.nix
new file mode 100644
index 000000000000..1450a4e9086f
--- /dev/null
+++ b/nixpkgs/nixos/tests/metabase.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "metabase";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mmahut ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.metabase.enable = true;
+      virtualisation.memorySize = 1024;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("metabase.service")
+    machine.wait_for_open_port(3000)
+    machine.wait_until_succeeds("curl -L http://localhost:3000/setup | grep Metabase")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/minidlna.nix b/nixpkgs/nixos/tests/minidlna.nix
new file mode 100644
index 000000000000..d852c7f60bc4
--- /dev/null
+++ b/nixpkgs/nixos/tests/minidlna.nix
@@ -0,0 +1,39 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "minidlna";
+
+  nodes = {
+    server =
+      { ... }:
+      {
+        imports = [ ../modules/profiles/minimal.nix ];
+        networking.firewall.allowedTCPPorts = [ 8200 ];
+        services.minidlna = {
+          enable = true;
+          loglevel = "error";
+          mediaDirs = [
+           "PV,/tmp/stuff"
+          ];
+          friendlyName = "rpi3";
+          rootContainer = "B";
+          extraConfig =
+          ''
+            album_art_names=Cover.jpg/cover.jpg/AlbumArtSmall.jpg/albumartsmall.jpg
+            album_art_names=AlbumArt.jpg/albumart.jpg/Album.jpg/album.jpg
+            album_art_names=Folder.jpg/folder.jpg/Thumb.jpg/thumb.jpg
+            notify_interval=60
+          '';
+        };
+      };
+      client = { ... }: { };
+  };
+
+  testScript =
+  ''
+    start_all()
+    server.succeed("mkdir -p /tmp/stuff && chown minidlna: /tmp/stuff")
+    server.wait_for_unit("minidlna")
+    server.wait_for_open_port("8200")
+    server.succeed("curl --fail http://localhost:8200/")
+    client.succeed("curl --fail http://server:8200/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/miniflux.nix b/nixpkgs/nixos/tests/miniflux.nix
new file mode 100644
index 000000000000..7d83d061a9df
--- /dev/null
+++ b/nixpkgs/nixos/tests/miniflux.nix
@@ -0,0 +1,56 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  port = 3142;
+  username = "alice";
+  password = "correcthorsebatterystaple";
+  defaultPort = 8080;
+  defaultUsername = "admin";
+  defaultPassword = "password";
+in
+with lib;
+{
+  name = "miniflux";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ bricewge ];
+
+  nodes = {
+    default =
+      { ... }:
+      {
+        services.miniflux.enable = true;
+      };
+
+    customized =
+      { ... }:
+      {
+        services.miniflux = {
+          enable = true;
+          config = {
+            CLEANUP_FREQUENCY = "48";
+            LISTEN_ADDR = "localhost:${toString port}";
+          };
+          adminCredentialsFile = pkgs.writeText "admin-credentials" ''
+            ADMIN_USERNAME=${username}
+            ADMIN_PASSWORD=${password}
+          '';
+        };
+      };
+  };
+  testScript = ''
+    start_all()
+
+    default.wait_for_unit("miniflux.service")
+    default.wait_for_open_port(${toString defaultPort})
+    default.succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep -q OK")
+    default.succeed(
+        "curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep -q '\"is_admin\":true'"
+    )
+
+    customized.wait_for_unit("miniflux.service")
+    customized.wait_for_open_port(${toString port})
+    customized.succeed("curl --fail 'http://localhost:${toString port}/healthcheck' | grep -q OK")
+    customized.succeed(
+        "curl 'http://localhost:${toString port}/v1/me' -u '${username}:${password}' -H Content-Type:application/json | grep -q '\"is_admin\":true'"
+    )
+  '';
+})
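The grep for '"is_admin":true' depends on the exact JSON serialization the API happens to emit. Since the test script runs in the Python driver, the same assertion can be made robust by parsing the response there — a sketch, substituting the concrete port and credentials from the let block above:

    import json

    me = json.loads(
        customized.succeed(
            "curl -s http://localhost:3142/v1/me -u 'alice:correcthorsebatterystaple'"
        )
    )
    assert me["is_admin"]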
diff --git a/nixpkgs/nixos/tests/minio.nix b/nixpkgs/nixos/tests/minio.nix
new file mode 100644
index 000000000000..3b0619742671
--- /dev/null
+++ b/nixpkgs/nixos/tests/minio.nix
@@ -0,0 +1,55 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+let
+    accessKey = "BKIKJAA5BMMU2RHO6IBB";
+    secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
+    minioPythonScript = pkgs.writeScript "minio-test.py" ''
+      #! ${pkgs.python3.withPackages(ps: [ ps.minio ])}/bin/python
+      import io
+      import os
+      from minio import Minio
+      minioClient = Minio('localhost:9000',
+                    access_key='${accessKey}',
+                    secret_key='${secretKey}',
+                    secure=False)
+      sio = io.BytesIO()
+      sio.write(b'Test from Python')
+      sio.seek(0, os.SEEK_END)
+      sio_len = sio.tell()
+      sio.seek(0)
+      minioClient.put_object('test-bucket', 'test.txt', sio, sio_len, content_type='text/plain')
+    '';
+in {
+  name = "minio";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ bachp ];
+  };
+
+  nodes = {
+    machine = { pkgs, ... }: {
+      services.minio = {
+        enable = true;
+        inherit accessKey secretKey;
+      };
+      environment.systemPackages = [ pkgs.minio-client ];
+
+      # Minio requires at least 1GiB of free disk space to run.
+      virtualisation.diskSize = 4 * 1024;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("minio.service")
+    machine.wait_for_open_port(9000)
+
+    # Create a test bucket on the server
+    machine.succeed(
+        "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} S3v4"
+    )
+    machine.succeed("mc mb minio/test-bucket")
+    machine.succeed("${minioPythonScript}")
+    assert "test-bucket" in machine.succeed("mc ls minio")
+    assert "Test from Python" in machine.succeed("mc cat minio/test-bucket/test.txt")
+    machine.shutdown()
+  '';
+})
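The read-back goes through mc; the equivalent check through the Python SDK already used for the upload would be — a sketch appended to minioPythonScript, noting that get_object returns a urllib3 response whose body has to be read explicitly:

    # Fetch the object back through the same client and compare payloads.
    data = minioClient.get_object('test-bucket', 'test.txt').read()
    assert data == b'Test from Python'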
diff --git a/nixpkgs/nixos/tests/misc.nix b/nixpkgs/nixos/tests/misc.nix
new file mode 100644
index 000000000000..17260ce64067
--- /dev/null
+++ b/nixpkgs/nixos/tests/misc.nix
@@ -0,0 +1,130 @@
+# Miscellaneous small tests that don't warrant their own VM run.
+
+import ./make-test-python.nix ({ pkgs, ...} : rec {
+  name = "misc";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  foo = pkgs.writeText "foo" "Hello World";
+
+  machine =
+    { lib, ... }:
+    with lib;
+    { swapDevices = mkOverride 0
+        [ { device = "/root/swapfile"; size = 128; } ];
+      environment.variables.EDITOR = mkOverride 0 "emacs";
+      documentation.nixos.enable = mkOverride 0 true;
+      systemd.tmpfiles.rules = [ "d /tmp 1777 root root 10d" ];
+      fileSystems = mkVMOverride { "/tmp2" =
+        { fsType = "tmpfs";
+          options = [ "mode=1777" "noauto" ];
+        };
+      };
+      systemd.automounts = singleton
+        { wantedBy = [ "multi-user.target" ];
+          where = "/tmp2";
+        };
+      users.users.sybil = { isNormalUser = true; group = "wheel"; };
+      security.sudo = { enable = true; wheelNeedsPassword = false; };
+      boot.kernel.sysctl."vm.swappiness" = 1;
+      boot.kernelParams = [ "vsyscall=emulate" ];
+      system.extraDependencies = [ foo ];
+    };
+
+  testScript =
+    ''
+      import json
+
+
+      def get_path_info(path):
+          result = machine.succeed(f"nix path-info --json {path}")
+          parsed = json.loads(result)
+          return parsed
+
+
+      with subtest("nix-db"):
+          info = get_path_info("${foo}")
+
+          if (
+              info[0]["narHash"]
+              != "sha256:0afw0d9j1hvwiz066z93jiddc33nxg6i6qyp26vnqyglpyfivlq5"
+          ):
+              raise Exception("narHash not set")
+
+          if info[0]["narSize"] != 128:
+              raise Exception("narSize not set")
+
+      with subtest("nixos-version"):
+          machine.succeed("[ `nixos-version | wc -w` = 2 ]")
+
+      with subtest("nixos-rebuild"):
+          assert "NixOS module" in machine.succeed("nixos-rebuild --help")
+
+      with subtest("Sanity check for uid/gid assignment"):
+          assert "4" == machine.succeed("id -u messagebus").strip()
+          assert "4" == machine.succeed("id -g messagebus").strip()
+          assert "users:x:100:" == machine.succeed("getent group users").strip()
+
+      with subtest("Regression test for GMP aborts on QEMU."):
+          machine.succeed("expr 1 + 2")
+
+      with subtest("the swap file got created"):
+          machine.wait_for_unit("root-swapfile.swap")
+          machine.succeed("ls -l /root/swapfile | grep 134217728")
+
+      with subtest("whether kernel.poweroff_cmd is set"):
+          machine.succeed('[ -x "$(cat /proc/sys/kernel/poweroff_cmd)" ]')
+
+      with subtest("whether the blkio controller is properly enabled"):
+          machine.succeed("[ -e /sys/fs/cgroup/blkio/blkio.reset_stats ]")
+
+      with subtest("whether we have a reboot record in wtmp"):
+          machine.shutdown()
+          machine.wait_for_unit("multi-user.target")
+          machine.succeed("last | grep reboot >&2")
+
+      with subtest("whether we can override environment variables"):
+          machine.succeed('[ "$EDITOR" = emacs ]')
+
+      with subtest("whether hostname (and by extension nss_myhostname) works"):
+          assert "machine" == machine.succeed("hostname").strip()
+          assert "machine" == machine.succeed("hostname -s").strip()
+
+      with subtest("whether systemd-udevd automatically loads modules for our hardware"):
+          machine.succeed("systemctl start systemd-udev-settle.service")
+          machine.wait_for_unit("systemd-udev-settle.service")
+          assert "mousedev" in machine.succeed("lsmod")
+
+      with subtest("whether systemd-tmpfiles-clean works"):
+          machine.succeed(
+              "touch /tmp/foo", "systemctl start systemd-tmpfiles-clean", "[ -e /tmp/foo ]"
+          )
+          # move into the future
+          machine.succeed(
+              'date -s "@$(($(date +%s) + 1000000))"',
+              "systemctl start systemd-tmpfiles-clean",
+          )
+          machine.fail("[ -e /tmp/foo ]")
+
+      with subtest("whether automounting works"):
+          machine.fail("grep '/tmp2 tmpfs' /proc/mounts")
+          machine.succeed("touch /tmp2/x")
+          machine.succeed("grep '/tmp2 tmpfs' /proc/mounts")
+
+      with subtest("shell-vars"):
+          machine.succeed('[ -n "$NIX_PATH" ]')
+
+      with subtest("nix-db"):
+          machine.succeed("nix-store -qR /run/current-system | grep nixos-")
+
+      with subtest("Test sysctl"):
+          machine.wait_for_unit("systemd-sysctl.service")
+          assert "1" == machine.succeed("sysctl -ne vm.swappiness").strip()
+          machine.execute("sysctl vm.swappiness=60")
+          assert "60" == machine.succeed("sysctl -ne vm.swappiness").strip()
+
+      with subtest("Test boot parameters"):
+          assert "vsyscall=emulate" in machine.succeed("cat /proc/cmdline")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/moinmoin.nix b/nixpkgs/nixos/tests/moinmoin.nix
new file mode 100644
index 000000000000..ac327498eba0
--- /dev/null
+++ b/nixpkgs/nixos/tests/moinmoin.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "moinmoin";
+  meta.maintainers = with lib.maintainers; [ mmilata ];
+
+  machine =
+    { ... }:
+    { services.moinmoin.enable = true;
+      services.moinmoin.wikis.ExampleWiki.superUsers = [ "admin" ];
+      services.moinmoin.wikis.ExampleWiki.webHost = "localhost";
+
+      services.nginx.virtualHosts.localhost.enableACME = false;
+      services.nginx.virtualHosts.localhost.forceSSL = false;
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("moin-ExampleWiki.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_file("/run/moin/ExampleWiki/gunicorn.sock")
+
+    assert "If you have just installed" in machine.succeed("curl -L http://localhost/")
+
+    assert "status success" in machine.succeed(
+        "moin-ExampleWiki account create --name=admin --email=admin@example.com --password=foo 2>&1"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mongodb.nix b/nixpkgs/nixos/tests/mongodb.nix
new file mode 100644
index 000000000000..a637ec4bfc00
--- /dev/null
+++ b/nixpkgs/nixos/tests/mongodb.nix
@@ -0,0 +1,52 @@
+# This test starts MongoDB and runs a query using the mongo shell.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+    testQuery = pkgs.writeScript "nixtest.js" ''
+      db.greetings.insert({ "greeting": "hello" });
+      print(db.greetings.findOne().greeting);
+    '';
+
+    runMongoDBTest = pkg: ''
+      node.execute("(rm -rf data || true) && mkdir data")
+      node.execute(
+          "${pkg}/bin/mongod --fork --logpath logs --dbpath data"
+      )
+      node.wait_for_open_port(27017)
+
+      assert "hello" in node.succeed(
+          "mongo ${testQuery}"
+      )
+
+      node.execute(
+          "${pkg}/bin/mongod --shutdown --dbpath data"
+      )
+      node.wait_for_closed_port(27017)
+    '';
+
+  in {
+    name = "mongodb";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ bluescreen303 offline cstrahan rvl phile314 ];
+    };
+
+    nodes = {
+      node = {...}: {
+        environment.systemPackages = with pkgs; [
+          mongodb-3_4
+          mongodb-3_6
+          mongodb-4_0
+        ];
+      };
+    };
+
+    testScript = ''
+      node.start()
+    ''
+      + runMongoDBTest pkgs.mongodb-3_4
+      + runMongoDBTest pkgs.mongodb-3_6
+      + runMongoDBTest pkgs.mongodb-4_0
+      + ''
+        node.shutdown()
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/moodle.nix b/nixpkgs/nixos/tests/moodle.nix
new file mode 100644
index 000000000000..56aa62596c07
--- /dev/null
+++ b/nixpkgs/nixos/tests/moodle.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "moodle";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  machine =
+    { ... }:
+    { services.moodle.enable = true;
+      services.moodle.virtualHost.hostName = "localhost";
+      services.moodle.virtualHost.adminAddr = "root@example.com";
+      services.moodle.initialPassword = "correcthorsebatterystaple";
+
+      # Ensure the virtual machine has enough memory to avoid errors like:
+      # Fatal error: Out of memory (allocated 152047616) (tried to allocate 33554440 bytes)
+      virtualisation.memorySize = 2000;
+    };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("phpfpm-moodle.service")
+    machine.wait_until_succeeds("curl http://localhost/ | grep 'You are not logged in'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/morty.nix b/nixpkgs/nixos/tests/morty.nix
new file mode 100644
index 000000000000..64c5a27665d6
--- /dev/null
+++ b/nixpkgs/nixos/tests/morty.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "morty";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ leenaars ];
+  };
+
+  nodes =
+    { mortyProxyWithKey =
+
+      { ... }:
+      { services.morty = {
+          enable = true;
+          key = "78a9cd0cfee20c672f78427efb2a2a96036027f0";
+          port = 3001;
+        };
+      };
+
+    };
+
+  testScript =
+    { ... }:
+    ''
+      mortyProxyWithKey.wait_for_unit("default.target")
+      mortyProxyWithKey.wait_for_open_port(3001)
+      mortyProxyWithKey.succeed("curl -L 127.0.0.1:3001 | grep MortyProxy")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/mosquitto.nix b/nixpkgs/nixos/tests/mosquitto.nix
new file mode 100644
index 000000000000..1f2fdf4237fa
--- /dev/null
+++ b/nixpkgs/nixos/tests/mosquitto.nix
@@ -0,0 +1,90 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  port = 1888;
+  username = "mqtt";
+  password = "VERY_secret";
+  topic = "test/foo";
+in {
+  name = "mosquitto";
+  meta = with pkgs.stdenv.lib; {
+    maintainers = with maintainers; [ peterhoeg ];
+  };
+
+  nodes = let
+    client = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [ mosquitto ];
+    };
+  in {
+    server = { pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ port ];
+      services.mosquitto = {
+        inherit port;
+        enable = true;
+        host = "0.0.0.0";
+        checkPasswords = true;
+        users.${username} = {
+          inherit password;
+          acl = [
+            "topic readwrite ${topic}"
+          ];
+        };
+      };
+    };
+
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = let
+    file = "/tmp/msg";
+  in ''
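+    # mosquitto_cmd builds the client invocation shared by pub and sub, e.g.
+    # mosquitto_cmd("pub") expands to roughly:
+    #   <mosquitto>/bin/mosquitto_pub -V mqttv311 -h server -p 1888 -u mqtt -P 'VERY_secret' -t test/foo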
+    def mosquitto_cmd(binary):
+        return (
+            "${pkgs.mosquitto}/bin/mosquitto_{} "
+            "-V mqttv311 "
+            "-h server "
+            "-p ${toString port} "
+            "-u ${username} "
+            "-P '${password}' "
+            "-t ${topic}"
+        ).format(binary)
+
+
+    def publish(args):
+        return "{} {}".format(mosquitto_cmd("pub"), args)
+
+
+    def subscribe(args):
+        return "({} -C 1 {} | tee ${file} &)".format(mosquitto_cmd("sub"), args)
+
+
+    start_all()
+    server.wait_for_unit("mosquitto.service")
+
+    for machine in server, client1, client2:
+        machine.fail("test -f ${file}")
+
+    # QoS = 0, so only one subscriber should get it
+    server.execute(subscribe("-q 0"))
+
+    # we need to give the subscribers some time to connect
+    client2.execute("sleep 5")
+    client2.succeed(publish("-m FOO -q 0"))
+
+    server.wait_until_succeeds("grep -q FOO ${file}")
+    server.execute("rm ${file}")
+
+    # QoS = 1, so both subscribers should get it
+    server.execute(subscribe("-q 1"))
+    client1.execute(subscribe("-q 1"))
+
+    # we need to give the subscribers some time to connect
+    client2.execute("sleep 5")
+    client2.succeed(publish("-m BAR -q 1"))
+
+    for machine in server, client1:
+        machine.wait_until_succeeds("grep -q BAR ${file}")
+        machine.execute("rm ${file}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mpd.nix b/nixpkgs/nixos/tests/mpd.nix
new file mode 100644
index 000000000000..60aef586ad5c
--- /dev/null
+++ b/nixpkgs/nixos/tests/mpd.nix
@@ -0,0 +1,132 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  let
+    track = pkgs.fetchurl {
+      # Sourced from http://freemusicarchive.org/music/Blue_Wave_Theory/Surf_Music_Month_Challenge/Skyhawk_Beach_fade_in
+      # License: http://creativecommons.org/licenses/by-sa/4.0/
+
+      name = "Blue_Wave_Theory-Skyhawk_Beach.mp3";
+      url = "https://freemusicarchive.org/file/music/ccCommunity/Blue_Wave_Theory/Surf_Music_Month_Challenge/Blue_Wave_Theory_-_04_-_Skyhawk_Beach.mp3";
+      sha256 = "0xw417bxkx4gqqy139bb21yldi37xx8xjfxrwaqa0gyw19dl6mgp";
+    };
+
+    defaultCfg = rec {
+      user = "mpd";
+      group = "mpd";
+      dataDir = "/var/lib/mpd";
+      musicDirectory = "${dataDir}/music";
+    };
+
+    defaultMpdCfg = with defaultCfg; {
+      inherit dataDir musicDirectory user group;
+      enable = true;
+    };
+
+    musicService = { user, group, musicDirectory }: {
+      description = "Sets up the music file(s) for MPD to use.";
+      requires = [ "mpd.service" ];
+      after = [ "mpd.service" ];
+      wantedBy = [ "default.target" ];
+      script = ''
+        mkdir -p ${musicDirectory} && chown -R ${user}:${group} ${musicDirectory}
+        cp ${track} ${musicDirectory}
+        chown ${user}:${group} ${musicDirectory}/$(basename ${track})
+      '';
+    };
+
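+    # Assembles a server node: the snd-dummy kernel module provides a virtual
+    # sound card for the VM, and the given MPD configuration and the
+    # music-provisioning unit are merged into the node's configuration.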
+    mkServer = { mpd, musicService, }:
+      { boot.kernelModules = [ "snd-dummy" ];
+        sound.enable = true;
+        services.mpd = mpd;
+        systemd.services.musicService = musicService;
+      };
+  in {
+    name = "mpd";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ emmanuelrosa ];
+    };
+
+  nodes =
+    { client =
+      { ... }: { };
+
+      serverALSA =
+        { ... }: lib.mkMerge [
+          (mkServer {
+            mpd = defaultMpdCfg // {
+              network.listenAddress = "any";
+              extraConfig = ''
+                audio_output {
+                  type "alsa"
+                  name "ALSA"
+                  mixer_type "null"
+                }
+              '';
+            };
+            musicService = with defaultMpdCfg; musicService { inherit user group musicDirectory; };
+          })
+          { networking.firewall.allowedTCPPorts = [ 6600 ]; }
+        ];
+
+      serverPulseAudio =
+        { ... }: lib.mkMerge [
+          (mkServer {
+            mpd = defaultMpdCfg // {
+              extraConfig = ''
+                audio_output {
+                  type "pulse"
+                  name "The Pulse"
+                }
+              '';
+            };
+
+            musicService = with defaultCfg; musicService { inherit user group musicDirectory; };
+          })
+          {
+            hardware.pulseaudio = {
+              enable = true;
+              systemWide = true;
+              tcp.enable = true;
+              tcp.anonymousClients.allowAll = true;
+            };
+            systemd.services.mpd.environment.PULSE_SERVER = "localhost";
+          }
+        ];
+    };
+
+  testScript = ''
+    mpc = "${pkgs.mpc_cli}/bin/mpc --wait"
+
+    # Connects to the given server and attempts to play a tune.
+    def play_some_music(server):
+        server.wait_for_unit("mpd.service")
+        server.succeed(f"{mpc} update")
+        _, tracks = server.execute(f"{mpc} ls")
+
+        for track in tracks.splitlines():
+            server.succeed(f"{mpc} add {track}")
+
+        _, added_tracks = server.execute(f"{mpc} listall")
+
+        # Check that we succeeded in adding audio tracks to the playlist
+        assert len(added_tracks.splitlines()) > 0
+
+        server.succeed(f"{mpc} play")
+
+        _, output = server.execute(f"{mpc} status")
+        # Make sure the audio track is playing
+        assert "playing" in output
+
+        server.succeed(f"{mpc} stop")
+
+
+    play_some_music(serverALSA)
+    play_some_music(serverPulseAudio)
+
+    client.wait_for_unit("multi-user.target")
+    client.succeed(f"{mpc} -h serverALSA status")
+
+    # The PulseAudio-based server is deliberately not configured to accept
+    # external MPD client connections, so the following check must fail:
+    client.fail(f"{mpc} -h serverPulseAudio status")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mpich-example.c b/nixpkgs/nixos/tests/mpich-example.c
new file mode 100644
index 000000000000..c48e3c45b72e
--- /dev/null
+++ b/nixpkgs/nixos/tests/mpich-example.c
@@ -0,0 +1,21 @@
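+/* Minimal MPI smoke test: every rank prints its own rank, the total number
+   of ranks, and the name of the processor it is running on. */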
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+int
+main (int argc, char *argv[])
+{
+  int rank, size, length;
+  char name[BUFSIZ];
+
+  MPI_Init (&argc, &argv);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Comm_size (MPI_COMM_WORLD, &size);
+  MPI_Get_processor_name (name, &length);
+
+  printf ("%s: hello world from process %d of %d\n", name, rank, size);
+
+  MPI_Finalize ();
+
+  return EXIT_SUCCESS;
+}
diff --git a/nixpkgs/nixos/tests/mumble.nix b/nixpkgs/nixos/tests/mumble.nix
new file mode 100644
index 000000000000..e9b6d14c6a1f
--- /dev/null
+++ b/nixpkgs/nixos/tests/mumble.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  client = { pkgs, ... }: {
+    imports = [ ./common/x11.nix ];
+    environment.systemPackages = [ pkgs.mumble ];
+  };
+in
+{
+  name = "mumble";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ thoughtpolice eelco ];
+  };
+
+  nodes = {
+    server = { config, ... }: {
+      services.murmur.enable       = true;
+      services.murmur.registerName = "NixOS tests";
+      networking.firewall.allowedTCPPorts = [ config.services.murmur.port ];
+    };
+
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("murmur.service")
+    client1.wait_for_x()
+    client2.wait_for_x()
+
+    client1.execute("mumble mumble://client1\@server/test &")
+    client2.execute("mumble mumble://client2\@server/test &")
+
+    # cancel client audio configuration
+    client1.wait_for_window(r"Audio Tuning Wizard")
+    client2.wait_for_window(r"Audio Tuning Wizard")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+    client1.send_key("esc")
+    client2.send_key("esc")
+
+    # cancel client cert configuration
+    client1.wait_for_window(r"Certificate Management")
+    client2.wait_for_window(r"Certificate Management")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+    client1.send_key("esc")
+    client2.send_key("esc")
+
+    # accept server certificate
+    client1.wait_for_window(r"^Mumble$")
+    client2.wait_for_window(r"^Mumble$")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+    client1.send_chars("y")
+    client2.send_chars("y")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+
+    # sometimes the wrong one of the two windows is focused; switch focus and try pressing "y" again
+    client1.send_key("alt-tab")
+    client2.send_key("alt-tab")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+    client1.send_chars("y")
+    client2.send_chars("y")
+
+    # Find clients in logs
+    server.wait_until_succeeds("journalctl -eu murmur -o cat | grep -q client1")
+    server.wait_until_succeeds("journalctl -eu murmur -o cat | grep -q client2")
+
+    server.sleep(5)  # wait to get screenshot
+    client1.screenshot("screen1")
+    client2.screenshot("screen2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/munin.nix b/nixpkgs/nixos/tests/munin.nix
new file mode 100644
index 000000000000..7b674db7768d
--- /dev/null
+++ b/nixpkgs/nixos/tests/munin.nix
@@ -0,0 +1,44 @@
+# This test runs a basic munin setup with the node and the cron job running
+# on the same machine.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "munin";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ domenkozar eelco ];
+  };
+
+  nodes = {
+    one =
+      { config, ... }:
+        {
+          services = {
+            munin-node = {
+              enable = true;
+              # disable a failing plugin to prevent irrelevant error messages, see #23049
+              disabledPlugins = [ "apc_nis" ];
+            };
+            munin-cron = {
+              enable = true;
+              hosts = ''
+                [${config.networking.hostName}]
+                address localhost
+              '';
+            };
+          };
+
+          # increase the systemd timer interval so it fires more often
+          systemd.timers.munin-cron.timerConfig.OnCalendar = pkgs.stdenv.lib.mkForce "*:*:0/10";
+        };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("ensure munin-node starts and listens on 4949"):
+        one.wait_for_unit("munin-node.service")
+        one.wait_for_open_port(4949)
+    with subtest("ensure munin-cron output is correct"):
+        one.wait_for_file("/var/lib/munin/one/one-uptime-uptime-g.rrd")
+        one.wait_for_file("/var/www/munin/one/index.html")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mutable-users.nix b/nixpkgs/nixos/tests/mutable-users.nix
new file mode 100644
index 000000000000..49c7f78b82ed
--- /dev/null
+++ b/nixpkgs/nixos/tests/mutable-users.nix
@@ -0,0 +1,45 @@
+# Mutable users tests.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "mutable-users";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ gleber ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      users.mutableUsers = false;
+    };
+    mutable = { ... }: {
+      users.mutableUsers = true;
+    };
+  };
+
+  testScript = {nodes, ...}: let
+    immutableSystem = nodes.machine.config.system.build.toplevel;
+    mutableSystem = nodes.mutable.config.system.build.toplevel;
+  in ''
+    machine.start()
+    machine.wait_for_unit("default.target")
+
+    # Machine starts in immutable mode. Add a user and test if reactivating
+    # configuration removes the user.
+    with subtest("Machine in immutable mode"):
+        assert "foobar" not in machine.succeed("cat /etc/passwd")
+        machine.succeed("sudo useradd foobar")
+        assert "foobar" in machine.succeed("cat /etc/passwd")
+        machine.succeed(
+            "${immutableSystem}/bin/switch-to-configuration test"
+        )
+        assert "foobar" not in machine.succeed("cat /etc/passwd")
+
+    # In immutable mode passwd is not wrapped, while in mutable mode it is
+    # wrapped.
+    with subtest("Password is wrapped in mutable mode"):
+        assert "/run/current-system/" in machine.succeed("which passwd")
+        machine.succeed(
+            "${mutableSystem}/bin/switch-to-configuration test"
+        )
+        assert "/run/wrappers/" in machine.succeed("which passwd")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mxisd.nix b/nixpkgs/nixos/tests/mxisd.nix
new file mode 100644
index 000000000000..b2b60db4d822
--- /dev/null
+++ b/nixpkgs/nixos/tests/mxisd.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ... } : {
+
+  name = "mxisd";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mguentner ];
+  };
+
+  nodes = {
+    server_mxisd = args: {
+      services.mxisd.enable = true;
+      services.mxisd.matrix.domain = "example.org";
+    };
+
+    server_ma1sd = args: {
+      services.mxisd.enable = true;
+      services.mxisd.matrix.domain = "example.org";
+      services.mxisd.package = pkgs.ma1sd;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server_mxisd.wait_for_unit("mxisd.service")
+    server_mxisd.wait_for_open_port(8090)
+    server_mxisd.succeed("curl -Ssf 'http://127.0.0.1:8090/_matrix/identity/api/v1'")
+    server_ma1sd.wait_for_unit("mxisd.service")
+    server_ma1sd.wait_for_open_port(8090)
+    server_ma1sd.succeed("curl -Ssf 'http://127.0.0.1:8090/_matrix/identity/api/v1'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mysql-backup.nix b/nixpkgs/nixos/tests/mysql-backup.nix
new file mode 100644
index 000000000000..a0595e4d5539
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql-backup.nix
@@ -0,0 +1,56 @@
+# Test whether the mysqlBackup option works
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "mysql-backup";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ rvl ];
+  };
+
+  nodes = {
+    master = { pkgs, ... }: {
+      services.mysql = {
+        enable = true;
+        initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
+        package = pkgs.mysql;
+      };
+
+      services.mysqlBackup = {
+        enable = true;
+        databases = [ "doesnotexist" "testdb" ];
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    # Delete backup file that may be left over from a previous test run.
+    # This is not needed on Hydra but useful for repeated local test runs.
+    master.execute("rm -f /var/backup/mysql/testdb.gz")
+
+    # Need to have mysql started so that it can be populated with data.
+    master.wait_for_unit("mysql.service")
+
+    # Wait for testdb to be fully populated (5 rows).
+    master.wait_until_succeeds(
+        "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+    )
+
+    # Do a backup and wait for it to start
+    master.start_job("mysql-backup.service")
+    master.wait_for_unit("mysql-backup.service")
+
+    # Wait for the backup to fail because of the nonexistent database 'doesnotexist'
+    master.wait_until_fails("systemctl is-active -q mysql-backup.service")
+
+    # wait for backup file and check that data appears in backup
+    master.wait_for_file("/var/backup/mysql/testdb.gz")
+    master.succeed(
+        "${pkgs.gzip}/bin/zcat /var/backup/mysql/testdb.gz | grep hello"
+    )
+
+    # Check that a failed backup is logged
+    master.succeed(
+        "journalctl -u mysql-backup.service | grep 'fail.*doesnotexist' > /dev/null"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mysql-replication.nix b/nixpkgs/nixos/tests/mysql-replication.nix
new file mode 100644
index 000000000000..a2654f041add
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql-replication.nix
@@ -0,0 +1,89 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  replicateUser = "replicate";
+  replicatePassword = "secret";
+in
+
+{
+  name = "mysql-replication";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco shlevy ];
+  };
+
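+  # One master and two slaves: the slaves locate the master via its hostname
+  # and authenticate with the replication credentials defined above.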
+  nodes = {
+    master =
+      { pkgs, ... }:
+
+      {
+        services.mysql.enable = true;
+        services.mysql.package = pkgs.mysql;
+        services.mysql.replication.role = "master";
+        services.mysql.replication.slaveHost = "%";
+        services.mysql.replication.masterUser = replicateUser;
+        services.mysql.replication.masterPassword = replicatePassword;
+        services.mysql.initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
+        networking.firewall.allowedTCPPorts = [ 3306 ];
+      };
+
+    slave1 =
+      { pkgs, nodes, ... }:
+
+      {
+        services.mysql.enable = true;
+        services.mysql.package = pkgs.mysql;
+        services.mysql.replication.role = "slave";
+        services.mysql.replication.serverId = 2;
+        services.mysql.replication.masterHost = nodes.master.config.networking.hostName;
+        services.mysql.replication.masterUser = replicateUser;
+        services.mysql.replication.masterPassword = replicatePassword;
+      };
+
+    slave2 =
+      { pkgs, nodes, ... }:
+
+      {
+        services.mysql.enable = true;
+        services.mysql.package = pkgs.mysql;
+        services.mysql.replication.role = "slave";
+        services.mysql.replication.serverId = 3;
+        services.mysql.replication.masterHost = nodes.master.config.networking.hostName;
+        services.mysql.replication.masterUser = replicateUser;
+        services.mysql.replication.masterPassword = replicatePassword;
+      };
+  };
+
+  testScript = ''
+    master.start()
+    master.wait_for_unit("mysql")
+    master.wait_for_open_port(3306)
+    # Wait for testdb to be fully populated (5 rows).
+    master.wait_until_succeeds(
+        "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+    )
+
+    slave1.start()
+    slave2.start()
+    slave1.wait_for_unit("mysql")
+    slave1.wait_for_open_port(3306)
+    slave2.wait_for_unit("mysql")
+    slave2.wait_for_open_port(3306)
+
+    # wait for replications to finish
+    slave1.wait_until_succeeds(
+        "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+    )
+    slave2.wait_until_succeeds(
+        "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+    )
+
+    slave2.succeed("systemctl stop mysql")
+    master.succeed("echo 'insert into testdb.tests values (123, 456);' | mysql -u root -N")
+    slave2.succeed("systemctl start mysql")
+    slave2.wait_for_unit("mysql")
+    slave2.wait_for_open_port(3306)
+    slave2.wait_until_succeeds(
+        "echo 'select * from testdb.tests where Id = 123;' | mysql -u root -N | grep 456"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mysql.nix b/nixpkgs/nixos/tests/mysql.nix
new file mode 100644
index 000000000000..11c1dabf9360
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql.nix
@@ -0,0 +1,143 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "mysql";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco shlevy ];
+  };
+
+  nodes = {
+    mysql =
+      { pkgs, ... }:
+
+      {
+        services.mysql.enable = true;
+        services.mysql.initialDatabases = [
+          { name = "testdb"; schema = ./testdb.sql; }
+          { name = "empty_testdb"; }
+        ];
+        # note that using pkgs.writeText here is generally not a good idea,
+        # as it will store the password in world-readable /nix/store ;)
+        services.mysql.initialScript = pkgs.writeText "mysql-init.sql" ''
+          CREATE USER 'passworduser'@'localhost' IDENTIFIED BY 'password123';
+        '';
+        services.mysql.package = pkgs.mysql57;
+      };
+
+    mysql80 =
+      { pkgs, ... }:
+
+      {
+        # prevent oom:
+        # Kernel panic - not syncing: Out of memory: compulsory panic_on_oom is enabled
+        virtualisation.memorySize = 1024;
+
+        services.mysql.enable = true;
+        services.mysql.initialDatabases = [
+          { name = "testdb"; schema = ./testdb.sql; }
+          { name = "empty_testdb"; }
+        ];
+        # note that using pkgs.writeText here is generally not a good idea,
+        # as it will store the password in world-readable /nix/store ;)
+        services.mysql.initialScript = pkgs.writeText "mysql-init.sql" ''
+          CREATE USER 'passworduser'@'localhost' IDENTIFIED BY 'password123';
+        '';
+        services.mysql.package = pkgs.mysql80;
+      };
+
+    mariadb =
+      { pkgs, ... }:
+
+      {
+        users.users.testuser = { };
+        users.users.testuser2 = { };
+        services.mysql.enable = true;
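+        # Switch root to unix_socket authentication and drop anonymous and
+        # passwordless accounts, similar to what mysql_secure_installation does.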
+        services.mysql.initialScript = pkgs.writeText "mariadb-init.sql" ''
+          ALTER USER root@localhost IDENTIFIED WITH unix_socket;
+          DELETE FROM mysql.user WHERE password = ''' AND plugin = ''';
+          DELETE FROM mysql.user WHERE user = ''';
+          FLUSH PRIVILEGES;
+        '';
+        services.mysql.ensureDatabases = [ "testdb" "testdb2" ];
+        services.mysql.ensureUsers = [{
+          name = "testuser";
+          ensurePermissions = {
+            "testdb.*" = "ALL PRIVILEGES";
+          };
+        } {
+          name = "testuser2";
+          ensurePermissions = {
+            "testdb2.*" = "ALL PRIVILEGES";
+          };
+        }];
+        services.mysql.settings = {
+          mysqld = {
+            plugin-load-add = [ "ha_tokudb.so" "ha_rocksdb.so" ];
+          };
+        };
+        services.mysql.package = pkgs.mariadb;
+      };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    mysql.wait_for_unit("mysql")
+    mysql.succeed("echo 'use empty_testdb;' | mysql -u root")
+    mysql.succeed("echo 'use testdb; select * from tests;' | mysql -u root -N | grep 4")
+    # ';' acts as a no-op; just check that login succeeds with the user created by the initialScript
+    mysql.succeed("echo ';' | mysql -u passworduser --password=password123")
+
+    mysql80.wait_for_unit("mysql")
+    mysql80.succeed("echo 'use empty_testdb;' | mysql -u root")
+    mysql80.succeed("echo 'use testdb; select * from tests;' | mysql -u root -N | grep 4")
+    # ';' acts as a no-op; just check that login succeeds with the user created by the initialScript
+    mysql80.succeed("echo ';' | mysql -u passworduser --password=password123")
+
+    mariadb.wait_for_unit("mysql")
+    mariadb.succeed(
+        "echo 'use testdb; create table tests (test_id INT, PRIMARY KEY (test_id));' | sudo -u testuser mysql -u testuser"
+    )
+    mariadb.succeed(
+        "echo 'use testdb; insert into tests values (42);' | sudo -u testuser mysql -u testuser"
+    )
+    # Ensure testuser2 is not able to insert into testdb as mysql testuser2
+    mariadb.fail(
+        "echo 'use testdb; insert into tests values (23);' | sudo -u testuser2 mysql -u testuser2"
+    )
+    # Ensure testuser2 is not able to authenticate as mysql testuser
+    mariadb.fail(
+        "echo 'use testdb; insert into tests values (23);' | sudo -u testuser2 mysql -u testuser"
+    )
+    mariadb.succeed(
+        "echo 'use testdb; select test_id from tests;' | sudo -u testuser mysql -u testuser -N | grep 42"
+    )
+
+    # Check if TokuDB plugin works
+    mariadb.succeed(
+        "echo 'use testdb; create table tokudb (test_id INT, PRIMARY KEY (test_id)) ENGINE = TokuDB;' | sudo -u testuser mysql -u testuser"
+    )
+    mariadb.succeed(
+        "echo 'use testdb; insert into tokudb values (25);' | sudo -u testuser mysql -u testuser"
+    )
+    mariadb.succeed(
+        "echo 'use testdb; select test_id from tokudb;' | sudo -u testuser mysql -u testuser -N | grep 25"
+    )
+    mariadb.succeed(
+        "echo 'use testdb; drop table tokudb;' | sudo -u testuser mysql -u testuser"
+    )
+
+    # Check if RocksDB plugin works
+    mariadb.succeed(
+        "echo 'use testdb; create table rocksdb (test_id INT, PRIMARY KEY (test_id)) ENGINE = RocksDB;' | sudo -u testuser mysql -u testuser"
+    )
+    mariadb.succeed(
+        "echo 'use testdb; insert into rocksdb values (28);' | sudo -u testuser mysql -u testuser"
+    )
+    mariadb.succeed(
+        "echo 'use testdb; select test_id from rocksdb;' | sudo -u testuser mysql -u testuser -N | grep 28"
+    )
+    mariadb.succeed(
+        "echo 'use testdb; drop table rocksdb;' | sudo -u testuser mysql -u testuser"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nagios.nix b/nixpkgs/nixos/tests/nagios.nix
new file mode 100644
index 000000000000..6f5d44472878
--- /dev/null
+++ b/nixpkgs/nixos/tests/nagios.nix
@@ -0,0 +1,116 @@
+import ./make-test-python.nix (
+  { pkgs, ... }: {
+    name = "nagios";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ symphorien ];
+    };
+
+    machine = { lib, ... }: let
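+      # Shell helper used as the notification command: it appends each
+      # notification to /tmp/notifications so the test script can assert on it.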
+      writer = pkgs.writeShellScript "write" ''
+        set -x
+        echo "$@"  >> /tmp/notifications
+      '';
+    in
+      {
+        # tested service
+        services.sshd.enable = true;
+        # nagios
+        services.nagios = {
+          enable = true;
+          # make state transitions faster
+          extraConfig.interval_length = "5";
+          objectDefs =
+            (map (x: "${pkgs.nagios}/etc/objects/${x}.cfg") [ "templates" "timeperiods" "commands" ]) ++ [
+              (
+                pkgs.writeText "objects.cfg" ''
+                  # notifications are written to /tmp/notifications
+                  define command {
+                  command_name notify-host-by-file
+                  command_line ${writer} "$HOSTNAME$ is $HOSTSTATE$"
+                  }
+                  define command {
+                  command_name notify-service-by-file
+                  command_line ${writer} "$SERVICEDESC$ is $SERVICESTATE$"
+                  }
+
+                  # nagios boilerplate
+                  define contact {
+                  contact_name                    alice
+                  alias                           alice
+                  host_notifications_enabled      1
+                  service_notifications_enabled   1
+                  service_notification_period     24x7
+                  host_notification_period        24x7
+                  service_notification_options    w,u,c,r,f,s
+                  host_notification_options       d,u,r,f,s
+                  service_notification_commands   notify-service-by-file
+                  host_notification_commands      notify-host-by-file
+                  email                           foo@example.com
+                  }
+                  define contactgroup {
+                  contactgroup_name   admins
+                  alias               Admins
+                  members alice
+                  }
+                  define hostgroup {
+                  hostgroup_name  allhosts
+                  alias  All hosts
+                  }
+
+                  # monitored objects
+                  define host {
+                  use         generic-host
+                  host_name   localhost
+                  alias       localhost
+                  address     localhost
+                  hostgroups  allhosts
+                  contact_groups admins
+                  # make state transitions faster.
+                  max_check_attempts 2
+                  check_interval 1
+                  retry_interval 1
+                  }
+                  define service {
+                  use                 generic-service
+                  host_name           localhost
+                  service_description ssh
+                  check_command       check_ssh
+                  # make state transitions faster.
+                  max_check_attempts 2
+                  check_interval 1
+                  retry_interval 1
+                  }
+                ''
+              )
+            ];
+        };
+      };
+
+    testScript = { ... }: ''
+      with subtest("ensure sshd starts"):
+          machine.wait_for_unit("sshd.service")
+
+
+      with subtest("ensure nagios starts"):
+          machine.wait_for_file("/var/log/nagios/current")
+
+
+      def assert_notify(text):
+          machine.wait_for_file("/tmp/notifications")
+          real = machine.succeed("cat /tmp/notifications").strip()
+          print(f"got {real!r}, expected {text!r}")
+          assert text == real
+
+
+      with subtest("ensure we get a notification when sshd is down"):
+          machine.succeed("systemctl stop sshd")
+          assert_notify("ssh is CRITICAL")
+
+
+      with subtest("ensure tests can succeed"):
+          machine.succeed("systemctl start sshd")
+          machine.succeed("rm /tmp/notifications")
+          assert_notify("ssh is OK")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/nat.nix b/nixpkgs/nixos/tests/nat.nix
new file mode 100644
index 000000000000..0d1f7aaedfa2
--- /dev/null
+++ b/nixpkgs/nixos/tests/nat.nix
@@ -0,0 +1,120 @@
+# This is a simple distributed test involving a topology with two
+# separate virtual networks - the "inside" and the "outside" - with a
+# client on the inside network, a server on the outside network, and a
+# router connected to both that performs Network Address Translation
+# for the client.
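+#
+# The test is parameterized: withFirewall selects whether NAT is provided by
+# the firewall unit or by the standalone nat unit, and withConntrackHelpers
+# additionally loads the "ftp" connection-tracking helper so that active FTP
+# can traverse the NAT.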
+import ./make-test-python.nix ({ pkgs, lib, withFirewall, withConntrackHelpers ? false, ... }:
+  let
+    unit = if withFirewall then "firewall" else "nat";
+
+    routerBase =
+      lib.mkMerge [
+        { virtualisation.vlans = [ 2 1 ];
+          networking.firewall.enable = withFirewall;
+          networking.nat.internalIPs = [ "192.168.1.0/24" ];
+          networking.nat.externalInterface = "eth1";
+        }
+        (lib.optionalAttrs withConntrackHelpers {
+          networking.firewall.connectionTrackingModules = [ "ftp" ];
+          networking.firewall.autoLoadConntrackHelpers = true;
+        })
+      ];
+  in
+  {
+    name = "nat" + (if withFirewall then "WithFirewall" else "Standalone")
+                 + (lib.optionalString withConntrackHelpers "withConntrackHelpers");
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ eelco rob ];
+    };
+
+    nodes =
+      { client =
+          { pkgs, nodes, ... }:
+          lib.mkMerge [
+            { virtualisation.vlans = [ 1 ];
+              networking.defaultGateway =
+                (pkgs.lib.head nodes.router.config.networking.interfaces.eth2.ipv4.addresses).address;
+            }
+            (lib.optionalAttrs withConntrackHelpers {
+              networking.firewall.connectionTrackingModules = [ "ftp" ];
+              networking.firewall.autoLoadConntrackHelpers = true;
+            })
+          ];
+
+        router =
+        { ... }: lib.mkMerge [
+          routerBase
+          { networking.nat.enable = true; }
+        ];
+
+        routerDummyNoNat =
+        { ... }: lib.mkMerge [
+          routerBase
+          { networking.nat.enable = false; }
+        ];
+
+        server =
+          { ... }:
+          { virtualisation.vlans = [ 2 ];
+            networking.firewall.enable = false;
+            services.httpd.enable = true;
+            services.httpd.adminAddr = "foo@example.org";
+            services.vsftpd.enable = true;
+            services.vsftpd.anonymousUser = true;
+          };
+      };
+
+    testScript =
+      { nodes, ... }: let
+        routerDummyNoNatClosure = nodes.routerDummyNoNat.config.system.build.toplevel;
+        routerClosure = nodes.router.config.system.build.toplevel;
+      in ''
+        client.start()
+        router.start()
+        server.start()
+
+        # The router should have access to the server.
+        server.wait_for_unit("network.target")
+        server.wait_for_unit("httpd")
+        router.wait_for_unit("network.target")
+        router.succeed("curl --fail http://server/ >&2")
+
+        # The client should be also able to connect via the NAT router.
+        router.wait_for_unit("${unit}")
+        client.wait_for_unit("network.target")
+        client.succeed("curl --fail http://server/ >&2")
+        client.succeed("ping -c 1 server >&2")
+
+        # Test whether passive FTP works.
+        server.wait_for_unit("vsftpd")
+        server.succeed("echo Hello World > /home/ftp/foo.txt")
+        client.succeed("curl -v ftp://server/foo.txt >&2")
+
+        # Test whether active FTP works.
+        client.${if withConntrackHelpers then "succeed" else "fail"}("curl -v -P - ftp://server/foo.txt >&2")
+
+        # Test ICMP.
+        client.succeed("ping -c 1 router >&2")
+        router.succeed("ping -c 1 client >&2")
+
+        # If we turn off NAT, the client shouldn't be able to reach the server.
+        router.succeed(
+            "${routerDummyNoNatClosure}/bin/switch-to-configuration test 2>&1"
+        )
+        client.fail("curl --fail --connect-timeout 5 http://server/ >&2")
+        client.fail("ping -c 1 server >&2")
+
+        # And make sure that reloading the NAT job works.
+        router.succeed(
+            "${routerClosure}/bin/switch-to-configuration test 2>&1"
+        )
+        # FIXME: this should not be necessary, but nat.service is not started because
+        #        network.target is not triggered
+        #        (https://github.com/NixOS/nixpkgs/issues/16230#issuecomment-226408359)
+        ${lib.optionalString (!withFirewall) ''
+          router.succeed("systemctl start nat.service")
+        ''}
+        client.succeed("curl --fail http://server/ >&2")
+        client.succeed("ping -c 1 server >&2")
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/ndppd.nix b/nixpkgs/nixos/tests/ndppd.nix
new file mode 100644
index 000000000000..b67b26a79341
--- /dev/null
+++ b/nixpkgs/nixos/tests/ndppd.nix
@@ -0,0 +1,60 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "ndppd";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ fpletz ];
+  };
+
+  nodes = {
+    upstream = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.tcpdump ];
+      networking.useDHCP = false;
+      networking.interfaces = {
+        eth1 = {
+          ipv6.addresses = [
+            { address = "fd23::1"; prefixLength = 112; }
+          ];
+          ipv6.routes = [
+            { address = "fd42::";
+              prefixLength = 112;
+            }
+          ];
+        };
+      };
+    };
+    server = { pkgs, ... }: {
+      boot.kernel.sysctl = {
+        "net.ipv6.conf.all.forwarding" = "1";
+        "net.ipv6.conf.default.forwarding" = "1";
+      };
+      environment.systemPackages = [ pkgs.tcpdump ];
+      networking.useDHCP = false;
+      networking.interfaces = {
+        eth1 = {
+          ipv6.addresses = [
+            { address = "fd23::2"; prefixLength = 112; }
+          ];
+        };
+      };
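+      # Answer neighbor solicitations for the whole fd42::/112 prefix on
+      # eth1, so that "upstream" can reach the container address fd42::2
+      # behind "server".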
+      services.ndppd = {
+        enable = true;
+        proxies.eth1.rules."fd42::/112" = {};
+      };
+      containers.client = {
+        autoStart = true;
+        privateNetwork = true;
+        hostAddress = "192.168.255.1";
+        localAddress = "192.168.255.2";
+        hostAddress6 = "fd42::1";
+        localAddress6 = "fd42::2";
+        config = {};
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("multi-user.target")
+    upstream.wait_for_unit("multi-user.target")
+    upstream.wait_until_succeeds("ping -c5 fd42::2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/neo4j.nix b/nixpkgs/nixos/tests/neo4j.nix
new file mode 100644
index 000000000000..32ee7f501b8b
--- /dev/null
+++ b/nixpkgs/nixos/tests/neo4j.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix {
+  name = "neo4j";
+
+  nodes = {
+    master =
+      { ... }:
+
+      {
+        services.neo4j.enable = true;
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    master.wait_for_unit("neo4j")
+    master.wait_for_open_port(7474)
+    master.succeed("curl http://localhost:7474/")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/netdata.nix b/nixpkgs/nixos/tests/netdata.nix
new file mode 100644
index 000000000000..4ddc96e8bc22
--- /dev/null
+++ b/nixpkgs/nixos/tests/netdata.nix
@@ -0,0 +1,38 @@
+# This test runs netdata and checks for data via apps.plugin
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "netdata";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ cransom ];
+  };
+
+  nodes = {
+    netdata =
+      { pkgs, ... }:
+        {
+          environment.systemPackages = with pkgs; [ curl jq ];
+          services.netdata.enable = true;
+        };
+  };
+
+  testScript = ''
+    start_all()
+
+    netdata.wait_for_unit("netdata.service")
+
+    # wait for the service to listen before sending a request
+    netdata.wait_for_open_port(19999)
+
+    # check if the netdata main page loads.
+    netdata.succeed("curl --fail http://localhost:19999/")
+    netdata.succeed("sleep 4")
+
+    # check if netdata can read disk ops for root-owned processes:
+    # a result > 0 verifies both that netdata is working and that
+    # apps.plugin has the elevated capabilities it needs.
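+    # the jq filter below sums the "root" column of the users.pwrites chart
+    # over the first ten samples and checks that the total is positive.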
+    url = "http://localhost:19999/api/v1/data\?chart=users.pwrites"
+    filter = '[.data[range(10)][.labels | indices("root")[0]]] | add | . > 0'
+    cmd = f"curl -s {url} | jq -e '{filter}'"
+    netdata.wait_until_succeeds(cmd)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/networking-proxy.nix b/nixpkgs/nixos/tests/networking-proxy.nix
new file mode 100644
index 000000000000..bae9c66ed61a
--- /dev/null
+++ b/nixpkgs/nixos/tests/networking-proxy.nix
@@ -0,0 +1,135 @@
+# Test whether `networking.proxy' works as expected.
+
+# TODO: use a real proxy node and put this test into networking.nix
+# TODO: test whether nix tools work as expected behind a proxy
+
+let default-config = {
+        imports = [ ./common/user-account.nix ];
+
+        services.xserver.enable = false;
+
+        virtualisation.memorySize = 128;
+      };
+in import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "networking-proxy";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [  ];
+  };
+
+  nodes = {
+    # no proxy
+    machine =
+      { ... }:
+
+      default-config;
+
+    # proxy default
+    machine2 =
+      { ... }:
+
+      default-config // {
+        networking.proxy.default = "http://user:pass@host:port";
+      };
+
+    # specific proxy options
+    machine3 =
+      { ... }:
+
+      default-config //
+      {
+        networking.proxy = {
+          # useless because it is overridden by the more specific options below
+          default = "http://user:pass@host:port";
+          # advanced proxy setup
+          httpProxy = "123-http://user:pass@http-host:port";
+          httpsProxy = "456-http://user:pass@https-host:port";
+          rsyncProxy = "789-http://user:pass@rsync-host:port";
+          ftpProxy = "101112-http://user:pass@ftp-host:port";
+          noProxy = "131415-127.0.0.1,localhost,.localdomain";
+        };
+      };
+
+    # mix default + proxy options
+    machine4 =
+      { ... }:
+
+      default-config // {
+        networking.proxy = {
+          # open for all *_proxy env var
+          default = "000-http://user:pass@default-host:port";
+          # except for those 2
+          rsyncProxy = "123-http://user:pass@http-host:port";
+          noProxy = "131415-127.0.0.1,localhost,.localdomain";
+        };
+      };
+  };
+
+  testScript =
+    ''
+      from typing import Dict, Optional
+
+
+      def get_machine_env(machine: Machine, user: Optional[str] = None) -> Dict[str, str]:
+          """
+          Gets the environment from a given machine, and returns it as a
+          dictionary in the form:
+              {"lowercase_var_name": "value"}
+
+          Duplicate environment variables with the same name
+          (e.g. "foo" and "FOO") are handled in an undefined manner.
+          """
+          if user is not None:
+              env = machine.succeed("su - {} -c 'env -0'".format(user))
+          else:
+              env = machine.succeed("env -0")
+          ret = {}
+          for line in env.split("\0"):
+              if "=" not in line:
+                  continue
+
+              key, val = line.split("=", 1)
+              ret[key.lower()] = val
+          return ret
+
+
+      start_all()
+
+      with subtest("no proxy"):
+          assert "proxy" not in machine.succeed("env").lower()
+          assert "proxy" not in machine.succeed("su - alice -c env").lower()
+
+      with subtest("default proxy"):
+          assert "proxy" in machine2.succeed("env").lower()
+          assert "proxy" in machine2.succeed("su - alice -c env").lower()
+
+      with subtest("explicitly-set proxy"):
+          env = get_machine_env(machine3)
+          assert "123" in env["http_proxy"]
+          assert "456" in env["https_proxy"]
+          assert "789" in env["rsync_proxy"]
+          assert "101112" in env["ftp_proxy"]
+          assert "131415" in env["no_proxy"]
+
+          env = get_machine_env(machine3, "alice")
+          assert "123" in env["http_proxy"]
+          assert "456" in env["https_proxy"]
+          assert "789" in env["rsync_proxy"]
+          assert "101112" in env["ftp_proxy"]
+          assert "131415" in env["no_proxy"]
+
+      with subtest("default proxy + some other specifics"):
+          env = get_machine_env(machine4)
+          assert "000" in env["http_proxy"]
+          assert "000" in env["https_proxy"]
+          assert "123" in env["rsync_proxy"]
+          assert "000" in env["ftp_proxy"]
+          assert "131415" in env["no_proxy"]
+
+          env = get_machine_env(machine4, "alice")
+          assert "000" in env["http_proxy"]
+          assert "000" in env["https_proxy"]
+          assert "123" in env["rsync_proxy"]
+          assert "000" in env["ftp_proxy"]
+          assert "131415" in env["no_proxy"]
+    '';
+})
diff --git a/nixpkgs/nixos/tests/networking.nix b/nixpkgs/nixos/tests/networking.nix
new file mode 100644
index 000000000000..3d8ab761a446
--- /dev/null
+++ b/nixpkgs/nixos/tests/networking.nix
@@ -0,0 +1,699 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+# bool: whether to use networkd in the tests
+, networkd }:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  router = { config, pkgs, ... }:
+    with pkgs.lib;
+    let
+      vlanIfs = range 1 (length config.virtualisation.vlans);
+    in {
+      environment.systemPackages = [ pkgs.iptables ]; # to debug firewall rules
+      virtualisation.vlans = [ 1 2 3 ];
+      boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = true;
+      networking = {
+        useDHCP = false;
+        useNetworkd = networkd;
+        firewall.checkReversePath = true;
+        firewall.allowedUDPPorts = [ 547 ];
+        interfaces = mkOverride 0 (listToAttrs (forEach vlanIfs (n:
+          nameValuePair "eth${toString n}" {
+            ipv4.addresses = [ { address = "192.168.${toString n}.1"; prefixLength = 24; } ];
+            ipv6.addresses = [ { address = "fd00:1234:5678:${toString n}::1"; prefixLength = 64; } ];
+          })));
+      };
+      services.dhcpd4 = {
+        enable = true;
+        interfaces = map (n: "eth${toString n}") vlanIfs;
+        extraConfig = ''
+          authoritative;
+        '' + flip concatMapStrings vlanIfs (n: ''
+          subnet 192.168.${toString n}.0 netmask 255.255.255.0 {
+            option routers 192.168.${toString n}.1;
+          # XXX: technically it is _not guaranteed_ that IP addresses will be
+          # issued from the first item in the range onwards, but these tests
+          # rely on that assumption.
+            range 192.168.${toString n}.2 192.168.${toString n}.254;
+          }
+        '');
+      };
+      services.radvd = {
+        enable = true;
+        config = flip concatMapStrings vlanIfs (n: ''
+          interface eth${toString n} {
+            AdvSendAdvert on;
+            AdvManagedFlag on;
+            AdvOtherConfigFlag on;
+
+            prefix fd00:1234:5678:${toString n}::/64 {
+              AdvAutonomous off;
+            };
+          };
+        '');
+      };
+      services.dhcpd6 = {
+        enable = true;
+        interfaces = map (n: "eth${toString n}") vlanIfs;
+        extraConfig = ''
+          authoritative;
+        '' + flip concatMapStrings vlanIfs (n: ''
+          subnet6 fd00:1234:5678:${toString n}::/64 {
+            range6 fd00:1234:5678:${toString n}::2 fd00:1234:5678:${toString n}::2;
+          }
+        '');
+      };
+    };
+
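+  # Each attribute of testCases defines an independent scenario; most of them
+  # reuse the router defined above as their DHCP, DHCPv6, and radvd server.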
+  testCases = {
+    loopback = {
+      name = "Loopback";
+      machine.networking.useDHCP = false;
+      machine.networking.useNetworkd = networkd;
+      testScript = ''
+        start_all()
+        machine.wait_for_unit("network.target")
+        loopback_addresses = machine.succeed("ip addr show lo")
+        assert "inet 127.0.0.1/8" in loopback_addresses
+        assert "inet6 ::1/128" in loopback_addresses
+      '';
+    };
+    static = {
+      name = "Static";
+      nodes.router = router;
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.vlans = [ 1 2 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          defaultGateway = "192.168.1.1";
+          interfaces.eth1.ipv4.addresses = mkOverride 0 [
+            { address = "192.168.1.2"; prefixLength = 24; }
+            { address = "192.168.1.3"; prefixLength = 32; }
+            { address = "192.168.1.10"; prefixLength = 32; }
+          ];
+          interfaces.eth2.ipv4.addresses = mkOverride 0 [
+            { address = "192.168.2.2"; prefixLength = 24; }
+          ];
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          client.wait_for_unit("network.target")
+          router.wait_for_unit("network-online.target")
+
+          with subtest("Make sure dhcpcd is not started"):
+              client.fail("systemctl status dhcpcd.service")
+
+          with subtest("Test vlan 1"):
+              client.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client.wait_until_succeeds("ping -c 1 192.168.1.3")
+              client.wait_until_succeeds("ping -c 1 192.168.1.10")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+              router.wait_until_succeeds("ping -c 1 192.168.1.3")
+              router.wait_until_succeeds("ping -c 1 192.168.1.10")
+
+          with subtest("Test vlan 2"):
+              client.wait_until_succeeds("ping -c 1 192.168.2.1")
+              client.wait_until_succeeds("ping -c 1 192.168.2.2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.2.1")
+              router.wait_until_succeeds("ping -c 1 192.168.2.2")
+
+          with subtest("Test default gateway"):
+              router.wait_until_succeeds("ping -c 1 192.168.3.1")
+              client.wait_until_succeeds("ping -c 1 192.168.3.1")
+        '';
+    };
+    dhcpSimple = {
+      name = "SimpleDHCP";
+      nodes.router = router;
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.vlans = [ 1 2 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.eth1 = {
+            ipv4.addresses = mkOverride 0 [ ];
+            ipv6.addresses = mkOverride 0 [ ];
+            useDHCP = true;
+          };
+          interfaces.eth2 = {
+            ipv4.addresses = mkOverride 0 [ ];
+            ipv6.addresses = mkOverride 0 [ ];
+            useDHCP = true;
+          };
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          client.wait_for_unit("network.target")
+          router.wait_for_unit("network-online.target")
+
+          with subtest("Wait until we have an ip address on each interface"):
+              client.wait_until_succeeds("ip addr show dev eth1 | grep -q '192.168.1'")
+              client.wait_until_succeeds("ip addr show dev eth1 | grep -q 'fd00:1234:5678:1:'")
+              client.wait_until_succeeds("ip addr show dev eth2 | grep -q '192.168.2'")
+              client.wait_until_succeeds("ip addr show dev eth2 | grep -q 'fd00:1234:5678:2:'")
+
+          with subtest("Test vlan 1"):
+              client.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::1")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+              router.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::1")
+              router.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::2")
+
+          with subtest("Test vlan 2"):
+              client.wait_until_succeeds("ping -c 1 192.168.2.1")
+              client.wait_until_succeeds("ping -c 1 192.168.2.2")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:2::1")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:2::2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.2.1")
+              router.wait_until_succeeds("ping -c 1 192.168.2.2")
+              router.wait_until_succeeds("ping -c 1 fd00:1234:5678:2::1")
+              router.wait_until_succeeds("ping -c 1 fd00:1234:5678:2::2")
+        '';
+    };
+    dhcpOneIf = {
+      name = "OneInterfaceDHCP";
+      nodes.router = router;
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.vlans = [ 1 2 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.eth1 = {
+            ipv4.addresses = mkOverride 0 [ ];
+            mtu = 1343;
+            useDHCP = true;
+          };
+          interfaces.eth2.ipv4.addresses = mkOverride 0 [ ];
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to come up"):
+              client.wait_for_unit("network.target")
+              router.wait_for_unit("network.target")
+
+          with subtest("Wait until we have an ip address on each interface"):
+              client.wait_until_succeeds("ip addr show dev eth1 | grep -q '192.168.1'")
+
+          with subtest("ensure MTU is set"):
+              assert "mtu 1343" in client.succeed("ip link show dev eth1")
+
+          with subtest("Test vlan 1"):
+              client.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client.wait_until_succeeds("ping -c 1 192.168.1.2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+
+          with subtest("Test vlan 2"):
+              client.wait_until_succeeds("ping -c 1 192.168.2.1")
+              client.fail("ping -c 1 192.168.2.2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.2.1")
+              router.fail("ping -c 1 192.168.2.2")
+        '';
+    };
+    bond = let
+      node = address: { pkgs, ... }: with pkgs.lib; {
+        virtualisation.vlans = [ 1 2 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          bonds.bond = {
+            interfaces = [ "eth1" "eth2" ];
+            driverOptions.mode = "balance-rr";
+          };
+          interfaces.eth1.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.eth2.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.bond.ipv4.addresses = mkOverride 0
+            [ { inherit address; prefixLength = 30; } ];
+        };
+      };
+    in {
+      name = "Bond";
+      nodes.client1 = node "192.168.1.1";
+      nodes.client2 = node "192.168.1.2";
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to come up"):
+              client1.wait_for_unit("network.target")
+              client2.wait_for_unit("network.target")
+
+          with subtest("Test bonding"):
+              client1.wait_until_succeeds("ping -c 2 192.168.1.1")
+              client1.wait_until_succeeds("ping -c 2 192.168.1.2")
+
+              client2.wait_until_succeeds("ping -c 2 192.168.1.1")
+              client2.wait_until_succeeds("ping -c 2 192.168.1.2")
+        '';
+    };
+    bridge = let
+      node = { address, vlan }: { pkgs, ... }: with pkgs.lib; {
+        virtualisation.vlans = [ vlan ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.eth1.ipv4.addresses = mkOverride 0
+            [ { inherit address; prefixLength = 24; } ];
+        };
+      };
+    in {
+      name = "Bridge";
+      nodes.client1 = node { address = "192.168.1.2"; vlan = 1; };
+      nodes.client2 = node { address = "192.168.1.3"; vlan = 2; };
+      nodes.router = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.vlans = [ 1 2 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          bridges.bridge.interfaces = [ "eth1" "eth2" ];
+          interfaces.eth1.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.eth2.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.bridge.ipv4.addresses = mkOverride 0
+            [ { address = "192.168.1.1"; prefixLength = 24; } ];
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to come up"):
+              for machine in client1, client2, router:
+                  machine.wait_for_unit("network.target")
+
+          with subtest("Test bridging"):
+              client1.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client1.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client1.wait_until_succeeds("ping -c 1 192.168.1.3")
+
+              client2.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client2.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client2.wait_until_succeeds("ping -c 1 192.168.1.3")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+              router.wait_until_succeeds("ping -c 1 192.168.1.3")
+        '';
+    };
+    macvlan = {
+      name = "MACVLAN";
+      nodes.router = router;
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        environment.systemPackages = [ pkgs.iptables ]; # to debug firewall rules
+        virtualisation.vlans = [ 1 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          firewall.logReversePathDrops = true; # to debug firewall rules
+          # reverse path filtering rules for the macvlan interface seem
+          # to be incorrect, causing the test to fail. Disable temporarily.
+          firewall.checkReversePath = false;
+          macvlans.macvlan.interface = "eth1";
+          interfaces.eth1 = {
+            ipv4.addresses = mkOverride 0 [ ];
+            useDHCP = true;
+          };
+          interfaces.macvlan = {
+            useDHCP = true;
+          };
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to come up"):
+              client.wait_for_unit("network.target")
+              router.wait_for_unit("network.target")
+
+          with subtest("Wait until we have an ip address on each interface"):
+              client.wait_until_succeeds("ip addr show dev eth1 | grep -q '192.168.1'")
+              client.wait_until_succeeds("ip addr show dev macvlan | grep -q '192.168.1'")
+
+          with subtest("Print lots of diagnostic information"):
+              router.log("**********************************************")
+              router.succeed("ip addr >&2")
+              router.succeed("ip route >&2")
+              router.execute("iptables-save >&2")
+              client.log("==============================================")
+              client.succeed("ip addr >&2")
+              client.succeed("ip route >&2")
+              client.execute("iptables-save >&2")
+              client.log("##############################################")
+
+          with subtest("Test macvlan creates routable ips"):
+              client.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client.wait_until_succeeds("ping -c 1 192.168.1.3")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+              router.wait_until_succeeds("ping -c 1 192.168.1.3")
+        '';
+    };
+    sit = let
+      node = { address4, remote, address6 }: { pkgs, ... }: with pkgs.lib; {
+        virtualisation.vlans = [ 1 ];
+        networking = {
+          useNetworkd = networkd;
+          firewall.enable = false;
+          useDHCP = false;
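+          # a "sit" device is a 6in4 tunnel: IPv6 packets are encapsulated in
+          # IPv4 between the local and remote endpoints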
+          sits.sit = {
+            inherit remote;
+            local = address4;
+            dev = "eth1";
+          };
+          interfaces.eth1.ipv4.addresses = mkOverride 0
+            [ { address = address4; prefixLength = 24; } ];
+          interfaces.sit.ipv6.addresses = mkOverride 0
+            [ { address = address6; prefixLength = 64; } ];
+        };
+      };
+    in {
+      name = "Sit";
+      nodes.client1 = node { address4 = "192.168.1.1"; remote = "192.168.1.2"; address6 = "fc00::1"; };
+      nodes.client2 = node { address4 = "192.168.1.2"; remote = "192.168.1.1"; address6 = "fc00::2"; };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to be configured"):
+              client1.wait_for_unit("network.target")
+              client2.wait_for_unit("network.target")
+
+              # Print diagnostic information
+              client1.succeed("ip addr >&2")
+              client2.succeed("ip addr >&2")
+
+          with subtest("Test ipv6"):
+              client1.wait_until_succeeds("ping -c 1 fc00::1")
+              client1.wait_until_succeeds("ping -c 1 fc00::2")
+
+              client2.wait_until_succeeds("ping -c 1 fc00::1")
+              client2.wait_until_succeeds("ping -c 1 fc00::2")
+        '';
+    };
+    vlan = let
+      node = address: { pkgs, ... }: with pkgs.lib; {
+        #virtualisation.vlans = [ 1 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          vlans.vlan = {
+            id = 1;
+            interface = "eth0";
+          };
+          interfaces.eth0.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.eth1.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.vlan.ipv4.addresses = mkOverride 0
+            [ { inherit address; prefixLength = 24; } ];
+        };
+      };
+    in {
+      name = "vlan";
+      nodes.client1 = node "192.168.1.1";
+      nodes.client2 = node "192.168.1.2";
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to be configured"):
+              client1.wait_for_unit("network.target")
+              client2.wait_for_unit("network.target")
+
+          with subtest("Test vlan is setup"):
+              client1.succeed("ip addr show dev vlan >&2")
+              client2.succeed("ip addr show dev vlan >&2")
+        '';
+    };
+    virtual = {
+      name = "Virtual";
+      machine = {
+        networking.useNetworkd = networkd;
+        networking.useDHCP = false;
+        networking.interfaces.tap0 = {
+          ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+          ipv6.addresses = [ { address = "2001:1470:fffd:2096::"; prefixLength = 64; } ];
+          virtual = true;
+          mtu = 1342;
+          macAddress = "02:de:ad:be:ef:01";
+        };
+        networking.interfaces.tun0 = {
+          ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
+          ipv6.addresses = [ { address = "2001:1470:fffd:2097::"; prefixLength = 64; } ];
+          virtual = true;
+          mtu = 1343;
+        };
+      };
+
+      testScript = ''
+        targetList = """
+        tap0: tap persist user 0
+        tun0: tun persist user 0
+        """.strip()
+
+        with subtest("Wait for networking to come up"):
+            machine.start()
+            machine.wait_for_unit("network.target")
+
+        with subtest("Test interfaces set up"):
+            tuntap_list = machine.succeed("ip tuntap list | sort").strip()
+            assert (
+                tuntap_list == targetList
+            ), """
+            The list of virtual interfaces does not match the expected one:
+            Result:
+              {}
+            Expected:
+              {}
+            """.format(
+                tuntap_list, targetList
+            )
+        with subtest("Test MTU and MAC Address are configured"):
+            assert "mtu 1342" in machine.succeed("ip link show dev tap0")
+            assert "mtu 1343" in machine.succeed("ip link show dev tun0")
+            assert "02:de:ad:be:ef:01" in machine.succeed("ip link show dev tap0")
+      '' # network-addresses-* only exist in scripted networking
+      + optionalString (!networkd) ''
+        with subtest("Test interfaces clean up"):
+            machine.succeed("systemctl stop network-addresses-tap0")
+            machine.sleep(10)
+            machine.succeed("systemctl stop network-addresses-tun0")
+            machine.sleep(10)
+            residue = machine.succeed("ip tuntap list")
+            assert (
+                residue == ""
+            ), "Some virtual interface has not been properly cleaned:\n{}".format(residue)
+      '';
+    };
+    privacy = {
+      name = "Privacy";
+      nodes.router = { ... }: {
+        virtualisation.vlans = [ 1 ];
+        boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = true;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.eth1.ipv6.addresses = singleton {
+            address = "fd00:1234:5678:1::1";
+            prefixLength = 64;
+          };
+        };
+        services.radvd = {
+          enable = true;
+          config = ''
+            interface eth1 {
+              AdvSendAdvert on;
+              AdvManagedFlag on;
+              AdvOtherConfigFlag on;
+
+              prefix fd00:1234:5678:1::/64 {
+                AdvAutonomous on;
+                AdvOnLink on;
+              };
+            };
+          '';
+        };
+      };
+      nodes.client_with_privacy = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.vlans = [ 1 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.eth1 = {
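+            # "default" generates temporary (privacy) addresses and prefers
+            # them for outgoing traffic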
+            tempAddress = "default";
+            ipv4.addresses = mkOverride 0 [ ];
+            ipv6.addresses = mkOverride 0 [ ];
+            useDHCP = true;
+          };
+        };
+      };
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.vlans = [ 1 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.eth1 = {
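+            # "enabled" also generates temporary addresses, but outgoing
+            # traffic keeps using the stable EUI-64 address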
+            tempAddress = "enabled";
+            ipv4.addresses = mkOverride 0 [ ];
+            ipv6.addresses = mkOverride 0 [ ];
+            useDHCP = true;
+          };
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          client.wait_for_unit("network.target")
+          client_with_privacy.wait_for_unit("network.target")
+          router.wait_for_unit("network-online.target")
+
+          with subtest("Wait until we have an ip address"):
+              client_with_privacy.wait_until_succeeds(
+                  "ip addr show dev eth1 | grep -q 'fd00:1234:5678:1:'"
+              )
+              client.wait_until_succeeds("ip addr show dev eth1 | grep -q 'fd00:1234:5678:1:'")
+
+          with subtest("Test vlan 1"):
+              client_with_privacy.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::1")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::1")
+
+          with subtest("Test address used is temporary"):
+              client_with_privacy.wait_until_succeeds(
+                  "! ip route get fd00:1234:5678:1::1 | grep -q ':[a-f0-9]*ff:fe[a-f0-9]*:'"
+              )
+
+          with subtest("Test address used is EUI-64"):
+              client.wait_until_succeeds(
+                  "ip route get fd00:1234:5678:1::1 | grep -q ':[a-f0-9]*ff:fe[a-f0-9]*:'"
+              )
+        '';
+    };
+    routes = {
+      name = "routes";
+      machine = {
+        networking.useDHCP = false;
+        networking.interfaces.eth0 = {
+          ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
+          ipv6.addresses = [ { address = "2001:1470:fffd:2097::"; prefixLength = 64; } ];
+          ipv6.routes = [
+            { address = "fdfd:b3f0::"; prefixLength = 48; }
+            { address = "2001:1470:fffd:2098::"; prefixLength = 64; via = "fdfd:b3f0::1"; }
+          ];
+          ipv4.routes = [
+            { address = "10.0.0.0"; prefixLength = 16; options = { mtu = "1500"; }; }
+            { address = "192.168.2.0"; prefixLength = 24; via = "192.168.1.1"; }
+          ];
+        };
+        virtualisation.vlans = [ ];
+      };
+
+      testScript = ''
+        targetIPv4Table = [
+            "10.0.0.0/16 proto static scope link mtu 1500",
+            "192.168.1.0/24 proto kernel scope link src 192.168.1.2",
+            "192.168.2.0/24 via 192.168.1.1 proto static",
+        ]
+
+        targetIPv6Table = [
+            "2001:1470:fffd:2097::/64 proto kernel metric 256 pref medium",
+            "2001:1470:fffd:2098::/64 via fdfd:b3f0::1 proto static metric 1024 pref medium",
+            "fdfd:b3f0::/48 proto static metric 1024 pref medium",
+        ]
+
+        machine.start()
+        machine.wait_for_unit("network.target")
+
+        with subtest("test routing tables"):
+            ipv4Table = machine.succeed("ip -4 route list dev eth0 | head -n3").strip()
+            ipv6Table = machine.succeed("ip -6 route list dev eth0 | head -n3").strip()
+            assert [
+                l.strip() for l in ipv4Table.splitlines()
+            ] == targetIPv4Table, """
+              The IPv4 routing table does not match the expected one:
+                Result:
+                  {}
+                Expected:
+                  {}
+              """.format(
+                ipv4Table, targetIPv4Table
+            )
+            assert [
+                l.strip() for l in ipv6Table.splitlines()
+            ] == targetIPv6Table, """
+              The IPv6 routing table does not match the expected one:
+                Result:
+                  {}
+                Expected:
+                  {}
+              """.format(
+                ipv6Table, targetIPv6Table
+            )
+
+        with subtest("test clean-up of the tables"):
+            machine.succeed("systemctl stop network-addresses-eth0")
+            ipv4Residue = machine.succeed("ip -4 route list dev eth0 | head -n-3").strip()
+            ipv6Residue = machine.succeed("ip -6 route list dev eth0 | head -n-3").strip()
+            assert (
+                ipv4Residue == ""
+            ), "The IPv4 routing table has not been properly cleaned:\n{}".format(ipv4Residue)
+            assert (
+                ipv6Residue == ""
+            ), "The IPv6 routing table has not been properly cleaned:\n{}".format(ipv6Residue)
+      '';
+    };
+    # systemd.network.links should work even when networkd is disabled,
+    # since link files are handled by udev rather than by networkd
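+    # (udev applies *.link files through its net_setup_link builtin when the
+    # device appears)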
+    link = {
+      name = "Link";
+      nodes.client = { pkgs, ... }: {
+        virtualisation.vlans = [ 1 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+        };
+        systemd.network.links."50-foo" = {
+          matchConfig = {
+            Name = "foo";
+            Driver = "dummy";
+          };
+          linkConfig.MTUBytes = "1442";
+        };
+      };
+      testScript = ''
+        print(client.succeed("ip l add name foo type dummy"))
+        print(client.succeed("stat /etc/systemd/network/50-foo.link"))
+        client.succeed("udevadm settle")
+        assert "mtu 1442" in client.succeed("ip l show dummy0")
+      '';
+    };
+  };
+
+in mapAttrs (const (attrs: makeTest (attrs // {
+  name = "${attrs.name}-Networking-${if networkd then "Networkd" else "Scripted"}";
+}))) testCases
diff --git a/nixpkgs/nixos/tests/nextcloud/basic.nix b/nixpkgs/nixos/tests/nextcloud/basic.nix
new file mode 100644
index 000000000000..92ac5c46e8f0
--- /dev/null
+++ b/nixpkgs/nixos/tests/nextcloud/basic.nix
@@ -0,0 +1,64 @@
+import ../make-test-python.nix ({ pkgs, ...}: let
+  adminpass = "notproduction";
+  adminuser = "root";
+in {
+  name = "nextcloud-basic";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ globin eqyiel ];
+  };
+
+  nodes = {
+    # The only thing the client needs to do is download a file.
+    client = { ... }: {};
+
+    nextcloud = { config, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.nextcloud = {
+        enable = true;
+        nginx.enable = true;
+        hostName = "nextcloud";
+        config = {
+          # Don't inherit adminuser since "root" is supposed to be the default
+          inherit adminpass;
+        };
+        autoUpdateApps = {
+          enable = true;
+          startAt = "20:00";
+        };
+      };
+    };
+  };
+
+  testScript = let
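+    # No rclone config file is needed: rclone also reads remotes from
+    # RCLONE_CONFIG_<NAME>_<KEY> environment variables, so the wrapper below
+    # defines an ad-hoc "nextcloud:" WebDAV remote for the command it runs.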
+    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
+      #!${pkgs.runtimeShell}
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/webdav/"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
+      "''${@}"
+    '';
+    copySharedFile = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'hi' | ${withRcloneEnv} ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
+    '';
+
+    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
+      #!${pkgs.runtimeShell}
+      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
+    '';
+  in ''
+    start_all()
+    nextcloud.wait_for_unit("multi-user.target")
+    nextcloud.succeed("curl -sSf http://nextcloud/login")
+    nextcloud.succeed(
+        "${withRcloneEnv} ${copySharedFile}"
+    )
+    client.wait_for_unit("multi-user.target")
+    client.succeed(
+        "${withRcloneEnv} ${diffSharedFile}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nextcloud/default.nix b/nixpkgs/nixos/tests/nextcloud/default.nix
new file mode 100644
index 000000000000..e4c7a70606cf
--- /dev/null
+++ b/nixpkgs/nixos/tests/nextcloud/default.nix
@@ -0,0 +1,9 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; }
+}:
+{
+  basic = import ./basic.nix { inherit system pkgs; };
+  with-postgresql-and-redis = import ./with-postgresql-and-redis.nix { inherit system pkgs; };
+  with-mysql-and-memcached = import ./with-mysql-and-memcached.nix { inherit system pkgs; };
+}
diff --git a/nixpkgs/nixos/tests/nextcloud/with-mysql-and-memcached.nix b/nixpkgs/nixos/tests/nextcloud/with-mysql-and-memcached.nix
new file mode 100644
index 000000000000..8db630be893a
--- /dev/null
+++ b/nixpkgs/nixos/tests/nextcloud/with-mysql-and-memcached.nix
@@ -0,0 +1,100 @@
+import ../make-test-python.nix ({ pkgs, ...}: let
+  adminpass = "hunter2";
+  adminuser = "root";
+in {
+  name = "nextcloud-with-mysql-and-memcached";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eqyiel ];
+  };
+
+  nodes = {
+    # The only thing the client needs to do is download a file.
+    client = { ... }: {};
+
+    nextcloud = { config, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.nextcloud = {
+        enable = true;
+        hostName = "nextcloud";
+        nginx.enable = true;
+        https = true;
+        caching = {
+          apcu = true;
+          redis = false;
+          memcached = true;
+        };
+        config = {
+          dbtype = "mysql";
+          dbname = "nextcloud";
+          dbuser = "nextcloud";
+          dbhost = "127.0.0.1";
+          dbport = 3306;
+          dbpass = "hunter2";
+          # Don't inherit adminuser since "root" is supposed to be the default
+          inherit adminpass;
+        };
+      };
+
+      services.mysql = {
+        enable = true;
+        bind = "127.0.0.1";
+        package = pkgs.mariadb;
+        initialScript = pkgs.writeText "mysql-init" ''
+          CREATE USER 'nextcloud'@'localhost' IDENTIFIED BY 'hunter2';
+          CREATE DATABASE IF NOT EXISTS nextcloud;
+          GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, INDEX, ALTER,
+            CREATE TEMPORARY TABLES ON nextcloud.* TO 'nextcloud'@'localhost'
+            IDENTIFIED BY 'hunter2';
+          FLUSH privileges;
+        '';
+      };
+
+      systemd.services.nextcloud-setup = {
+        requires = ["mysql.service"];
+        after = ["mysql.service"];
+      };
+
+      services.memcached.enable = true;
+    };
+  };
+
+  testScript = let
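+    # nextcloud-occ wraps Nextcloud's occ admin tool; here it points the local
+    # memcache at APCu and the distributed memcache at the memcached instance.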
+    configureMemcached = pkgs.writeScript "configure-memcached" ''
+      #!${pkgs.runtimeShell}
+      nextcloud-occ config:system:set memcached_servers 0 0 --value 127.0.0.1 --type string
+      nextcloud-occ config:system:set memcached_servers 0 1 --value 11211 --type integer
+      nextcloud-occ config:system:set memcache.local --value '\OC\Memcache\APCu' --type string
+      nextcloud-occ config:system:set memcache.distributed --value '\OC\Memcache\Memcached' --type string
+    '';
+    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
+      #!${pkgs.runtimeShell}
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/webdav/"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
+      "''${@}"
+    '';
+    copySharedFile = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'hi' | ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
+    '';
+
+    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
+      #!${pkgs.runtimeShell}
+      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
+    '';
+  in ''
+    start_all()
+    nextcloud.wait_for_unit("multi-user.target")
+    nextcloud.succeed("${configureMemcached}")
+    nextcloud.succeed("curl -sSf http://nextcloud/login")
+    nextcloud.succeed(
+        "${withRcloneEnv} ${copySharedFile}"
+    )
+    client.wait_for_unit("multi-user.target")
+    client.succeed(
+        "${withRcloneEnv} ${diffSharedFile}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix b/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix
new file mode 100644
index 000000000000..95219cac9be8
--- /dev/null
+++ b/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix
@@ -0,0 +1,100 @@
+import ../make-test-python.nix ({ pkgs, ...}: let
+  adminpass = "hunter2";
+  adminuser = "custom-admin-username";
+in {
+  name = "nextcloud-with-postgresql-and-redis";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eqyiel ];
+  };
+
+  nodes = {
+    # The only thing the client needs to do is download a file.
+    client = { ... }: {};
+
+    nextcloud = { config, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.nextcloud = {
+        enable = true;
+        hostName = "nextcloud";
+        nginx.enable = true;
+        caching = {
+          apcu = false;
+          redis = true;
+          memcached = false;
+        };
+        config = {
+          dbtype = "pgsql";
+          dbname = "nextcloud";
+          dbuser = "nextcloud";
+          dbhost = "/run/postgresql";
+          inherit adminuser;
+          adminpassFile = toString (pkgs.writeText "admin-pass-file" ''
+            ${adminpass}
+          '');
+        };
+      };
+
+      services.redis = {
+        enable = true;
+      };
+
+      systemd.services.nextcloud-setup = {
+        requires = ["postgresql.service"];
+        after = [
+          "postgresql.service"
+        ];
+      };
+
+      services.postgresql = {
+        enable = true;
+        ensureDatabases = [ "nextcloud" ];
+        ensureUsers = [
+          { name = "nextcloud";
+            ensurePermissions."DATABASE nextcloud" = "ALL PRIVILEGES";
+          }
+        ];
+      };
+    };
+  };
+
+  testScript = let
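+    # nextcloud-occ points both the local memcache and the file-locking
+    # backend at Redis before the instance is exercised.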
+    configureRedis = pkgs.writeScript "configure-redis" ''
+      #!${pkgs.runtimeShell}
+      nextcloud-occ config:system:set redis 'host' --value 'localhost' --type string
+      nextcloud-occ config:system:set redis 'port' --value 6379 --type integer
+      nextcloud-occ config:system:set memcache.local --value '\OC\Memcache\Redis' --type string
+      nextcloud-occ config:system:set memcache.locking --value '\OC\Memcache\Redis' --type string
+    '';
+    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
+      #!${pkgs.runtimeShell}
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/webdav/"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
+      "''${@}"
+    '';
+    copySharedFile = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'hi' | ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
+    '';
+
+    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
+      #!${pkgs.runtimeShell}
+      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
+    '';
+  in ''
+    start_all()
+    nextcloud.wait_for_unit("multi-user.target")
+    nextcloud.succeed("${configureRedis}")
+    nextcloud.succeed("curl -sSf http://nextcloud/login")
+    nextcloud.succeed(
+        "${withRcloneEnv} ${copySharedFile}"
+    )
+    client.wait_for_unit("multi-user.target")
+    client.succeed(
+        "${withRcloneEnv} ${diffSharedFile}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nexus.nix b/nixpkgs/nixos/tests/nexus.nix
new file mode 100644
index 000000000000..1ec5c40476a6
--- /dev/null
+++ b/nixpkgs/nixos/tests/nexus.nix
@@ -0,0 +1,32 @@
+# verifies:
+#   1. the nexus service starts up on the server (creating its database and
+#      all other initial state)
+#   2. the web application is reachable via HTTP
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "nexus";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ironpinguin ma27 ];
+  };
+
+  nodes = {
+
+    server =
+      { ... }:
+      { virtualisation.memorySize = 2047; # qemu-system-i386 has a 2047M limit
+        virtualisation.diskSize = 8192;
+
+        services.nexus.enable = true;
+      };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("nexus")
+    server.wait_for_open_port(8081)
+
+    server.succeed("curl -f 127.0.0.1:8081")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nfs/default.nix b/nixpkgs/nixos/tests/nfs/default.nix
new file mode 100644
index 000000000000..6bc803c91b46
--- /dev/null
+++ b/nixpkgs/nixos/tests/nfs/default.nix
@@ -0,0 +1,9 @@
+{ version ? 4
+, system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}: {
+  simple = import ./simple.nix { inherit version system pkgs; };
+} // pkgs.lib.optionalAttrs (version == 4) {
+  # TODO: Test kerberos + nfsv3
+  kerberos = import ./kerberos.nix { inherit version system pkgs; };
+}
diff --git a/nixpkgs/nixos/tests/nfs/kerberos.nix b/nixpkgs/nixos/tests/nfs/kerberos.nix
new file mode 100644
index 000000000000..1f2d0d453ea0
--- /dev/null
+++ b/nixpkgs/nixos/tests/nfs/kerberos.nix
@@ -0,0 +1,133 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }:
+
+with lib;
+
+let
+  krb5 =
+    { enable = true;
+      domain_realm."nfs.test"   = "NFS.TEST";
+      libdefaults.default_realm = "NFS.TEST";
+      realms."NFS.TEST" =
+        { admin_server = "server.nfs.test";
+          kdc = "server.nfs.test";
+        };
+    };
+
+  hosts =
+    ''
+      192.168.1.1 client.nfs.test
+      192.168.1.2 server.nfs.test
+    '';
+
+  users = {
+    users.alice = {
+        isNormalUser = true;
+        name = "alice";
+        uid = 1000;
+      };
+  };
+
+in
+
+{
+  name = "nfsv4-with-kerberos";
+
+  nodes = {
+    client = { lib, ... }:
+      { inherit krb5 users;
+
+        networking.extraHosts = hosts;
+        networking.domain = "nfs.test";
+        networking.hostName = "client";
+
+        fileSystems = lib.mkVMOverride
+          { "/data" = {
+              device  = "server.nfs.test:/";
+              fsType  = "nfs";
+              options = [ "nfsvers=4" "sec=krb5p" "noauto" ];
+            };
+          };
+      };
+
+    server = { lib, ...}:
+      { inherit krb5 users;
+
+        networking.extraHosts = hosts;
+        networking.domain = "nfs.test";
+        networking.hostName = "server";
+
+        networking.firewall.allowedTCPPorts = [
+          111  # rpc
+          2049 # nfs
+          88   # kerberos
+          749  # kerberos admin
+        ];
+
+        services.kerberos_server.enable = true;
+        services.kerberos_server.realms =
+          { "NFS.TEST".acl =
+            [ { access = "all"; principal = "admin/admin"; } ];
+          };
+
+        services.nfs.server.enable = true;
+        services.nfs.server.createMountPoints = true;
+        services.nfs.server.exports =
+          ''
+            /data *(rw,no_root_squash,fsid=0,sec=krb5p)
+          '';
+      };
+  };
+
+  testScript =
+    ''
+      server.succeed("mkdir -p /data/alice")
+      server.succeed("chown alice:users /data/alice")
+
+      # set up kerberos database
+      server.succeed(
+          "kdb5_util create -s -r NFS.TEST -P master_key",
+          "systemctl restart kadmind.service kdc.service",
+      )
+      server.wait_for_unit(f"kadmind.service")
+      server.wait_for_unit(f"kdc.service")
+
+      # create principals
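+      # ("nfs/<fqdn>" service principals are what rpc.gssd and rpc.svcgssd
+      # look up in the host keytab)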
+      server.succeed(
+          "kadmin.local add_principal -randkey nfs/server.nfs.test",
+          "kadmin.local add_principal -randkey nfs/client.nfs.test",
+          "kadmin.local add_principal -pw admin_pw admin/admin",
+          "kadmin.local add_principal -pw alice_pw alice",
+      )
+
+      # add principals to server keytab
+      server.succeed("kadmin.local ktadd nfs/server.nfs.test")
+      server.succeed("systemctl start rpc-gssd.service rpc-svcgssd.service")
+      server.wait_for_unit(f"rpc-gssd.service")
+      server.wait_for_unit(f"rpc-svcgssd.service")
+
+      client.wait_for_unit("network-online.target")
+
+      # add principals to client keytab
+      client.succeed("echo admin_pw | kadmin -p admin/admin ktadd nfs/client.nfs.test")
+      client.succeed("systemctl start rpc-gssd.service")
+      client.wait_for_unit("rpc-gssd.service")
+
+      with subtest("nfs share mounts"):
+          client.succeed("systemctl restart data.mount")
+          client.wait_for_unit("data.mount")
+
+      with subtest("permissions on nfs share are enforced"):
+          client.fail("su alice -c 'ls /data'")
+          client.succeed("su alice -c 'echo alice_pw | kinit'")
+          client.succeed("su alice -c 'ls /data'")
+
+          client.fail("su alice -c 'echo bla >> /data/foo'")
+          client.succeed("su alice -c 'echo bla >> /data/alice/foo'")
+          server.succeed("test -e /data/alice/foo")
+
+      with subtest("uids/gids are mapped correctly on nfs share"):
+          ids = client.succeed("stat -c '%U %G' /data/alice").split()
+          expected = ["alice", "users"]
+          assert ids == expected, f"ids incorrect: got {ids} expected {expected}"
+    '';
+})
diff --git a/nixpkgs/nixos/tests/nfs/simple.nix b/nixpkgs/nixos/tests/nfs/simple.nix
new file mode 100644
index 000000000000..c49ebddc2fdd
--- /dev/null
+++ b/nixpkgs/nixos/tests/nfs/simple.nix
@@ -0,0 +1,94 @@
+import ../make-test-python.nix ({ pkgs, version ? 4, ... }:
+
+let
+
+  client =
+    { pkgs, ... }:
+    { fileSystems = pkgs.lib.mkVMOverride
+        { "/data" =
+           { # NFSv4 presents the export marked fsid=0 as its virtual root directory
+             device = if (version == 4) then "server:/" else "server:/data";
+             fsType = "nfs";
+             options = [ "vers=${toString version}" ];
+           };
+        };
+      networking.firewall.enable = false; # FIXME: only open statd
+    };
+
+in
+
+{
+  name = "nfs";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes =
+    { client1 = client;
+      client2 = client;
+
+      server =
+        { ... }:
+        { services.nfs.server.enable = true;
+          services.nfs.server.exports =
+            ''
+              /data 192.168.1.0/255.255.255.0(rw,no_root_squash,no_subtree_check,fsid=0)
+            '';
+          services.nfs.server.createMountPoints = true;
+          networking.firewall.enable = false; # FIXME: figure out what ports need to be allowed
+        };
+    };
+
+  testScript =
+    ''
+      import time
+
+      server.wait_for_unit("nfs-server")
+      server.succeed("systemctl start network-online.target")
+      server.wait_for_unit("network-online.target")
+
+      start_all()
+
+      client1.wait_for_unit("data.mount")
+      client1.succeed("echo bla > /data/foo")
+      server.succeed("test -e /data/foo")
+
+      client2.wait_for_unit("data.mount")
+      client2.succeed("echo bla > /data/bar")
+      server.succeed("test -e /data/bar")
+
+      with subtest("restarting 'nfs-server' works correctly"):
+          server.succeed("systemctl restart nfs-server")
+          # will take 90 seconds due to the NFS grace period
+          client2.succeed("echo bla >> /data/bar")
+
+      with subtest("can get a lock"):
+          client2.succeed("time flock -n -s /data/lock true")
+
+      with subtest("client 2 fails to acquire lock held by client 1"):
+          client1.succeed("flock -x /data/lock -c 'touch locked; sleep 100000' &")
+          client1.wait_for_file("locked")
+          client2.fail("flock -n -s /data/lock true")
+
+      with subtest("client 2 obtains lock after resetting client 1"):
+          client2.succeed(
+              "flock -x /data/lock -c 'echo acquired; touch locked; sleep 100000' >&2 &"
+          )
+          client1.crash()
+          client1.start()
+          client2.wait_for_file("locked")
+
+      with subtest("locks survive server reboot"):
+          client1.wait_for_unit("data.mount")
+          server.shutdown()
+          server.start()
+          client1.succeed("touch /data/xyzzy")
+          client1.fail("time flock -n -s /data/lock true")
+
+      with subtest("unmounting during shutdown happens quickly"):
+          t1 = time.monotonic()
+          client1.shutdown()
+          duration = time.monotonic() - t1
+          assert duration < 30, f"shutdown took too long ({duration} seconds)"
+    '';
+})
diff --git a/nixpkgs/nixos/tests/nghttpx.nix b/nixpkgs/nixos/tests/nghttpx.nix
new file mode 100644
index 000000000000..d83c1c4cae63
--- /dev/null
+++ b/nixpkgs/nixos/tests/nghttpx.nix
@@ -0,0 +1,61 @@
+let
+  nginxRoot = "/run/nginx";
+in
+  import ./make-test-python.nix ({...}: {
+    name  = "nghttpx";
+    nodes = {
+      webserver = {
+        networking.firewall.allowedTCPPorts = [ 80 ];
+        systemd.services.nginx = {
+          preStart = ''
+            mkdir -p ${nginxRoot}
+            echo "Hello world!" > ${nginxRoot}/hello-world.txt
+          '';
+        };
+
+        services.nginx = {
+          enable = true;
+          virtualHosts.server = {
+            locations."/".root = nginxRoot;
+          };
+        };
+      };
+
+      proxy = {
+        networking.firewall.allowedTCPPorts = [ 80 ];
+        services.nghttpx = {
+          enable = true;
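+          # nghttpx accepts plain HTTP on :80 (no TLS) and proxies requests to
+          # the nginx backend over HTTP/1.1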
+          frontends = [
+            { server = {
+                host = "*";
+                port = 80;
+              };
+
+              params = {
+                tls = "no-tls";
+              };
+            }
+          ];
+          backends = [
+            { server = {
+                host = "webserver";
+                port = 80;
+              };
+              patterns = [ "/" ];
+              params.proto = "http/1.1";
+            }
+          ];
+        };
+      };
+
+      client = {};
+    };
+
+    testScript = ''
+      start_all()
+
+      webserver.wait_for_open_port("80")
+      proxy.wait_for_open_port("80")
+      client.wait_until_succeeds("curl -s --fail http://proxy/hello-world.txt")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/nginx-etag.nix b/nixpkgs/nixos/tests/nginx-etag.nix
new file mode 100644
index 000000000000..63ab2e0c6c27
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-etag.nix
@@ -0,0 +1,89 @@
+import ./make-test-python.nix {
+  name = "nginx-etag";
+
+  nodes = {
+    server = { pkgs, lib, ... }: {
+      networking.firewall.enable = false;
+      services.nginx.enable = true;
+      services.nginx.virtualHosts.server = {
+        root = pkgs.runCommandLocal "testdir" {} ''
+          mkdir "$out"
+          cat > "$out/test.js" <<EOF
+          document.getElementById('foobar').setAttribute('foo', 'bar');
+          EOF
+          cat > "$out/index.html" <<EOF
+          <!DOCTYPE html>
+          <div id="foobar">test</div>
+          <script src="test.js"></script>
+          EOF
+        '';
+      };
+
+      specialisation.pass-checks.configuration = {
+        services.nginx.virtualHosts.server = {
+          root = lib.mkForce (pkgs.runCommandLocal "testdir2" {} ''
+            mkdir "$out"
+            cat > "$out/test.js" <<EOF
+            document.getElementById('foobar').setAttribute('foo', 'yay');
+            EOF
+            cat > "$out/index.html" <<EOF
+            <!DOCTYPE html>
+            <div id="foobar">test</div>
+            <script src="test.js"></script>
+            EOF
+          '');
+        };
+      };
+    };
+
+    client = { pkgs, lib, ... }: {
+      virtualisation.memorySize = 512;
+      environment.systemPackages = let
+        testRunner = pkgs.writers.writePython3Bin "test-runner" {
+          libraries = [ pkgs.python3Packages.selenium ];
+        } ''
+          import os
+          import time
+
+          from selenium.webdriver import Firefox
+          from selenium.webdriver.firefox.options import Options
+
+          options = Options()
+          options.add_argument('--headless')
+          driver = Firefox(options=options)
+
+          driver.implicitly_wait(20)
+          driver.get('http://server/')
+          driver.find_element_by_xpath('//div[@foo="bar"]')
+          open('/tmp/passed_stage1', 'w')
+
+          while not os.path.exists('/tmp/proceed'):
+              time.sleep(0.5)
+
+          driver.get('http://server/')
+          driver.find_element_by_xpath('//div[@foo="yay"]')
+          open('/tmp/passed', 'w')
+        '';
+      in [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    inherit (nodes.server.config.system.build) toplevel;
+    newSystem = "${toplevel}/specialisation/pass-checks";
+  in ''
+    start_all()
+
+    server.wait_for_unit("nginx.service")
+    client.wait_for_unit("multi-user.target")
+    client.execute("test-runner &")
+    client.wait_for_file("/tmp/passed_stage1")
+
+    server.succeed(
+        "${newSystem}/bin/switch-to-configuration test >&2"
+    )
+    client.succeed("touch /tmp/proceed")
+
+    client.wait_for_file("/tmp/passed")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/nginx-pubhtml.nix b/nixpkgs/nixos/tests/nginx-pubhtml.nix
new file mode 100644
index 000000000000..432913cb42d2
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-pubhtml.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix {
+  name = "nginx-pubhtml";
+
+  machine = { pkgs, ... }: {
+    services.nginx.enable = true;
+    services.nginx.virtualHosts.localhost = {
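+      # maps http://localhost/~<user>/<path> onto /home/<user>/public_html/<path>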
+      locations."~ ^/\\~([a-z0-9_]+)(/.*)?$".alias = "/home/$1/public_html$2";
+    };
+    users.users.foo.isNormalUser = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nginx")
+    machine.wait_for_open_port(80)
+    machine.succeed("chmod 0711 /home/foo")
+    machine.succeed("su -c 'mkdir -p /home/foo/public_html' foo")
+    machine.succeed("su -c 'echo bar > /home/foo/public_html/bar.txt' foo")
+    machine.succeed('test "$(curl -fvvv http://localhost/~foo/bar.txt)" = bar')
+  '';
+}
diff --git a/nixpkgs/nixos/tests/nginx-sso.nix b/nixpkgs/nixos/tests/nginx-sso.nix
new file mode 100644
index 000000000000..8834fc31c387
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-sso.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx-sso";
+  meta = {
+    maintainers = with pkgs.stdenv.lib.maintainers; [ delroth ];
+  };
+
+  machine = {
+    services.nginx.sso = {
+      enable = true;
+      configuration = {
+        listen = { addr = "127.0.0.1"; port = 8080; };
+
+        providers.token.tokens = {
+          myuser = "MyToken";
+        };
+
+        acl = {
+          rule_sets = [
+            {
+              rules = [ { field = "x-application"; equals = "MyApp"; } ];
+              allow = [ "myuser" ];
+            }
+          ];
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("nginx-sso.service")
+    machine.wait_for_open_port(8080)
+
+    with subtest("No valid user -> 401"):
+        machine.fail("curl -sSf http://localhost:8080/auth")
+
+    with subtest("Valid user but no matching ACL -> 403"):
+        machine.fail(
+            "curl -sSf -H 'Authorization: Token MyToken' http://localhost:8080/auth"
+        )
+
+    with subtest("Valid user and matching ACL -> 200"):
+        machine.succeed(
+            "curl -sSf -H 'Authorization: Token MyToken' -H 'X-Application: MyApp' http://localhost:8080/auth"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx.nix b/nixpkgs/nixos/tests/nginx.nix
new file mode 100644
index 000000000000..18822f095688
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx.nix
@@ -0,0 +1,129 @@
+# verifies:
+#   1. nginx generates config file with shared http context definitions above
+#      generated virtual hosts config.
+#   2. whether the ETag header is properly generated whenever we're serving
+#      files in Nix store paths
+#   3. nginx doesn't restart on configuration changes (only reloads)
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mbbx6spp danbst ];
+  };
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.nginx.enable = true;
+      services.nginx.commonHttpConfig = ''
+        log_format ceeformat '@cee: {"status":"$status",'
+          '"request_time":$request_time,'
+          '"upstream_response_time":$upstream_response_time,'
+          '"pipe":"$pipe","bytes_sent":$bytes_sent,'
+          '"connection":"$connection",'
+          '"remote_addr":"$remote_addr",'
+          '"host":"$host",'
+          '"timestamp":"$time_iso8601",'
+          '"request":"$request",'
+          '"http_referer":"$http_referer",'
+          '"upstream_addr":"$upstream_addr"}';
+      '';
+      services.nginx.virtualHosts."0.my.test" = {
+        extraConfig = ''
+          access_log syslog:server=unix:/dev/log,facility=user,tag=mytag,severity=info ceeformat;
+          location /favicon.ico { allow all; access_log off; log_not_found off; }
+        '';
+      };
+
+      services.nginx.virtualHosts.localhost = {
+        root = pkgs.runCommand "testdir" {} ''
+          mkdir "$out"
+          echo hello world > "$out/index.html"
+        '';
+      };
+
+      services.nginx.enableReload = true;
+
+      specialisation.etagSystem.configuration = {
+        services.nginx.virtualHosts.localhost = {
+          root = lib.mkForce (pkgs.runCommand "testdir2" {} ''
+            mkdir "$out"
+            echo content changed > "$out/index.html"
+          '');
+        };
+      };
+
+      specialisation.justReloadSystem.configuration = {
+        services.nginx.virtualHosts."1.my.test".listen = [ { addr = "127.0.0.1"; port = 8080; }];
+      };
+
+      specialisation.reloadRestartSystem.configuration = {
+        services.nginx.package = pkgs.nginxUnstable;
+      };
+
+      specialisation.reloadWithErrorsSystem.configuration = {
+        services.nginx.package = pkgs.nginxUnstable;
+        services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    etagSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/etagSystem";
+    justReloadSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/justReloadSystem";
+    reloadRestartSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/reloadRestartSystem";
+    reloadWithErrorsSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/reloadWithErrorsSystem";
+  in ''
+    url = "http://localhost/index.html"
+
+
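+    # ETags for files served out of the Nix store are derived from the store
+    # path, so they must change whenever the content (and thus the path)
+    # changes; a conditional GET with a still-valid tag has to answer 304.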
+    def check_etag():
+        etag = webserver.succeed(
+            f'curl -v {url} 2>&1 | sed -n -e "s/^< etag: *//ip"'
+        ).rstrip()
+        http_code = webserver.succeed(
+            f"curl -w '%{{http_code}}' --head --fail -H 'If-None-Match: {etag}' {url}"
+        )
+        assert http_code.split("\n")[-1] == "304"
+
+        return etag
+
+
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    with subtest("check ETag if serving Nix store paths"):
+        old_etag = check_etag()
+        webserver.succeed(
+            "${etagSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.sleep(1)
+        new_etag = check_etag()
+        assert old_etag != new_etag
+
+    with subtest("config is reloaded on nixos-rebuild switch"):
+        webserver.succeed(
+            "${justReloadSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.wait_for_open_port(8080)
+        webserver.fail("journalctl -u nginx | grep -q -i stopped")
+        webserver.succeed("journalctl -u nginx | grep -q -i reloaded")
+
+    with subtest("restart when nginx package changes"):
+        webserver.succeed(
+            "${reloadRestartSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.wait_for_unit("nginx")
+        webserver.succeed("journalctl -u nginx | grep -q -i stopped")
+
+    with subtest("nixos-rebuild --switch should fail when there are configuration errors"):
+        webserver.fail(
+            "${reloadWithErrorsSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.succeed("[[ $(systemctl is-failed nginx-config-reload) == failed ]]")
+        webserver.succeed("[[ $(systemctl is-failed nginx) == active ]]")
+        # Make sure the operation is idempotent: during development the error
+        # sometimes appeared only on the first rebuild and vanished on later ones.
+        webserver.fail(
+            "${reloadWithErrorsSystem}/bin/switch-to-configuration test >&2"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nix-ssh-serve.nix b/nixpkgs/nixos/tests/nix-ssh-serve.nix
new file mode 100644
index 000000000000..03f83542c7c1
--- /dev/null
+++ b/nixpkgs/nixos/tests/nix-ssh-serve.nix
@@ -0,0 +1,45 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let inherit (import ./ssh-keys.nix pkgs)
+      snakeOilPrivateKey snakeOilPublicKey;
+    ssh-config = builtins.toFile "ssh.conf" ''
+      UserKnownHostsFile=/dev/null
+      StrictHostKeyChecking=no
+    '';
+in
+   { name = "nix-ssh-serve";
+     meta.maintainers = [ lib.maintainers.shlevy ];
+     nodes =
+       { server.nix.sshServe =
+           { enable = true;
+             keys = [ snakeOilPublicKey ];
+             protocol = "ssh-ng";
+           };
+         server.nix.package = pkgs.nix;
+         client.nix.package = pkgs.nix;
+       };
+     testScript = ''
+       start_all()
+
+       client.succeed("mkdir -m 700 /root/.ssh")
+       client.succeed(
+           "cat ${ssh-config} > /root/.ssh/config"
+       )
+       client.succeed(
+           "cat ${snakeOilPrivateKey} > /root/.ssh/id_ecdsa"
+       )
+       client.succeed("chmod 600 /root/.ssh/id_ecdsa")
+
+       client.succeed("nix-store --add /etc/machine-id > mach-id-path")
+
+       server.wait_for_unit("sshd")
+
+       client.fail("diff /root/other-store$(cat mach-id-path) /etc/machine-id")
+       # Currently due to shared store this is a noop :(
+       client.succeed("nix copy --to ssh-ng://nix-ssh@server $(cat mach-id-path)")
+       client.succeed(
+           "nix-store --realise $(cat mach-id-path) --store /root/other-store --substituters ssh-ng://nix-ssh@server"
+       )
+       client.succeed("diff /root/other-store$(cat mach-id-path) /etc/machine-id")
+     '';
+   }
+)
diff --git a/nixpkgs/nixos/tests/nixos-generate-config.nix b/nixpkgs/nixos/tests/nixos-generate-config.nix
new file mode 100644
index 000000000000..6c83ccecc70a
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixos-generate-config.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ lib, ... } : {
+  name = "nixos-generate-config";
+  meta.maintainers = with lib.maintainers; [ basvandijk ];
+  machine = {
+    system.nixos-generate-config.configuration = ''
+      # OVERRIDDEN
+      { config, pkgs, ... }: {
+        imports = [ ./hardware-configuration.nix ];
+      $bootLoaderConfig
+      }
+    '';
+  };
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("nixos-generate-config")
+
+    # Test if the configuration really is overridden
+    machine.succeed("grep 'OVERRIDDEN' /etc/nixos/configuration.nix")
+
+    # Test if the Perl variable $bootLoaderConfig is spliced correctly:
+    machine.succeed(
+        "grep 'boot\\.loader\\.grub\\.enable = true;' /etc/nixos/configuration.nix"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/novacomd.nix b/nixpkgs/nixos/tests/novacomd.nix
new file mode 100644
index 000000000000..940210dee235
--- /dev/null
+++ b/nixpkgs/nixos/tests/novacomd.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "novacomd";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ dtzWill ];
+  };
+
+  machine = { ... }: {
+    services.novacomd.enable = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("novacomd.service")
+
+    with subtest("Make sure the daemon is really listening"):
+        machine.wait_for_open_port(6968)
+        machine.succeed("novacom -l")
+
+    with subtest("Stop the daemon, double-check novacom fails if daemon isn't working"):
+        machine.stop_job("novacomd")
+        machine.fail("novacom -l")
+
+    with subtest("Make sure the daemon starts back up again"):
+        machine.start_job("novacomd")
+        # make sure the daemon is really listening
+        machine.wait_for_open_port(6968)
+        machine.succeed("novacom -l")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nsd.nix b/nixpkgs/nixos/tests/nsd.nix
new file mode 100644
index 000000000000..bcc14e817a87
--- /dev/null
+++ b/nixpkgs/nixos/tests/nsd.nix
@@ -0,0 +1,99 @@
+let
+  common = { pkgs, ... }: {
+    networking.firewall.enable = false;
+    networking.useDHCP = false;
+    # for a host utility with IPv6 support
+    environment.systemPackages = [ pkgs.bind ];
+  };
+in import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "nsd";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ aszlig ];
+  };
+
+  nodes = {
+    clientv4 = { lib, nodes, ... }: {
+      imports = [ common ];
+      networking.nameservers = lib.mkForce [
+        (lib.head nodes.server.config.networking.interfaces.eth1.ipv4.addresses).address
+      ];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = "192.168.0.2"; prefixLength = 24; }
+      ];
+    };
+
+    clientv6 = { lib, nodes, ... }: {
+      imports = [ common ];
+      networking.nameservers = lib.mkForce [
+        (lib.head nodes.server.config.networking.interfaces.eth1.ipv6.addresses).address
+      ];
+      networking.interfaces.eth1.ipv6.addresses = [
+        { address = "dead:beef::2"; prefixLength = 64; }
+      ];
+    };
+
+    server = { lib, ... }: {
+      imports = [ common ];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = "192.168.0.1"; prefixLength = 24; }
+      ];
+      networking.interfaces.eth1.ipv6.addresses = [
+        { address = "dead:beef::1"; prefixLength = 64; }
+      ];
+      services.nsd.enable = true;
+      services.nsd.rootServer = true;
+      services.nsd.interfaces = lib.mkForce [];
+      services.nsd.zones."example.com.".data = ''
+        @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600
+        ipv4 A 1.2.3.4
+        ipv6 AAAA abcd::eeff
+        deleg NS ns.example.com
+        ns A 192.168.0.1
+        ns AAAA dead:beef::1
+      '';
+      services.nsd.zones."deleg.example.com.".data = ''
+        @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600
+        @ A 9.8.7.6
+        @ AAAA fedc::bbaa
+      '';
+      services.nsd.zones.".".data = ''
+        @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600
+        root A 1.8.7.4
+        root AAAA acbd::4
+      '';
+    };
+  };
+
+  testScript = ''
+    import re
+
+    start_all()
+
+    clientv4.wait_for_unit("network.target")
+    clientv6.wait_for_unit("network.target")
+    server.wait_for_unit("nsd.service")
+
+
+    def assert_host(type, rr, query, expected):
+        self = clientv4 if type == 4 else clientv6
+        out = self.succeed(f"host -{type} -t {rr} {query}").rstrip()
+        self.log(f"output: {out}")
+        assert re.search(
+            expected, out
+        ), f"DNS IPv{type} query on {query} gave '{out}' instead of '{expected}'"
+
+
+    for ipv in 4, 6:
+        with subtest(f"IPv{ipv}"):
+            assert_host(ipv, "a", "example.com", "has no [^ ]+ record")
+            assert_host(ipv, "aaaa", "example.com", "has no [^ ]+ record")
+
+            assert_host(ipv, "soa", "example.com", "SOA.*?noc\.example\.com")
+            assert_host(ipv, "a", "ipv4.example.com", "address 1.2.3.4$")
+            assert_host(ipv, "aaaa", "ipv6.example.com", "address abcd::eeff$")
+
+            assert_host(ipv, "a", "deleg.example.com", "address 9.8.7.6$")
+            assert_host(ipv, "aaaa", "deleg.example.com", "address fedc::bbaa$")
+
+            assert_host(ipv, "a", "root", "address 1.8.7.4$")
+            assert_host(ipv, "aaaa", "root", "address acbd::4$")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nzbget.nix b/nixpkgs/nixos/tests/nzbget.nix
new file mode 100644
index 000000000000..12d8ed6ea8da
--- /dev/null
+++ b/nixpkgs/nixos/tests/nzbget.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "nzbget";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ aanderse flokli ];
+  };
+
+  nodes = {
+    server = { ... }: {
+      services.nzbget.enable = true;
+
+      # hack, don't add (unfree) unrar to nzbget's path,
+      # so we can run this test in CI
+      systemd.services.nzbget.path = pkgs.stdenv.lib.mkForce [ pkgs.p7zip ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("nzbget.service")
+    server.wait_for_unit("network.target")
+    server.wait_for_open_port(6789)
+    assert "This file is part of nzbget" in server.succeed(
+        "curl -s -u nzbget:tegbzn6789 http://127.0.0.1:6789"
+    )
+    server.succeed(
+        "${pkgs.nzbget}/bin/nzbget -n -o Control_iP=127.0.0.1 -o Control_port=6789 -o Control_password=tegbzn6789 -V"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/openarena.nix b/nixpkgs/nixos/tests/openarena.nix
new file mode 100644
index 000000000000..395ed9153ea1
--- /dev/null
+++ b/nixpkgs/nixos/tests/openarena.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  client =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      hardware.opengl.driSupport = true;
+      environment.systemPackages = [ pkgs.openarena ];
+    };
+
+in {
+  name = "openarena";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ fpletz ];
+  };
+
+  nodes =
+    { server =
+        { services.openarena = {
+            enable = true;
+            extraFlags = [ "+set g_gametype 0" "+map oa_dm7" "+addbot Angelyss" "+addbot Arachna" ];
+            openPorts = true;
+          };
+        };
+
+      client1 = client;
+      client2 = client;
+    };
+
+  testScript =
+    ''
+      start_all()
+
+      server.wait_for_unit("openarena")
+      server.wait_until_succeeds("ss --numeric --udp --listening | grep -q 27960")
+
+      client1.wait_for_x()
+      client2.wait_for_x()
+
+      client1.execute("openarena +set r_fullscreen 0 +set name Foo +connect server &")
+      client2.execute("openarena +set r_fullscreen 0 +set name Bar +connect server &")
+
+      server.wait_until_succeeds(
+          "journalctl -u openarena -e | grep -q 'Foo.*entered the game'"
+      )
+      server.wait_until_succeeds(
+          "journalctl -u openarena -e | grep -q 'Bar.*entered the game'"
+      )
+
+      server.sleep(10)  # wait for a while to get a nice screenshot
+
+      client1.screenshot("screen_client1_1")
+      client2.screenshot("screen_client2_1")
+
+      client1.block()
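+      # block() simulates unplugging client1's network cable; the server
+      # should keep running with only client2 connected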
+
+      server.sleep(10)
+
+      client1.screenshot("screen_client1_2")
+      client2.screenshot("screen_client2_2")
+
+      client1.unblock()
+
+      server.sleep(10)
+
+      client1.screenshot("screen_client1_3")
+      client2.screenshot("screen_client2_3")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/openldap.nix b/nixpkgs/nixos/tests/openldap.nix
new file mode 100644
index 000000000000..f8321a2c522d
--- /dev/null
+++ b/nixpkgs/nixos/tests/openldap.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix {
+  name = "openldap";
+
+  machine = { pkgs, ... }: {
+    services.openldap = {
+      enable = true;
+      suffix = "dc=example";
+      rootdn = "cn=root,dc=example";
+      rootpw = "notapassword";
+      database = "bdb";
+      extraDatabaseConfig = ''
+        directory /var/db/openldap
+      '';
+      declarativeContents = ''
+        dn: dc=example
+        objectClass: domain
+        dc: example
+
+        dn: ou=users,dc=example
+        objectClass: organizationalUnit
+        ou: users
+      '';
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("openldap.service")
+    machine.succeed(
+        "systemctl status openldap.service",
+        'ldapsearch -LLL -D "cn=root,dc=example" -w notapassword -b "dc=example"',
+    )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/opensmtpd.nix b/nixpkgs/nixos/tests/opensmtpd.nix
new file mode 100644
index 000000000000..17c1a569ba0d
--- /dev/null
+++ b/nixpkgs/nixos/tests/opensmtpd.nix
@@ -0,0 +1,125 @@
+import ./make-test-python.nix {
+  name = "opensmtpd";
+
+  nodes = {
+    smtp1 = { pkgs, ... }: {
+      imports = [ common/user-account.nix ];
+      networking = {
+        firewall.allowedTCPPorts = [ 25 ];
+        useDHCP = false;
+        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+      };
+      environment.systemPackages = [ pkgs.opensmtpd ];
+      services.opensmtpd = {
+        enable = true;
+        extraServerArgs = [ "-v" ];
+        serverConfiguration = ''
+          listen on 0.0.0.0
+          action do_relay relay
+          # DO NOT DO THIS IN PRODUCTION!
+          # Setting up authentication requires a certificate which is painful in
+          # a test environment, but THIS WOULD BE DANGEROUS OUTSIDE OF A
+          # WELL-CONTROLLED ENVIRONMENT!
+          match from any for any action do_relay
+        '';
+      };
+    };
+
+    smtp2 = { pkgs, ... }: {
+      imports = [ common/user-account.nix ];
+      networking = {
+        firewall.allowedTCPPorts = [ 25 143 ];
+        useDHCP = false;
+        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+      };
+      environment.systemPackages = [ pkgs.opensmtpd ];
+      services.opensmtpd = {
+        enable = true;
+        extraServerArgs = [ "-v" ];
+        serverConfiguration = ''
+          listen on 0.0.0.0
+          action dovecot_deliver mda \
+            "${pkgs.dovecot}/libexec/dovecot/deliver -d %{user.username}"
+          match from any for local action dovecot_deliver
+        '';
+      };
+      services.dovecot2 = {
+        enable = true;
+        enableImap = true;
+        mailLocation = "maildir:~/mail";
+        protocols = [ "imap" ];
+      };
+    };
+
+    client = { pkgs, ... }: {
+      networking = {
+        useDHCP = false;
+        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.3"; prefixLength = 24; }
+        ];
+      };
+      environment.systemPackages = let
+        sendTestMail = pkgs.writeScriptBin "send-a-test-mail" ''
+          #!${pkgs.python3.interpreter}
+          import smtplib, sys
+
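+          # The bracketed sender/recipient forms are RFC 5321 address
+          # literals, so mail is routed by IP and no DNS is needed.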
+          with smtplib.SMTP('192.168.1.1') as smtp:
+            smtp.sendmail('alice@[192.168.1.1]', 'bob@[192.168.1.2]', """
+              From: alice@smtp1
+              To: bob@smtp2
+              Subject: Test
+
+              Hello World
+            """)
+        '';
+
+        checkMailLanded = pkgs.writeScriptBin "check-mail-landed" ''
+          #!${pkgs.python3.interpreter}
+          import imaplib
+
+          with imaplib.IMAP4('192.168.1.2', 143) as imap:
+            imap.login('bob', 'foobar')
+            imap.select()
+            status, refs = imap.search(None, 'ALL')
+            assert status == 'OK'
+            assert len(refs) == 1
+            status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+            assert status == 'OK'
+            content = msg[0][1]
+            print("===> content:", content)
+            split = content.split(b'\r\n')
+            print("===> split:", split)
+            lastline = split[-3]
+            print("===> lastline:", lastline)
+            assert lastline.strip() == b'Hello World'
+        '';
+      in [ sendTestMail checkMailLanded ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    client.wait_for_unit("network-online.target")
+    smtp1.wait_for_unit("opensmtpd")
+    smtp2.wait_for_unit("opensmtpd")
+    smtp2.wait_for_unit("dovecot2")
+
+    # To prevent sporadic failures during daemon startup, make sure
+    # services are listening on their ports before sending requests
+    smtp1.wait_for_open_port(25)
+    smtp2.wait_for_open_port(25)
+    smtp2.wait_for_open_port(143)
+
+    client.succeed("send-a-test-mail")
+    smtp1.wait_until_fails("smtpctl show queue | egrep .")
+    smtp2.wait_until_fails("smtpctl show queue | egrep .")
+    client.succeed("check-mail-landed >&2")
+  '';
+
+  meta.timeout = 1800;
+}
diff --git a/nixpkgs/nixos/tests/openssh.nix b/nixpkgs/nixos/tests/openssh.nix
new file mode 100644
index 000000000000..e9692b503272
--- /dev/null
+++ b/nixpkgs/nixos/tests/openssh.nix
@@ -0,0 +1,112 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let inherit (import ./ssh-keys.nix pkgs)
+      snakeOilPrivateKey snakeOilPublicKey;
+in {
+  name = "openssh";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ aszlig eelco ];
+  };
+
+  nodes = {
+
+    server =
+      { ... }:
+
+      {
+        services.openssh.enable = true;
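+        # The distinctive memlock limit below lets the test script verify
+        # (via `ulimit -l`) that PAM session limits apply to SSH logins.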
+        security.pam.services.sshd.limits =
+          [ { domain = "*"; item = "memlock"; type = "-"; value = 1024; } ];
+        users.users.root.openssh.authorizedKeys.keys = [
+          snakeOilPublicKey
+        ];
+      };
+
+    server_lazy =
+      { ... }:
+
+      {
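+        # startWhenNeeded puts sshd behind a systemd socket unit, so the
+        # daemon is only spawned on the first incoming connection.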
+        services.openssh = { enable = true; startWhenNeeded = true; };
+        security.pam.services.sshd.limits =
+          [ { domain = "*"; item = "memlock"; type = "-"; value = 1024; } ];
+        users.users.root.openssh.authorizedKeys.keys = [
+          snakeOilPublicKey
+        ];
+      };
+
+    server_localhost_only =
+      { ... }:
+
+      {
+        services.openssh = {
+          enable = true; listenAddresses = [ { addr = "127.0.0.1"; port = 22; } ];
+        };
+      };
+
+    server_localhost_only_lazy =
+      { ... }:
+
+      {
+        services.openssh = {
+          enable = true; startWhenNeeded = true; listenAddresses = [ { addr = "127.0.0.1"; port = 22; } ];
+        };
+      };
+
+    client =
+      { ... }: { };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("sshd")
+
+    with subtest("manual-authkey"):
+        client.succeed("mkdir -m 700 /root/.ssh")
+        client.succeed(
+            '${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ""'
+        )
+        public_key = client.succeed(
+            "${pkgs.openssh}/bin/ssh-keygen -y -f /root/.ssh/id_ed25519"
+        )
+        public_key = public_key.strip()
+        client.succeed("chmod 600 /root/.ssh/id_ed25519")
+
+        server.succeed("mkdir -m 700 /root/.ssh")
+        server.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key))
+        server_lazy.succeed("mkdir -m 700 /root/.ssh")
+        server_lazy.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key))
+
+        client.wait_for_unit("network.target")
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'echo hello world' >&2"
+        )
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'ulimit -l' | grep 1024"
+        )
+
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server_lazy 'echo hello world' >&2"
+        )
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server_lazy 'ulimit -l' | grep 1024"
+        )
+
+    with subtest("configured-authkey"):
+        client.succeed(
+            "cat ${snakeOilPrivateKey} > privkey.snakeoil"
+        )
+        client.succeed("chmod 600 privkey.snakeoil")
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server true"
+        )
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server_lazy true"
+        )
+
+    with subtest("localhost-only"):
+        server_localhost_only.succeed("ss -nlt | grep '127.0.0.1:22'")
+        server_localhost_only_lazy.succeed("ss -nlt | grep '127.0.0.1:22'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/openstack-image.nix b/nixpkgs/nixos/tests/openstack-image.nix
new file mode 100644
index 000000000000..97c9137fe1d6
--- /dev/null
+++ b/nixpkgs/nixos/tests/openstack-image.nix
@@ -0,0 +1,94 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
+
+with import common/ec2.nix { inherit makeTest pkgs; };
+
+let
+  image =
+    (import ../lib/eval-config.nix {
+      inherit system;
+      modules = [
+        ../maintainers/scripts/openstack/openstack-image.nix
+        ../modules/testing/test-instrumentation.nix
+        ../modules/profiles/qemu-guest.nix
+        {
+          # Needed by nixos-rebuild due to lack of network access.
+          system.extraDependencies = with pkgs; [
+            stdenv
+          ];
+        }
+      ];
+    }).config.system.build.openstackImage + "/nixos.qcow2";
+
+  sshKeys = import ./ssh-keys.nix pkgs;
+  snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
+  snakeOilPublicKey = sshKeys.snakeOilPublicKey;
+
+in {
+  metadata = makeEc2Test {
+    name = "openstack-ec2-metadata";
+    inherit image;
+    sshPublicKey = snakeOilPublicKey;
+    userData = ''
+      SSH_HOST_ED25519_KEY_PUB:${snakeOilPublicKey}
+      SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey}
+    '';
+    script = ''
+      $machine->start;
+      $machine->waitForFile("/etc/ec2-metadata/user-data");
+      $machine->waitForUnit("sshd.service");
+
+      $machine->succeed("grep unknown /etc/ec2-metadata/ami-manifest-path");
+
+      # We have no keys configured on the client side yet, so this should fail
+      $machine->fail("ssh -o BatchMode=yes localhost exit");
+
+      # Let's install our client private key
+      $machine->succeed("mkdir -p ~/.ssh");
+
+      $machine->succeed("echo '${snakeOilPrivateKey}' > ~/.ssh/id_ed25519");
+      $machine->succeed("chmod 600 ~/.ssh/id_ed25519");
+
+      # We haven't configured the host key yet, so this should still fail
+      $machine->fail("ssh -o BatchMode=yes localhost exit");
+
+      # Add the host key; ssh should finally succeed
+      $machine->succeed("echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts");
+      $machine->succeed("ssh -o BatchMode=yes localhost exit");
+
+      # Reboot to make sure the boot-time filesystem resize is idempotent.
+      $machine->shutdown;
+      $machine->start;
+      $machine->waitForFile("/etc/ec2-metadata/user-data");
+    '';
+  };
+
+  userdata = makeEc2Test {
+    name = "openstack-ec2-metadata";
+    inherit image;
+    sshPublicKey = snakeOilPublicKey;
+    userData = ''
+      { pkgs, ... }:
+      {
+        imports = [
+          <nixpkgs/nixos/modules/virtualisation/openstack-config.nix>
+          <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
+        ];
+        environment.etc.testFile = {
+          text = "whoa";
+        };
+      }
+    '';
+    script = ''
+      $machine->start;
+      $machine->waitForFile("/etc/testFile");
+      $machine->succeed("cat /etc/testFile | grep -q 'whoa'");
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/orangefs.nix b/nixpkgs/nixos/tests/orangefs.nix
new file mode 100644
index 000000000000..24b7737058c8
--- /dev/null
+++ b/nixpkgs/nixos/tests/orangefs.nix
@@ -0,0 +1,82 @@
+import ./make-test-python.nix ({ ... } :
+
+let
+  server = { pkgs, ... } : {
+    networking.firewall.allowedTCPPorts = [ 3334 ];
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb
+    '';
+
+    virtualisation.emptyDiskImages = [ 4096 ];
+
+    fileSystems = pkgs.lib.mkVMOverride
+      { "/data" =
+          { device = "/dev/disk/by-label/data";
+            fsType = "ext4";
+          };
+      };
+
+    services.orangefs.server = {
+      enable = true;
+      dataStorageSpace = "/data/storage";
+      metadataStorageSpace = "/data/meta";
+      servers = {
+        server1 = "tcp://server1:3334";
+        server2 = "tcp://server2:3334";
+      };
+    };
+  };
+
+  client = { lib, ... } : {
+    networking.firewall.enable = true;
+
+    services.orangefs.client = {
+      enable = true;
+      fileSystems = [{
+        target = "tcp://server1:3334/orangefs";
+        mountPoint = "/orangefs";
+      }];
+    };
+  };
+
+in {
+  name = "orangefs";
+
+  nodes = {
+    server1 = server;
+    server2 = server;
+
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = ''
+    # format storage
+    for server in server1, server2:
+        server.start()
+        server.wait_for_unit("multi-user.target")
+        server.succeed("mkdir -p /data/storage /data/meta")
+        server.succeed("chown orangefs:orangefs /data/storage /data/meta")
+        server.succeed("chmod 0770 /data/storage /data/meta")
+        server.succeed(
+            "sudo -g orangefs -u orangefs pvfs2-server -f /etc/orangefs/server.conf"
+        )
+
+    # start services after storage is formatted on all machines
+    for server in server1, server2:
+        server.succeed("systemctl start orangefs-server.service")
+
+    with subtest("clients can reach and mount the FS"):
+        for client in client1, client2:
+            client.start()
+            client.wait_for_unit("orangefs-client.service")
+            # Both servers need to be reachable
+            client.succeed("pvfs2-check-server -h server1 -f orangefs -n tcp -p 3334")
+            client.succeed("pvfs2-check-server -h server2 -f orangefs -n tcp -p 3334")
+            client.wait_for_unit("orangefs.mount")
+
+    with subtest("R/W test between clients"):
+        client1.succeed("echo test > /orangefs/file1")
+        client2.succeed("grep test /orangefs/file1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/os-prober.nix b/nixpkgs/nixos/tests/os-prober.nix
new file mode 100644
index 000000000000..6a38f5ca531c
--- /dev/null
+++ b/nixpkgs/nixos/tests/os-prober.nix
@@ -0,0 +1,120 @@
+import ./make-test.nix ({pkgs, lib, ...}:
+let
+  # A filesystem image with a (presumably) bootable debian
+  debianImage = pkgs.vmTools.diskImageFuns.debian9i386 {
+    # os-prober cannot detect systems installed on disks without a partition table
+    # so we create the disk ourselves
+    createRootFS = with pkgs; ''
+      ${parted}/bin/parted --script /dev/vda mklabel msdos
+      ${parted}/bin/parted --script /dev/vda -- mkpart primary ext2 1M -1s
+      mkdir /mnt
+      ${e2fsprogs}/bin/mkfs.ext4 /dev/vda1
+      ${utillinux}/bin/mount -t ext4 /dev/vda1 /mnt
+
+      if test -e /mnt/.debug; then
+        exec ${bash}/bin/sh
+      fi
+      touch /mnt/.debug
+
+      mkdir /mnt/proc /mnt/dev /mnt/sys
+    '';
+    extraPackages = [
+      # /etc/os-release
+      "base-files"
+      # make the disk bootable-looking
+      "grub2" "linux-image-686"
+    ];
+    # install grub
+    postInstall = ''
+      ln -sf /proc/self/mounts /etc/mtab
+      PATH=/usr/bin:/bin:/usr/sbin:/sbin $chroot /mnt \
+        grub-install /dev/vda --force
+      PATH=/usr/bin:/bin:/usr/sbin:/sbin $chroot /mnt \
+        update-grub
+    '';
+  };
+
+  # options to add the disk to the test vm
+  QEMU_OPTS = "-drive index=2,file=${debianImage}/disk-image.qcow2,read-only,if=virtio";
+
+  # a part of the configuration of the test vm
+  simpleConfig = {
+    boot.loader.grub = {
+      enable = true;
+      useOSProber = true;
+      device = "/dev/vda";
+      # vda is a filesystem without partition table
+      forceInstall = true;
+    };
+    nix.binaryCaches = lib.mkForce [ ];
+    nix.extraOptions = ''
+      hashed-mirrors =
+      connect-timeout = 1
+    '';
+    # save some memory
+    documentation.enable = false;
+  };
+  # /etc/nixos/configuration.nix for the vm
+  configFile = pkgs.writeText "configuration.nix"  ''
+    {config, pkgs, ...}: ({
+    imports =
+          [ ./hardware-configuration.nix
+            <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          ];
+    } // (builtins.fromJSON (builtins.readFile ${
+      pkgs.writeText "simpleConfig.json" (builtins.toJSON simpleConfig)
+    })))
+  '';
+in {
+  name = "os-prober";
+
+  machine = { config, pkgs, ... }: (simpleConfig // {
+      imports = [ ../modules/profiles/installation-device.nix
+                  ../modules/profiles/base.nix ];
+      virtualisation.memorySize = 1024;
+      # The test cannot access the network, so any packages
+      # nixos-rebuild needs must be included in the VM.
+      system.extraDependencies = with pkgs;
+        [ sudo
+          libxml2.bin
+          libxslt.bin
+          desktop-file-utils
+          docbook5
+          docbook_xsl_ns
+          unionfs-fuse
+          ntp
+          nixos-artwork.wallpapers.simple-dark-gray-bottom
+          perlPackages.XMLLibXML
+          perlPackages.ListCompare
+          shared-mime-info
+          texinfo
+          xorg.lndir
+          grub2
+
+          # add curl so that, rather than watching the test try to download
+          # curl's own tarball first, we see what it is actually trying to fetch
+          curl
+        ];
+  });
+
+  testScript = ''
+    # hack to add the secondary disk
+    $machine->{startCommand} = "QEMU_OPTS=\"\$QEMU_OPTS \"${lib.escapeShellArg QEMU_OPTS} ".$machine->{startCommand};
+
+    $machine->start;
+    $machine->succeed("udevadm settle");
+    $machine->waitForUnit("multi-user.target");
+
+    # check that os-prober works standalone
+    $machine->succeed("${pkgs.os-prober}/bin/os-prober | grep /dev/vdb1");
+
+    # rebuild and test that debian is available in the grub menu
+    $machine->succeed("nixos-generate-config");
+    $machine->copyFileFromHost(
+        "${configFile}",
+        "/etc/nixos/configuration.nix");
+    $machine->succeed("nixos-rebuild boot >&2");
+
+    $machine->succeed("egrep 'menuentry.*debian' /boot/grub/grub.cfg");
+  '';
+})
diff --git a/nixpkgs/nixos/tests/osrm-backend.nix b/nixpkgs/nixos/tests/osrm-backend.nix
new file mode 100644
index 000000000000..db67a5a589f9
--- /dev/null
+++ b/nixpkgs/nixos/tests/osrm-backend.nix
@@ -0,0 +1,57 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  port = 5000;
+in {
+  name = "osrm-backend";
+  meta.maintainers = [ lib.maintainers.erictapen ];
+
+  machine = { config, pkgs, ... }:{
+
+    services.osrm = {
+      enable = true;
+      inherit port;
+      dataFile = let
+        filename = "monaco";
+        osrm-data = pkgs.stdenv.mkDerivation {
+          name = "osrm-data";
+
+          buildInputs = [ pkgs.osrm-backend ];
+
+          # This is a pbf file of monaco, downloaded at 2019-01-04 from
+          # http://download.geofabrik.de/europe/monaco-latest.osm.pbf
+          # as apparently no provider of OSM files guarantees immutability,
+          # this is hosted as a gist on GitHub.
+          src = pkgs.fetchgit {
+            url = "https://gist.github.com/erictapen/01e39f73a6c856eac53ba809a94cdb83";
+            rev = "9b1ff0f24deb40e5cf7df51f843dbe860637b8ce";
+            sha256 = "1scqhmrfnpwsy5i2a9jpggqnvfgj4hv9p4qyvc79321pzkbv59nx";
+          };
+
+          buildCommand = ''
+            cp $src/${filename}.osm.pbf .
+            ${pkgs.osrm-backend}/bin/osrm-extract -p ${pkgs.osrm-backend}/share/osrm/profiles/car.lua ${filename}.osm.pbf
+            ${pkgs.osrm-backend}/bin/osrm-partition ${filename}.osrm
+            ${pkgs.osrm-backend}/bin/osrm-customize ${filename}.osrm
+            mkdir -p $out
+            cp ${filename}* $out/
+          '';
+        };
+      in "${osrm-data}/${filename}.osrm";
+    };
+
+    environment.systemPackages = [ pkgs.jq ];
+  };
+
+  testScript = let
+    query = "http://localhost:${toString port}/route/v1/driving/7.41720,43.73304;7.42463,43.73886?steps=true";
+  in ''
+    machine.wait_for_unit("osrm.service")
+    machine.wait_for_open_port(${toString port})
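+    # Route between two coordinates in Monaco and check that the returned
+    # waypoints snap to the expected street names.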
+    assert "Boulevard Rainier III" in machine.succeed(
+        "curl --silent '${query}' | jq .waypoints[0].name"
+    )
+    assert "Avenue de la Costa" in machine.succeed(
+        "curl --silent '${query}' | jq .waypoints[1].name"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/overlayfs.nix b/nixpkgs/nixos/tests/overlayfs.nix
new file mode 100644
index 000000000000..33794deb9ed8
--- /dev/null
+++ b/nixpkgs/nixos/tests/overlayfs.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "overlayfs";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ bachp ];
+
+  machine = { pkgs, ... }: {
+    virtualisation.emptyDiskImages = [ 512 ];
+    networking.hostId = "deadbeef";
+    environment.systemPackages = with pkgs; [ parted ];
+  };
+
+  testScript = ''
+    machine.succeed("ls /dev")
+
+    machine.succeed("mkdir -p /tmp/mnt")
+
+    # Test ext4 + overlayfs
+    machine.succeed(
+        """
+          mkfs.ext4 -F -L overlay-ext4 /dev/vdb
+          mount -t ext4 /dev/vdb /tmp/mnt
+          mkdir -p /tmp/mnt/upper /tmp/mnt/lower /tmp/mnt/work /tmp/mnt/merged
+          # Setup some existing files
+          echo 'Replace' > /tmp/mnt/lower/replace.txt
+          echo 'Append' > /tmp/mnt/lower/append.txt
+          echo 'Overwrite' > /tmp/mnt/lower/overwrite.txt
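+          # lowerdir is the read-only base layer, upperdir receives all
+          # writes (copy-up), and workdir must be an empty directory on
+          # the same filesystem as upperdir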
+          mount -t overlay overlay -o lowerdir=/tmp/mnt/lower,upperdir=/tmp/mnt/upper,workdir=/tmp/mnt/work /tmp/mnt/merged
+          # Test new
+          echo 'New' > /tmp/mnt/merged/new.txt
+          [[ "\$(cat /tmp/mnt/merged/new.txt)" == "New" ]]
+          # Test replace
+          [[ "\$(cat /tmp/mnt/merged/replace.txt)" == "Replace" ]]
+          echo 'Replaced' > /tmp/mnt/merged/replace-tmp.txt
+          mv /tmp/mnt/merged/replace-tmp.txt /tmp/mnt/merged/replace.txt
+          [[ "\$(cat /tmp/mnt/merged/replace.txt)" == "Replaced" ]]
+          # Overwrite
+          [[ "\$(cat /tmp/mnt/merged/overwrite.txt)" == "Overwrite" ]]
+          echo 'Overwritten' > /tmp/mnt/merged/overwrite.txt
+          [[ "\$(cat /tmp/mnt/merged/overwrite.txt)" == "Overwritten" ]]
+          # Test append
+          [[ "\$(cat /tmp/mnt/merged/append.txt)" == "Append" ]]
+          echo 'ed' >> /tmp/mnt/merged/append.txt
+          #"cat /tmp/mnt/merged/append.txt && exit 1
+          [[ "\$(cat /tmp/mnt/merged/append.txt)" == "Append\ned" ]]
+          umount /tmp/mnt/merged
+          umount /tmp/mnt
+          udevadm settle
+      """
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/packagekit.nix b/nixpkgs/nixos/tests/packagekit.nix
new file mode 100644
index 000000000000..7e93ad35e80a
--- /dev/null
+++ b/nixpkgs/nixos/tests/packagekit.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "packagekit";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ peterhoeg ];
+  };
+
+  machine = { ... }: {
+    environment.systemPackages = with pkgs; [ dbus ];
+    services.packagekit = {
+      enable = true;
+      backend = "test_nop";
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    # send a dbus message to activate the service
+    machine.succeed(
+        "dbus-send --system --type=method_call --print-reply --dest=org.freedesktop.PackageKit /org/freedesktop/PackageKit org.freedesktop.DBus.Introspectable.Introspect"
+    )
+
+    # so now it should be running
+    machine.wait_for_unit("packagekit.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pam-oath-login.nix b/nixpkgs/nixos/tests/pam-oath-login.nix
new file mode 100644
index 000000000000..6d48199eda97
--- /dev/null
+++ b/nixpkgs/nixos/tests/pam-oath-login.nix
@@ -0,0 +1,108 @@
+import ./make-test-python.nix ({ ... }:
+
+let
+  oathSnakeoilSecret = "cdd4083ef8ff1fa9178c6d46bfb1a3";
+
+  # With HOTP the one-time password is derived from a counter of how many
+  # passwords have been generated so far. This environment always starts
+  # at counter 0, so the expected password sequence is static.
+  #
+  # Generated in nix-shell -p oathToolkit
+  # via: oathtool -v -d6 -w10 cdd4083ef8ff1fa9178c6d46bfb1a3
+  # and picking the first two:
+  oathSnakeOilPassword1 = "143349";
+  oathSnakeOilPassword2 = "801753";
+
+  alicePassword = "foobar";
+  # Generated via: mkpasswd -m sha-512 and passing in "foobar"
+  hashedAlicePassword = "$6$MsMrE1q.1HrCgTS$Vq2e/uILzYjSN836TobAyN9xh9oi7EmCmucnZID25qgPoibkw8qTCugiAPnn4eCGvn1A.7oEBFJaaGUaJsQQY.";
+
+in
+{
+  name = "pam-oath-login";
+
+  machine =
+    { ... }:
+    {
+      security.pam.oath = {
+        enable = true;
+      };
+
+      users.users.alice = {
+        isNormalUser = true;
+        name = "alice";
+        uid = 1000;
+        hashedPassword = hashedAlicePassword;
+        extraGroups = [ "wheel" ];
+        createHome = true;
+        home = "/home/alice";
+      };
+
+
+      systemd.services.setupOathSnakeoilFile = {
+        wantedBy = [ "default.target" ];
+        before = [ "default.target" ];
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+        script = ''
+          touch /etc/users.oath
+          chmod 600 /etc/users.oath
+          chown root /etc/users.oath
+          echo "HOTP/E/6 alice - ${oathSnakeoilSecret}" > /etc/users.oath
+        '';
+      };
+    };
+
+  testScript = ''
+    def switch_to_tty(tty_number):
+        machine.fail(f"pgrep -f 'agetty.*tty{tty_number}'")
+        machine.send_key(f"alt-f{tty_number}")
+        machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]")
+        machine.wait_for_unit(f"getty@tty{tty_number}.service")
+        machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'")
+
+
+    def enter_user_alice(tty_number):
+        machine.wait_until_tty_matches(tty_number, "login: ")
+        machine.send_chars("alice\n")
+        machine.wait_until_tty_matches(tty_number, "login: alice")
+        machine.wait_until_succeeds("pgrep login")
+        machine.wait_until_tty_matches(tty_number, "One-time password")
+
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+    machine.screenshot("postboot")
+
+    with subtest("Invalid password"):
+        switch_to_tty(2)
+        enter_user_alice(2)
+
+        machine.send_chars("${oathSnakeOilPassword1}\n")
+        machine.wait_until_tty_matches(2, "Password: ")
+        machine.send_chars("blorg\n")
+        machine.wait_until_tty_matches(2, "Login incorrect")
+
+    with subtest("Invalid oath token"):
+        switch_to_tty(3)
+        enter_user_alice(3)
+
+        machine.send_chars("000000\n")
+        machine.wait_until_tty_matches(3, "Login incorrect")
+        machine.wait_until_tty_matches(3, "login:")
+
+    with subtest("Happy path: Both passwords are mandatory to get us in"):
+        switch_to_tty(4)
+        enter_user_alice(4)
+
+        machine.send_chars("${oathSnakeOilPassword2}\n")
+        machine.wait_until_tty_matches(4, "Password: ")
+        machine.send_chars("${alicePassword}\n")
+
+        machine.wait_until_succeeds("pgrep -u alice bash")
+        machine.send_chars("touch  done4\n")
+        machine.wait_for_file("/home/alice/done4")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/pam-u2f.nix b/nixpkgs/nixos/tests/pam-u2f.nix
new file mode 100644
index 000000000000..f492baa9e139
--- /dev/null
+++ b/nixpkgs/nixos/tests/pam-u2f.nix
@@ -0,0 +1,25 @@
+import ./make-test-python.nix ({ ... }:
+
+{
+  name = "pam-u2f";
+
+  machine =
+    { ... }:
+    {
+      security.pam.u2f = {
+        control = "required";
+        cue = true;
+        debug = true;
+        enable = true;
+        interactive = true;
+      };
+    };
+
+  testScript =
+    ''
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed(
+          'egrep "auth required .*/lib/security/pam_u2f.so.*debug.*interactive.*cue" /etc/pam.d/ -R'
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/pantheon.nix b/nixpkgs/nixos/tests/pantheon.nix
new file mode 100644
index 000000000000..c0434f20754c
--- /dev/null
+++ b/nixpkgs/nixos/tests/pantheon.nix
@@ -0,0 +1,59 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "pantheon";
+
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = pkgs.pantheon.maintainers;
+  };
+
+  machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+
+    services.xserver.enable = true;
+    services.xserver.desktopManager.pantheon.enable = true;
+
+    virtualisation.memorySize = 1024;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+    bob = nodes.machine.config.users.users.bob;
+  in ''
+    machine.wait_for_unit("display-manager.service")
+
+    with subtest("Test we can see usernames in elementary-greeter"):
+        machine.wait_for_text("${user.description}")
+        # OCR was struggling with this one.
+        # machine.wait_for_text("${bob.description}")
+        machine.screenshot("elementary_greeter_lightdm")
+
+    with subtest("Login with elementary-greeter"):
+        machine.send_chars("${user.password}\n")
+        machine.wait_for_x()
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+    with subtest("Check that logging in has given the user ownership of devices"):
+        machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+    # TODO: DBus API could eliminate this? Pantheon uses Bamf.
+    with subtest("Check if pantheon session components actually start"):
+        machine.wait_until_succeeds("pgrep gala")
+        machine.wait_for_window("gala")
+        machine.wait_until_succeeds("pgrep wingpanel")
+        machine.wait_for_window("wingpanel")
+        machine.wait_until_succeeds("pgrep plank")
+        machine.wait_for_window("plank")
+
+    with subtest("Open elementary terminal"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.terminal &'")
+        machine.wait_for_window("io.elementary.terminal")
+        machine.sleep(20)
+        machine.screenshot("screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/paperless.nix b/nixpkgs/nixos/tests/paperless.nix
new file mode 100644
index 000000000000..355e7041d3fe
--- /dev/null
+++ b/nixpkgs/nixos/tests/paperless.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ lib, ... } : {
+  name = "paperless";
+  meta = with lib.maintainers; {
+    maintainers = [ earvstedt ];
+  };
+
+  machine = { pkgs, ... }: {
+    environment.systemPackages = with pkgs; [ imagemagick jq ];
+    services.paperless = {
+      enable = true;
+      ocrLanguages = [ "eng" ];
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("paperless-consumer.service")
+
+    # Create test doc
+    machine.succeed(
+        "convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black -annotate +5+20 'hello world 16-10-2005' /var/lib/paperless/consume/doc.png"
+    )
+
+    with subtest("Service gets ready"):
+        machine.wait_for_unit("paperless-server.service")
+        # Wait until server accepts connections
+        machine.wait_until_succeeds("curl -s localhost:28981")
+
+    with subtest("Test document is consumed"):
+        machine.wait_until_succeeds(
+            "(($(curl -s localhost:28981/api/documents/ | jq .count) == 1))"
+        )
+        assert "2005-10-16" in machine.succeed(
+            "curl -s localhost:28981/api/documents/ | jq '.results | .[0] | .created'"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/partition.nix b/nixpkgs/nixos/tests/partition.nix
new file mode 100644
index 000000000000..01a08995950f
--- /dev/null
+++ b/nixpkgs/nixos/tests/partition.nix
@@ -0,0 +1,247 @@
+import ./make-test.nix ({ pkgs, ... }:
+
+with pkgs.lib;
+
+let
+  ksExt = pkgs.writeText "ks-ext4" ''
+    clearpart --all --initlabel --drives=vdb
+
+    part /boot --recommended --label=boot --fstype=ext2 --ondisk=vdb
+    part swap --recommended --label=swap --fstype=swap --ondisk=vdb
+    part /nix --size=500 --label=nix --fstype=ext3 --ondisk=vdb
+    part / --recommended --label=root --fstype=ext4 --ondisk=vdb
+  '';
+
+  ksBtrfs = pkgs.writeText "ks-btrfs" ''
+    clearpart --all --initlabel --drives=vdb,vdc
+
+    part swap1 --recommended --label=swap1 --fstype=swap --ondisk=vdb
+    part swap2 --recommended --label=swap2 --fstype=swap --ondisk=vdc
+
+    part btrfs.1 --grow --ondisk=vdb
+    part btrfs.2 --grow --ondisk=vdc
+
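+    # data=0 stripes file data across both disks (RAID0);
+    # metadata=1 mirrors the metadata (RAID1)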
+    btrfs / --data=0 --metadata=1 --label=root btrfs.1 btrfs.2
+  '';
+
+  ksF2fs = pkgs.writeText "ks-f2fs" ''
+    clearpart --all --initlabel --drives=vdb
+
+    part swap  --recommended --label=swap --fstype=swap --ondisk=vdb
+    part /boot --recommended --label=boot --fstype=f2fs --ondisk=vdb
+    part /     --recommended --label=root --fstype=f2fs --ondisk=vdb
+  '';
+
+  ksRaid = pkgs.writeText "ks-raid" ''
+    clearpart --all --initlabel --drives=vdb,vdc
+
+    part raid.01 --size=200 --ondisk=vdb
+    part raid.02 --size=200 --ondisk=vdc
+
+    part swap1 --size=500 --label=swap1 --fstype=swap --ondisk=vdb
+    part swap2 --size=500 --label=swap2 --fstype=swap --ondisk=vdc
+
+    part raid.11 --grow --ondisk=vdb
+    part raid.12 --grow --ondisk=vdc
+
+    raid /boot --level=1 --fstype=ext3 --device=md0 raid.01 raid.02
+    raid / --level=1 --fstype=xfs --device=md1 raid.11 raid.12
+  '';
+
+  ksRaidLvmCrypt = pkgs.writeText "ks-lvm-crypt" ''
+    clearpart --all --initlabel --drives=vdb,vdc
+
+    part raid.1 --grow --ondisk=vdb
+    part raid.2 --grow --ondisk=vdc
+
+    raid pv.0 --level=1 --encrypted --passphrase=x --device=md0 raid.1 raid.2
+
+    volgroup nixos pv.0
+
+    logvol /boot --size=200 --fstype=ext3 --name=boot --vgname=nixos
+    logvol swap --size=500 --fstype=swap --name=swap --vgname=nixos
+    logvol / --size=1000 --grow --fstype=ext4 --name=root --vgname=nixos
+  '';
+in {
+  name = "partitiion";
+
+  machine = { pkgs, ... }: {
+    environment.systemPackages = [
+      pkgs.pythonPackages.nixpart0
+      pkgs.file pkgs.btrfs-progs pkgs.xfsprogs pkgs.lvm2
+    ];
+    virtualisation.emptyDiskImages = [ 4096 4096 ];
+  };
+
+  testScript = ''
+    my $diskStart;
+    my @mtab;
+
+    sub getMtab {
+      my $mounts = $machine->succeed("cat /proc/mounts");
+      chomp $mounts;
+      return map [split], split /\n/, $mounts;
+    }
+
+    sub parttest {
+      my ($desc, $code) = @_;
+      $machine->start;
+      $machine->waitForUnit("default.target");
+
+      # Gather mounts and superblock
+      @mtab = getMtab;
+      $diskStart = $machine->succeed("dd if=/dev/vda bs=512 count=1");
+
+      subtest($desc, $code);
+      $machine->shutdown;
+    }
+
+    sub ensureSanity {
+      # Check whether the filesystem in /dev/vda is still intact
+      my $newDiskStart = $machine->succeed("dd if=/dev/vda bs=512 count=1");
+      if ($diskStart ne $newDiskStart) {
+        $machine->log("Something went wrong, the partitioner wrote " .
+                      "something into the first 512 bytes of /dev/vda!");
+        die;
+      }
+
+      # Check whether nixpart has unmounted anything
+      my @currentMtab = getMtab;
+      for my $mount (@mtab) {
+        my $path = $mount->[1];
+        unless (grep { $_->[1] eq $path } @currentMtab) {
+          $machine->log("The partitioner seems to have unmounted $path.");
+          die;
+        }
+      }
+    }
+
+    sub kickstart {
+      $machine->copyFileFromHost($_[0], "/kickstart");
+      $machine->succeed("nixpart -v /kickstart");
+      ensureSanity;
+    }
+
+    sub ensurePartition {
+      my ($name, $match) = @_;
+      my $path = $name =~ /^\// ? $name : "/dev/disk/by-label/$name";
+      my $out = $machine->succeed("file -Ls $path");
+      my @matches = grep(/^$path: .*$match/i, $out);
+      if (!@matches) {
+        $machine->log("Partition on $path was expected to have a " .
+                      "file system that matches $match, but instead has: $out");
+        die;
+      }
+    }
+
+    sub ensureNoPartition {
+      $machine->succeed("test ! -e /dev/$_[0]");
+    }
+
+    sub ensureMountPoint {
+      $machine->succeed("mountpoint $_[0]");
+    }
+
+    sub remountAndCheck {
+      $machine->nest("Remounting partitions:", sub {
+        # XXX: "findmnt -ARunl -oTARGET /mnt" seems to NOT print all mounts!
+        my $getmounts_cmd = "cat /proc/mounts | cut -d' ' -f2 | grep '^/mnt'";
+        # Insert canaries first
+        my $canaries = $machine->succeed($getmounts_cmd . " | while read p;" .
+                                         " do touch \"\$p/canary\";" .
+                                         " echo \"\$p/canary\"; done");
+        # Now unmount manually
+        $machine->succeed($getmounts_cmd . " | tac | xargs -r umount");
+        # /mnt should be empty or non-existing
+        my $found = $machine->succeed("find /mnt -mindepth 1");
+        chomp $found;
+        if ($found) {
+          $machine->log("Cruft found in /mnt:\n$found");
+          die;
+        }
+        # Try to remount with nixpart
+        $machine->succeed("nixpart -vm /kickstart");
+        ensureMountPoint("/mnt");
+        # Check if our beloved canaries are dead
+        chomp $canaries;
+        $machine->nest("Checking canaries:", sub {
+          for my $canary (split /\n/, $canaries) {
+            $machine->succeed("test -e '$canary'");
+          }
+        });
+      });
+    }
+
+    parttest "ext2, ext3 and ext4 filesystems", sub {
+      kickstart("${ksExt}");
+      ensurePartition("boot", "ext2");
+      ensurePartition("swap", "swap");
+      ensurePartition("nix", "ext3");
+      ensurePartition("root", "ext4");
+      ensurePartition("/dev/vdb4", "boot sector");
+      ensureNoPartition("vdb6");
+      ensureNoPartition("vdc1");
+      remountAndCheck;
+      ensureMountPoint("/mnt/boot");
+      ensureMountPoint("/mnt/nix");
+    };
+
+    parttest "btrfs filesystem", sub {
+      $machine->succeed("modprobe btrfs");
+      kickstart("${ksBtrfs}");
+      ensurePartition("swap1", "swap");
+      ensurePartition("swap2", "swap");
+      ensurePartition("/dev/vdb2", "btrfs");
+      ensurePartition("/dev/vdc2", "btrfs");
+      ensureNoPartition("vdb3");
+      ensureNoPartition("vdc3");
+      remountAndCheck;
+    };
+
+    parttest "f2fs filesystem", sub {
+      $machine->succeed("modprobe f2fs");
+      kickstart("${ksF2fs}");
+      ensurePartition("swap", "swap");
+      ensurePartition("boot", "f2fs");
+      ensurePartition("root", "f2fs");
+      remountAndCheck;
+      ensureMountPoint("/mnt/boot", "f2fs");
+    };
+
+    parttest "RAID1 with XFS", sub {
+      kickstart("${ksRaid}");
+      ensurePartition("swap1", "swap");
+      ensurePartition("swap2", "swap");
+      ensurePartition("/dev/md0", "ext3");
+      ensurePartition("/dev/md1", "xfs");
+      ensureNoPartition("vdb4");
+      ensureNoPartition("vdc4");
+      ensureNoPartition("md2");
+      remountAndCheck;
+      ensureMountPoint("/mnt/boot");
+    };
+
+    parttest "RAID1 with LUKS and LVM", sub {
+      kickstart("${ksRaidLvmCrypt}");
+      ensurePartition("/dev/vdb1", "data");
+      ensureNoPartition("vdb2");
+      ensurePartition("/dev/vdc1", "data");
+      ensureNoPartition("vdc2");
+
+      ensurePartition("/dev/md0", "luks");
+      ensureNoPartition("md1");
+
+      ensurePartition("/dev/nixos/boot", "ext3");
+      ensurePartition("/dev/nixos/swap", "swap");
+      ensurePartition("/dev/nixos/root", "ext4");
+
+      remountAndCheck;
+      ensureMountPoint("/mnt/boot");
+    };
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pdns-recursor.nix b/nixpkgs/nixos/tests/pdns-recursor.nix
new file mode 100644
index 000000000000..de1b60e0b1c7
--- /dev/null
+++ b/nixpkgs/nixos/tests/pdns-recursor.nix
@@ -0,0 +1,12 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "powerdns";
+
+  nodes.server = { ... }: {
+    services.pdns-recursor.enable = true;
+  };
+
+  testScript = ''
+    server.wait_for_unit("pdns-recursor")
+    server.wait_for_open_port("53")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/peerflix.nix b/nixpkgs/nixos/tests/peerflix.nix
new file mode 100644
index 000000000000..37628604d49b
--- /dev/null
+++ b/nixpkgs/nixos/tests/peerflix.nix
@@ -0,0 +1,23 @@
+# This test runs peerflix and checks that its web interface comes up
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "peerflix";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = {
+    peerflix =
+      { ... }:
+        {
+          services.peerflix.enable = true;
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    peerflix.wait_for_unit("peerflix.service")
+    peerflix.wait_until_succeeds("curl localhost:9000")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pgjwt.nix b/nixpkgs/nixos/tests/pgjwt.nix
new file mode 100644
index 000000000000..4793a3e31503
--- /dev/null
+++ b/nixpkgs/nixos/tests/pgjwt.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, lib, ...}:
+
+with pkgs; {
+  name = "pgjwt";
+  meta = with lib.maintainers; {
+    maintainers = [ spinus willibutz ];
+  };
+
+  nodes = {
+    master = { ... }:
+    {
+      services.postgresql = {
+        enable = true;
+        extraPlugins = [ pgjwt pgtap ];
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+  let
+    sqlSU = "${nodes.master.config.services.postgresql.superUser}";
+    pgProve = "${pkgs.perlPackages.TAPParserSourceHandlerpgTAP}";
+  in
+  ''
+    start_all()
+    master.wait_for_unit("postgresql")
+    master.succeed(
+        "${pkgs.gnused}/bin/sed -e '12 i CREATE EXTENSION pgcrypto;\\nCREATE EXTENSION pgtap;\\nSET search_path TO tap,public;' ${pgjwt.src}/test.sql > /tmp/test.sql"
+    )
+    master.succeed(
+        "${pkgs.sudo}/bin/sudo -u ${sqlSU} PGOPTIONS=--search_path=tap,public ${pgProve}/bin/pg_prove -d postgres -v -f /tmp/test.sql"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pgmanage.nix b/nixpkgs/nixos/tests/pgmanage.nix
new file mode 100644
index 000000000000..4f5dbed24a97
--- /dev/null
+++ b/nixpkgs/nixos/tests/pgmanage.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix ({ pkgs, ... } :
+let
+  role     = "test";
+  password = "secret";
+  conn     = "local";
+in
+{
+  name = "pgmanage";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ basvandijk ];
+  };
+  nodes = {
+    one = { config, pkgs, ... }: {
+      services = {
+        postgresql = {
+          enable = true;
+          initialScript = pkgs.writeText "pg-init-script" ''
+            CREATE ROLE ${role} SUPERUSER LOGIN PASSWORD '${password}';
+          '';
+        };
+        pgmanage = {
+          enable = true;
+          connections = {
+            ${conn} = "hostaddr=127.0.0.1 port=${toString config.services.postgresql.port} dbname=postgres";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    one.wait_for_unit("default.target")
+    one.require_unit_state("pgmanage.service", "active")
+
+    # Test if we can log in.
+    one.wait_until_succeeds(
+        "curl 'http://localhost:8080/pgmanage/auth' --data 'action=login&connname=${conn}&username=${role}&password=${password}' --fail"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/php/default.nix b/nixpkgs/nixos/tests/php/default.nix
new file mode 100644
index 000000000000..9ab14f722d08
--- /dev/null
+++ b/nixpkgs/nixos/tests/php/default.nix
@@ -0,0 +1,7 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; }
+}: {
+  fpm = import ./fpm.nix { inherit system pkgs; };
+  pcre = import ./pcre.nix { inherit system pkgs; };
+}
diff --git a/nixpkgs/nixos/tests/php/fpm.nix b/nixpkgs/nixos/tests/php/fpm.nix
new file mode 100644
index 000000000000..e93a31834185
--- /dev/null
+++ b/nixpkgs/nixos/tests/php/fpm.nix
@@ -0,0 +1,55 @@
+import ../make-test-python.nix ({pkgs, ...}: {
+  name = "php-fpm-nginx-test";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ etu ];
+
+  machine = { config, lib, pkgs, ... }: {
+    services.nginx = {
+      enable = true;
+
+      virtualHosts."phpfpm" = let
+        testdir = pkgs.writeTextDir "web/index.php" "<?php phpinfo();";
+      in {
+        root = "${testdir}/web";
+        locations."~ \.php$".extraConfig = ''
+          fastcgi_pass unix:${config.services.phpfpm.pools.foobar.socket};
+          fastcgi_index index.php;
+          include ${pkgs.nginx}/conf/fastcgi_params;
+          include ${pkgs.nginx}/conf/fastcgi.conf;
+        '';
+        locations."/" = {
+          tryFiles = "$uri $uri/ index.php";
+          index = "index.php index.html index.htm";
+        };
+      };
+    };
+
+    services.phpfpm.pools."foobar" = {
+      user = "nginx";
+      settings = {
+        "listen.group" = "nginx";
+        "listen.mode" = "0600";
+        "listen.owner" = "nginx";
+        "pm" = "dynamic";
+        "pm.max_children" = 5;
+        "pm.max_requests" = 500;
+        "pm.max_spare_servers" = 3;
+        "pm.min_spare_servers" = 1;
+        "pm.start_servers" = 2;
+      };
+    };
+  };
+  testScript = { ... }: ''
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_unit("phpfpm-foobar.service")
+
+    # Check so we get an evaluated PHP back
+    assert "PHP Version ${pkgs.php.version}" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+
+    # Check so we have database and some other extensions loaded
+    assert "json" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    assert "opcache" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    assert "pdo_mysql" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    assert "pdo_pgsql" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    assert "pdo_sqlite" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/php/pcre.nix b/nixpkgs/nixos/tests/php/pcre.nix
new file mode 100644
index 000000000000..56a87778579f
--- /dev/null
+++ b/nixpkgs/nixos/tests/php/pcre.nix
@@ -0,0 +1,37 @@
+let
+  testString = "can-use-subgroups";
+in import ../make-test-python.nix ({ ...}: {
+  name = "php-httpd-pcre-jit-test";
+  machine = { lib, pkgs, ... }: {
+    time.timeZone = "UTC";
+    services.httpd = {
+      enable = true;
+      adminAddr = "please@dont.contact";
+      enablePHP = true;
+      phpOptions = "pcre.jit = true";
+      extraConfig = let
+        testRoot = pkgs.writeText "index.php"
+          ''
+            <?php
+            preg_match('/(${testString})/', '${testString}', $result);
+            var_dump($result);
+          '';
+      in
+        ''
+          Alias / ${testRoot}/
+
+          <Directory ${testRoot}>
+            Require all granted
+          </Directory>
+        '';
+    };
+  };
+  testScript = { ... }:
+    ''
+      machine.wait_for_unit("httpd.service")
+      # Ensure php evaluation by matching on the var_dump syntax
+      assert 'string(${toString (builtins.stringLength testString)}) "${testString}"' in machine.succeed(
+          "curl -vvv -s http://127.0.0.1:80/index.php"
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/plasma5.nix b/nixpkgs/nixos/tests/plasma5.nix
new file mode 100644
index 000000000000..2eccfdf47f59
--- /dev/null
+++ b/nixpkgs/nixos/tests/plasma5.nix
@@ -0,0 +1,59 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "plasma5";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ttuegel ];
+  };
+
+  machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.displayManager.sddm.enable = true;
+    services.xserver.displayManager.defaultSession = "plasma5";
+    services.xserver.desktopManager.plasma5.enable = true;
+    services.xserver.displayManager.sddm.autoLogin = {
+      enable = true;
+      user = "alice";
+    };
+    hardware.pulseaudio.enable = true; # needed for the getfacl test: /dev/snd/* exists without it, but udev doesn't attach user ACLs then
+    virtualisation.memorySize = 1024;
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+    xdo = "${pkgs.xdotool}/bin/xdotool";
+  in ''
+    with subtest("Wait for login"):
+        start_all()
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+    with subtest("Check plasmashell started"):
+        machine.wait_until_succeeds("pgrep plasmashell")
+        machine.wait_for_window("^Desktop ")
+
+    with subtest("Check that logging in has given the user ownership of devices"):
+        machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+    with subtest("Run Dolphin"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 dolphin &'")
+        machine.wait_for_window(" Dolphin")
+
+    with subtest("Run Konsole"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 konsole &'")
+        machine.wait_for_window("Konsole")
+
+    with subtest("Run systemsettings"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 systemsettings5 &'")
+        machine.wait_for_window("Settings")
+
+    with subtest("Wait to get a screenshot"):
+        machine.execute(
+            "${xdo} key Alt+F1 sleep 10"
+        )
+        machine.screenshot("screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/plotinus.nix b/nixpkgs/nixos/tests/plotinus.nix
new file mode 100644
index 000000000000..39a4234dbf73
--- /dev/null
+++ b/nixpkgs/nixos/tests/plotinus.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "plotinus";
+  meta = {
+    maintainers = pkgs.plotinus.meta.maintainers;
+  };
+
+  machine =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      programs.plotinus.enable = true;
+      environment.systemPackages = [ pkgs.gnome3.gnome-calculator pkgs.xdotool ];
+    };
+
+  testScript = ''
+    machine.wait_for_x()
+    machine.succeed("gnome-calculator &")
+    machine.wait_for_window("gnome-calculator")
+    machine.succeed(
+        "xdotool search --sync --onlyvisible --class gnome-calculator "
+        + "windowfocus --sync key --clearmodifiers --delay 1 'ctrl+shift+p'"
+    )
+    machine.sleep(5)  # wait for the popup
+    machine.succeed("xdotool key --delay 100 p r e f e r e n c e s Return")
+    machine.wait_for_window("Preferences")
+    machine.screenshot("screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/postgis.nix b/nixpkgs/nixos/tests/postgis.nix
new file mode 100644
index 000000000000..84bbb0bc8ec6
--- /dev/null
+++ b/nixpkgs/nixos/tests/postgis.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "postgis";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lsix ];
+  };
+
+  nodes = {
+    master =
+      { pkgs, ... }:
+
+      {
+        services.postgresql = let mypg = pkgs.postgresql_11; in {
+            enable = true;
+            package = mypg;
+            extraPlugins = with mypg.pkgs; [
+              postgis
+            ];
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    master.wait_for_unit("postgresql")
+    master.sleep(10)  # give the server a moment to accept connections; there is no reliable event to wait for
+    master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis;'")
+    master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis_topology;'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/postgresql-wal-receiver.nix b/nixpkgs/nixos/tests/postgresql-wal-receiver.nix
new file mode 100644
index 000000000000..372dd9d8c1c1
--- /dev/null
+++ b/nixpkgs/nixos/tests/postgresql-wal-receiver.nix
@@ -0,0 +1,99 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; } }:
+
+with import ../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  makePostgresqlWalReceiverTest = subTestName: postgresqlPackage: let
+
+  postgresqlDataDir = "/var/db/postgresql/test";
+  replicationUser = "wal_receiver_user";
+  replicationSlot = "wal_receiver_slot";
+  replicationConn = "postgresql://${replicationUser}@localhost";
+  baseBackupDir = "/tmp/pg_basebackup";
+  walBackupDir = "/tmp/pg_wal";
+  atLeast12 = versionAtLeast postgresqlPackage.version "12.0";
+  restoreCommand = ''
+    restore_command = 'cp ${walBackupDir}/%f %p'
+  '';
+
+  recoveryFile = if atLeast12
+      then pkgs.writeTextDir "recovery.signal" ""
+      else pkgs.writeTextDir "recovery.conf" "${restoreCommand}";
+
+  in makeTest {
+    name = "postgresql-wal-receiver-${subTestName}";
+    meta.maintainers = with maintainers; [ pacien ];
+
+    machine = { ... }: {
+      services.postgresql = {
+        package = postgresqlPackage;
+        enable = true;
+        dataDir = postgresqlDataDir;
+        extraConfig = ''
+          wal_level = archive # alias for replica on pg >= 9.6
+          max_wal_senders = 10
+          max_replication_slots = 10
+        '' + optionalString atLeast12 ''
+          ${restoreCommand}
+          recovery_end_command = 'touch recovery.done'
+        '';
+        authentication = ''
+          host replication ${replicationUser} all trust
+        '';
+        initialScript = pkgs.writeText "init.sql" ''
+          create user ${replicationUser} replication;
+          select * from pg_create_physical_replication_slot('${replicationSlot}');
+        '';
+      };
+
+      services.postgresqlWalReceiver.receivers.main = {
+        inherit postgresqlPackage;
+        connection = replicationConn;
+        slot = replicationSlot;
+        directory = walBackupDir;
+      };
+      # This only speeds up the test; there is no timing race. The service
+      # is set to always restart, and the default 60s restart delay is fine
+      # on a real system but too long for a test.
+      systemd.services.postgresql-wal-receiver-main.serviceConfig.RestartSec = mkForce 5;
+    };
+
+    testScript = ''
+      # make an initial base backup
+      $machine->waitForUnit('postgresql');
+      $machine->waitForUnit('postgresql-wal-receiver-main');
+      # WAL receiver healthchecks PG every 5 seconds, so let's be sure they have connected each other
+      # required only for 9.4
+      $machine->sleep(5);
+      $machine->succeed('${postgresqlPackage}/bin/pg_basebackup --dbname=${replicationConn} --pgdata=${baseBackupDir}');
+
+      # create a dummy table with 100 records
+      $machine->succeed('sudo -u postgres psql --command="create table dummy as select * from generate_series(1, 100) as val;"');
+
+      # stop postgres and destroy data
+      $machine->systemctl('stop postgresql');
+      $machine->systemctl('stop postgresql-wal-receiver-main');
+      $machine->succeed('rm -r ${postgresqlDataDir}/{base,global,pg_*}');
+
+      # restore the base backup
+      $machine->succeed('cp -r ${baseBackupDir}/* ${postgresqlDataDir} && chown postgres:postgres -R ${postgresqlDataDir}');
+
+      # prepare WAL and recovery
+      $machine->succeed('chmod a+rX -R ${walBackupDir}');
+      $machine->execute('for part in ${walBackupDir}/*.partial; do mv $part ''${part%%.*}; done'); # make use of partial segments too
+      $machine->succeed('cp ${recoveryFile}/* ${postgresqlDataDir}/ && chmod 666 ${postgresqlDataDir}/recovery*');
+
+      # replay WAL
+      $machine->systemctl('start postgresql');
+      $machine->waitForFile('${postgresqlDataDir}/recovery.done');
+      $machine->systemctl('restart postgresql');
+      $machine->waitForUnit('postgresql');
+
+      # check that our records have been restored
+      $machine->succeed('test $(sudo -u postgres psql --pset="pager=off" --tuples-only --command="select count(distinct val) from dummy;") -eq 100');
+    '';
+  };
+
+in mapAttrs makePostgresqlWalReceiverTest (import ../../pkgs/servers/sql/postgresql pkgs)
diff --git a/nixpkgs/nixos/tests/postgresql.nix b/nixpkgs/nixos/tests/postgresql.nix
new file mode 100644
index 000000000000..3201e22555ea
--- /dev/null
+++ b/nixpkgs/nixos/tests/postgresql.nix
@@ -0,0 +1,90 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  postgresql-versions = import ../../pkgs/servers/sql/postgresql pkgs;
+  test-sql = pkgs.writeText "postgresql-test" ''
+    CREATE EXTENSION pgcrypto; -- just to check if lib loading works
+    CREATE TABLE sth (
+      id int
+    );
+    INSERT INTO sth (id) VALUES (1);
+    INSERT INTO sth (id) VALUES (1);
+    INSERT INTO sth (id) VALUES (1);
+    INSERT INTO sth (id) VALUES (1);
+    INSERT INTO sth (id) VALUES (1);
+    CREATE TABLE xmltest ( doc xml );
+    INSERT INTO xmltest (doc) VALUES ('<test>ok</test>'); -- check if libxml2 enabled
+  '';
+  make-postgresql-test = postgresql-name: postgresql-package: backup-all: makeTest {
+    name = postgresql-name;
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ zagy ];
+    };
+
+    machine = {...}:
+      {
+        services.postgresql = {
+          enable = true;
+          package = postgresql-package;
+        };
+
+        services.postgresqlBackup = {
+          enable = true;
+          databases = optional (!backup-all) "postgres";
+        };
+      };
+
+    testScript = let
+      backupName = if backup-all then "all" else "postgres";
+      backupService = if backup-all then "postgresqlBackup" else "postgresqlBackup-postgres";
+    in ''
+      def check_count(statement, lines):
+          return 'test $(sudo -u postgres psql postgres -tAc "{}"|wc -l) -eq {}'.format(
+              statement, lines
+          )
+
+
+      machine.start()
+      machine.wait_for_unit("postgresql")
+
+      with subtest("Postgresql is available just after unit start"):
+          machine.succeed(
+              "cat ${test-sql} | sudo -u postgres psql"
+          )
+
+      with subtest("Postgresql survives restart (bug #1735)"):
+          machine.shutdown()
+          time.sleep(2)
+          machine.start()
+          machine.wait_for_unit("postgresql")
+
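+      # The table was seeded with exactly 5 rows; the two failing counts
+      # around the succeeding one guard against a vacuously passing check.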
+      machine.fail(check_count("SELECT * FROM sth;", 3))
+      machine.succeed(check_count("SELECT * FROM sth;", 5))
+      machine.fail(check_count("SELECT * FROM sth;", 4))
+      machine.succeed(check_count("SELECT xpath('/test/text()', doc) FROM xmltest;", 1))
+
+      with subtest("Backup service works"):
+          machine.succeed(
+              "systemctl start ${backupService}.service",
+              "zcat /var/backup/postgresql/${backupName}.sql.gz | grep '<test>ok</test>'",
+              "stat -c '%a' /var/backup/postgresql/${backupName}.sql.gz | grep 600",
+          )
+
+      with subtest("Initdb works"):
+          machine.succeed("sudo -u postgres initdb -D /tmp/testpostgres2")
+
+      machine.shutdown()
+    '';
+
+  };
+in
+  (mapAttrs' (name: package: { inherit name; value=make-postgresql-test name package false;}) postgresql-versions) // {
+    postgresql_11-backup-all = make-postgresql-test "postgresql_11-backup-all" postgresql-versions.postgresql_11 true;
+  }
+
diff --git a/nixpkgs/nixos/tests/powerdns.nix b/nixpkgs/nixos/tests/powerdns.nix
new file mode 100644
index 000000000000..75d71315e644
--- /dev/null
+++ b/nixpkgs/nixos/tests/powerdns.nix
@@ -0,0 +1,13 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "powerdns";
+
+  nodes.server = { ... }: {
+    services.powerdns.enable = true;
+    environment.systemPackages = [ pkgs.dnsutils ];
+  };
+
+  testScript = ''
+    server.wait_for_unit("pdns")
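+    # Query the built-in CHAOS TXT record as a basic liveness check.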
+    server.succeed("dig version.bind txt chaos \@127.0.0.1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pppd.nix b/nixpkgs/nixos/tests/pppd.nix
new file mode 100644
index 000000000000..bda0aa75bb50
--- /dev/null
+++ b/nixpkgs/nixos/tests/pppd.nix
@@ -0,0 +1,62 @@
+import ./make-test-python.nix (
+  let
+    chap-secrets = {
+      text = ''"flynn" * "reindeerflotilla" *'';
+      mode = "0640";
+    };
+  in {
+    name = "pppd";
+
+    nodes = {
+      server = {config, pkgs, ...}: {
+        config = {
+          # Run a PPPoE access concentrator server. It will spawn an
+          # appropriate PPP server process when a PPPoE client sets up a
+          # PPPoE session.
+          systemd.services.pppoe-server = {
+            restartTriggers = [
+              config.environment.etc."ppp/pppoe-server-options".source
+              config.environment.etc."ppp/chap-secrets".source
+            ];
+            after = ["network.target"];
+            serviceConfig = {
+              ExecStart = "${pkgs.rpPPPoE}/sbin/pppoe-server -F -O /etc/ppp/pppoe-server-options -q ${pkgs.ppp}/sbin/pppd -I eth1 -L 192.0.2.1 -R 192.0.2.2";
+            };
+            wantedBy = ["multi-user.target"];
+          };
+          environment.etc = {
+            "ppp/pppoe-server-options".text = ''
+              lcp-echo-interval 10
+              lcp-echo-failure 2
+              plugin rp-pppoe.so
+              require-chap
+              nobsdcomp
+              noccp
+              novj
+            '';
+            "ppp/chap-secrets" = chap-secrets;
+          };
+        };
+      };
+      client = {config, pkgs, ...}: {
+        services.pppd = {
+          enable = true;
+          peers.test = {
+            config = ''
+              plugin rp-pppoe.so eth1
+              name "flynn"
+              noipdefault
+              persist
+              noauth
+              debug
+            '';
+          };
+        };
+        environment.etc."ppp/chap-secrets" = chap-secrets;
+      };
+    };
+
+    testScript = ''
+      start_all()
+      client.wait_until_succeeds("ping -c1 -W1 192.0.2.1")
+      server.wait_until_succeeds("ping -c1 -W1 192.0.2.2")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/predictable-interface-names.nix b/nixpkgs/nixos/tests/predictable-interface-names.nix
new file mode 100644
index 000000000000..bab091d57acf
--- /dev/null
+++ b/nixpkgs/nixos/tests/predictable-interface-names.nix
@@ -0,0 +1,33 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
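+# Build one test for each of the four combinations of (un)predictable
+# interface names with and without systemd-networkd.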
+in pkgs.lib.listToAttrs (pkgs.lib.crossLists (predictable: withNetworkd: {
+  name = pkgs.lib.optionalString (!predictable) "un" + "predictable"
+       + pkgs.lib.optionalString withNetworkd "Networkd";
+  value = makeTest {
+    name = "${if predictable then "" else "un"}predictableInterfaceNames${if withNetworkd then "-with-networkd" else ""}";
+    meta = {};
+
+    machine = { lib, ... }: {
+      networking.usePredictableInterfaceNames = lib.mkForce predictable;
+      networking.useNetworkd = withNetworkd;
+      networking.dhcpcd.enable = !withNetworkd;
+      networking.useDHCP = !withNetworkd;
+
+      # Check if predictable interface names are working in stage-1
+      boot.initrd.postDeviceCommands = ''
+        ip link
+        ip link show eth0 ${if predictable then "&&" else "||"} exit 1
+      '';
+    };
+
+    testScript = ''
+      print(machine.succeed("ip link"))
+      machine.${if predictable then "fail" else "succeed"}("ip link show eth0")
+    '';
+  };
+}) [[true false] [true false]])
diff --git a/nixpkgs/nixos/tests/printing.nix b/nixpkgs/nixos/tests/printing.nix
new file mode 100644
index 000000000000..355c94a03861
--- /dev/null
+++ b/nixpkgs/nixos/tests/printing.nix
@@ -0,0 +1,137 @@
+# Test printing via CUPS.
+
+import ./make-test-python.nix ({pkgs, ... }:
+let
+  printingServer = startWhenNeeded: {
+    services.printing.enable = true;
+    services.printing.startWhenNeeded = startWhenNeeded;
+    services.printing.listenAddresses = [ "*:631" ];
+    services.printing.defaultShared = true;
+    services.printing.extraConf = ''
+      <Location />
+        Order allow,deny
+        Allow from all
+      </Location>
+    '';
+    networking.firewall.allowedTCPPorts = [ 631 ];
+    # Add a HP Deskjet printer connected via USB to the server.
+    hardware.printers.ensurePrinters = [{
+      name = "DeskjetLocal";
+      deviceUri = "usb://foobar/printers/foobar";
+      model = "drv:///sample.drv/deskjet.ppd";
+    }];
+  };
+  printingClient = startWhenNeeded: {
+    services.printing.enable = true;
+    services.printing.startWhenNeeded = startWhenNeeded;
+    # Add printer to the client as well, via IPP.
+    hardware.printers.ensurePrinters = [{
+      name = "DeskjetRemote";
+      deviceUri = "ipp://${if startWhenNeeded then "socketActivatedServer" else "serviceServer"}/printers/DeskjetLocal";
+      model = "drv:///sample.drv/deskjet.ppd";
+    }];
+    hardware.printers.ensureDefaultPrinter = "DeskjetRemote";
+  };
+
+in {
+  name = "printing";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ domenkozar eelco matthewbauer ];
+  };
+
+  nodes = {
+    socketActivatedServer = { ... }: (printingServer true);
+    serviceServer = { ... }: (printingServer false);
+
+    socketActivatedClient = { ... }: (printingClient true);
+    serviceClient = { ... }: (printingClient false);
+  };
+
+  testScript = ''
+    import os
+    import re
+    import sys
+
+    start_all()
+
+    with subtest("Make sure that cups is up on both sides"):
+        serviceServer.wait_for_unit("cups.service")
+        serviceClient.wait_for_unit("cups.service")
+
+    with subtest(
+        "Wait until cups is fully initialized and ensure-printers has "
+        "executed with 10s delay"
+    ):
+        serviceClient.sleep(20)
+        socketActivatedClient.wait_until_succeeds(
+            "systemctl status ensure-printers | grep -q -E 'code=exited, status=0/SUCCESS'"
+        )
+
+
+    def test_printing(client, server):
+        assert "scheduler is running" in client.succeed("lpstat -r")
+
+        with subtest("UNIX socket is used for connections"):
+            assert "/var/run/cups/cups.sock" in client.succeed("lpstat -H")
+        with subtest("HTTP server is available too"):
+            client.succeed("curl --fail http://localhost:631/")
+            client.succeed(f"curl --fail http://{server.name}:631/")
+            server.fail(f"curl --fail --connect-timeout 2 http://{client.name}:631/")
+
+        with subtest("LP status checks"):
+            assert "DeskjetRemote accepting requests" in client.succeed("lpstat -a")
+            assert "DeskjetLocal accepting requests" in client.succeed(
+                f"lpstat -h {server.name}:631 -a"
+            )
+            client.succeed("cupsdisable DeskjetRemote")
+            out = client.succeed("lpq")
+            print(out)
+            assert re.search(
+                "DeskjetRemote is not ready.*no entries",
+                client.succeed("lpq"),
+                flags=re.DOTALL,
+            )
+            client.succeed("cupsenable DeskjetRemote")
+            assert re.match(
+                "DeskjetRemote is ready.*no entries", client.succeed("lpq"), flags=re.DOTALL
+            )
+
+        # Test printing various file types.
+        for file in [
+            "${pkgs.groff.doc}/share/doc/*/examples/mom/penguin.pdf",
+            "${pkgs.groff.doc}/share/doc/*/meref.ps",
+            "${pkgs.cups.out}/share/doc/cups/images/cups.png",
+            "${pkgs.pcre.doc}/share/doc/pcre/pcre.txt",
+        ]:
+            file_name = os.path.basename(file)
+            with subtest(f"print {file_name}"):
+                # Print the file on the client.
+                print(client.succeed("lpq"))
+                client.succeed(f"lp {file}")
+                client.wait_until_succeeds(
+                    f"lpq; lpq | grep -q -E 'active.*root.*{file_name}'"
+                )
+
+                # Ensure that a raw PCL file appeared in the server's queue
+                # (showing that the right filters have been applied).  Of
+                # course, since there is no actual USB printer attached, the
+                # file will stay in the queue forever.
+                server.wait_for_file("/var/spool/cups/d*-001")
+                server.wait_until_succeeds(f"lpq -a | grep -q -E '{file_name}'")
+
+                # Delete the job on the client.  It should disappear on the
+                # server as well.
+                client.succeed("lprm")
+                client.wait_until_succeeds("lpq -a | grep -q -E 'no entries'")
+
+                retry(lambda _: "no entries" in server.succeed("lpq -a"))
+
+                # The queue is empty already, so this should be safe.
+                # Otherwise, pairs of "c*"-"d*-001" files might persist.
+                server.execute("rm /var/spool/cups/*")
+
+
+    test_printing(serviceClient, serviceServer)
+    test_printing(socketActivatedClient, socketActivatedServer)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/prometheus-exporters.nix b/nixpkgs/nixos/tests/prometheus-exporters.nix
new file mode 100644
index 000000000000..4fc3668cfafb
--- /dev/null
+++ b/nixpkgs/nixos/tests/prometheus-exporters.nix
@@ -0,0 +1,552 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  inherit (pkgs.lib) concatStringsSep maintainers mapAttrs mkMerge
+                     removeSuffix replaceChars singleton splitString;
+
+/*
+ * The attrset `exporterTests` contains one attribute
+ * for each exporter test. Each of these attributes
+ * is expected to be an attrset containing:
+ *
+ *  `exporterConfig`:
+ *    this attribute set contains config for the exporter itself
+ *
+ *  `exporterTest`:
+ *    this attribute set contains test instructions
+ *
+ *  `metricProvider` (optional):
+ *    this attribute set contains additional machine config
+ *
+ *  Example:
+ *    exporterTests.<exporterName> = {
+ *      exporterConfig = {
+ *        enable = true;
+ *      };
+ *      metricProvider = {
+ *        services.<metricProvider>.enable = true;
+ *      };
+ *      exporterTest = ''
+ *        wait_for_unit("prometheus-<exporterName>-exporter.service")
+ *        wait_for_open_port("1234")
+ *        succeed("curl -sSf 'localhost:1234/metrics'")
+ *      '';
+ *    };
+ *
+ *  # this would generate the following test config:
+ *
+ *    nodes.<exporterName> = {
+ *      services.prometheus.<exporterName> = {
+ *        enable = true;
+ *      };
+ *      services.<metricProvider>.enable = true;
+ *    };
+ *
+ *    testScript = ''
+ *      <exporterName>.start()
+ *      <exporterName>.wait_for_unit("prometheus-<exporterName>-exporter.service")
+ *      <exporterName>.wait_for_open_port("1234")
+ *      <exporterName>.succeed("curl -sSf 'localhost:1234/metrics'")
+ *      <exporterName>.shutdown()
+ *    '';
+ */
+
+  exporterTests = {
+
+    bind = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.bind.enable = true;
+        services.bind.extraConfig = ''
+          statistics-channels {
+            inet 127.0.0.1 port 8053 allow { localhost; };
+          };
+        '';
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-bind-exporter.service")
+        wait_for_open_port(9119)
+        succeed(
+            "curl -sSf http://localhost:9119/metrics | grep -q 'bind_query_recursions_total 0'"
+        )
+      '';
+    };
+
+    blackbox = {
+      exporterConfig = {
+        enable = true;
+        configFile = pkgs.writeText "config.yml" (builtins.toJSON {
+          modules.icmp_v6 = {
+            prober = "icmp";
+            icmp.preferred_ip_protocol = "ip6";
+          };
+        });
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-blackbox-exporter.service")
+        wait_for_open_port(9115)
+        succeed(
+            "curl -sSf 'http://localhost:9115/probe?target=localhost&module=icmp_v6' | grep -q 'probe_success 1'"
+        )
+      '';
+    };
+
+    collectd = {
+      exporterConfig = {
+        enable = true;
+        extraFlags = [ "--web.collectd-push-path /collectd" ];
+      };
+      exporterTest = let postData = replaceChars [ "\n" ] [ "" ] ''
+        [{
+          "values":[23],
+          "dstypes":["gauge"],
+          "type":"gauge",
+          "interval":1000,
+          "host":"testhost",
+          "plugin":"testplugin",
+          "time":DATE
+        }]
+        ''; in ''
+        wait_for_unit("prometheus-collectd-exporter.service")
+        wait_for_open_port(9103)
+        succeed(
+            'echo \'${postData}\'> /tmp/data.json'
+        )
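+        # Fill in the DATE placeholder with the current UNIX timestamp before
+        # pushing the sample to the exporter.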
+        succeed('sed -ie "s DATE $(date +%s) " /tmp/data.json')
+        succeed(
+            "curl -sSfH 'Content-Type: application/json' -X POST --data @/tmp/data.json localhost:9103/collectd"
+        )
+        succeed(
+            "curl -sSf localhost:9103/metrics | grep -q 'collectd_testplugin_gauge{instance=\"testhost\"} 23'"
+        )
+      '';
+    };
+
+    dnsmasq = {
+      exporterConfig = {
+        enable = true;
+        leasesPath = "/var/lib/dnsmasq/dnsmasq.leases";
+      };
+      metricProvider = {
+        services.dnsmasq.enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-dnsmasq-exporter.service")
+        wait_for_open_port(9153)
+        succeed("curl -sSf http://localhost:9153/metrics | grep -q 'dnsmasq_leases 0'")
+      '';
+    };
+
+    dovecot = {
+      exporterConfig = {
+        enable = true;
+        scopes = [ "global" ];
+        socketPath = "/var/run/dovecot2/old-stats";
+        user = "root"; # <- don't use user root in production
+      };
+      metricProvider = {
+        services.dovecot2.enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-dovecot-exporter.service")
+        wait_for_open_port(9166)
+        succeed(
+            "curl -sSf http://localhost:9166/metrics | grep -q 'dovecot_up{scope=\"global\"} 1'"
+        )
+      '';
+    };
+
+    fritzbox = { # TODO add proper test case
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-fritzbox-exporter.service")
+        wait_for_open_port(9133)
+        succeed(
+            "curl -sSf http://localhost:9133/metrics | grep -q 'fritzbox_exporter_collect_errors 0'"
+        )
+      '';
+    };
+
+    json = {
+      exporterConfig = {
+        enable = true;
+        url = "http://localhost";
+        configFile = pkgs.writeText "json-exporter-conf.json" (builtins.toJSON [{
+          name = "json_test_metric";
+          path = "$.test";
+        }]);
+      };
+      metricProvider = {
+        systemd.services.prometheus-json-exporter.after = [ "nginx.service" ];
+        services.nginx = {
+          enable = true;
+          virtualHosts.localhost.locations."/".extraConfig = ''
+            return 200 "{\"test\":1}";
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("nginx.service")
+        wait_for_open_port(80)
+        wait_for_unit("prometheus-json-exporter.service")
+        wait_for_open_port(7979)
+        succeed("curl -sSf localhost:7979/metrics | grep -q 'json_test_metric 1'")
+      '';
+    };
+
+    mail = {
+      exporterConfig = {
+        enable = true;
+        configuration = {
+          monitoringInterval = "2s";
+          mailCheckTimeout = "10s";
+          servers = [ {
+            name = "testserver";
+            server = "localhost";
+            port = 25;
+            from = "mail-exporter@localhost";
+            to = "mail-exporter@localhost";
+            detectionDir = "/var/spool/mail/mail-exporter/new";
+          } ];
+        };
+      };
+      metricProvider = {
+        services.postfix.enable = true;
+        systemd.services.prometheus-mail-exporter = {
+          after = [ "postfix.service" ];
+          requires = [ "postfix.service" ];
+          preStart = ''
+            mkdir -p -m 0700 mail-exporter/new
+          '';
+          serviceConfig = {
+            ProtectHome = true;
+            ReadOnlyPaths = "/";
+            ReadWritePaths = "/var/spool/mail";
+            WorkingDirectory = "/var/spool/mail";
+          };
+        };
+        users.users.mailexporter.isSystemUser = true;
+      };
+      exporterTest = ''
+        wait_for_unit("postfix.service")
+        wait_for_unit("prometheus-mail-exporter.service")
+        wait_for_open_port(9225)
+        wait_until_succeeds(
+            "curl -sSf http://localhost:9225/metrics | grep -q 'mail_deliver_success{configname=\"testserver\"} 1'"
+        )
+      '';
+    };
+
+    mikrotik = {
+      exporterConfig = {
+        enable = true;
+        extraFlags = [ "-timeout=1s" ];
+        configuration = {
+          devices = [
+            {
+              name = "router";
+              address = "192.168.42.48";
+              user = "prometheus";
+              password = "shh";
+            }
+          ];
+          features = {
+            bgp = true;
+            dhcp = true;
+            dhcpl = true;
+            dhcpv6 = true;
+            health = true;
+            routes = true;
+            poe = true;
+            pools = true;
+            optics = true;
+            w60g = true;
+            wlansta = true;
+            wlanif = true;
+            monitor = true;
+            ipsec = true;
+          };
+        };
+      };
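+      # No RouterOS device is reachable at the configured address, so the
+      # collector is expected to report scrape success 0 for it.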
+      exporterTest = ''
+        wait_for_unit("prometheus-mikrotik-exporter.service")
+        wait_for_open_port(9436)
+        succeed(
+            "curl -sSf http://localhost:9436/metrics | grep -q 'mikrotik_scrape_collector_success{device=\"router\"} 0'"
+        )
+      '';
+    };
+
+    nextcloud = {
+      exporterConfig = {
+        enable = true;
+        passwordFile = "/var/nextcloud-pwfile";
+        url = "http://localhost/negative-space.xml";
+      };
+      metricProvider = {
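+        # One-off unit that installs the exporter's password file before the
+        # exporter starts.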
+        systemd.services.nc-pwfile = let
+          passfile = (pkgs.writeText "pwfile" "snakeoilpw");
+        in {
+          requiredBy = [ "prometheus-nextcloud-exporter.service" ];
+          before = [ "prometheus-nextcloud-exporter.service" ];
+          serviceConfig.ExecStart = ''
+            ${pkgs.coreutils}/bin/install -o nextcloud-exporter -m 0400 ${passfile} /var/nextcloud-pwfile
+          '';
+        };
+        services.nginx = {
+          enable = true;
+          virtualHosts."localhost" = {
+            basicAuth.nextcloud-exporter = "snakeoilpw";
+            locations."/" = {
+              root = "${pkgs.prometheus-nextcloud-exporter.src}/serverinfo/testdata";
+            };
+          };
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("nginx.service")
+        wait_for_unit("prometheus-nextcloud-exporter.service")
+        wait_for_open_port(9205)
+        succeed("curl -sSf http://localhost:9205/metrics | grep -q 'nextcloud_up 1'")
+      '';
+    };
+
+    nginx = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.nginx = {
+          enable = true;
+          statusPage = true;
+          virtualHosts."test".extraConfig = "return 204;";
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("nginx.service")
+        wait_for_unit("prometheus-nginx-exporter.service")
+        wait_for_open_port(9113)
+        succeed("curl -sSf http://localhost:9113/metrics | grep -q 'nginx_up 1'")
+      '';
+    };
+
+    node = {
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-node-exporter.service")
+        wait_for_open_port(9100)
+        succeed(
+            "curl -sSf http://localhost:9100/metrics | grep -q 'node_exporter_build_info{.\\+} 1'"
+        )
+      '';
+    };
+
+    postfix = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.postfix.enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-postfix-exporter.service")
+        wait_for_open_port(9154)
+        succeed(
+            "curl -sSf http://localhost:9154/metrics | grep -q 'postfix_smtpd_connects_total 0'"
+        )
+      '';
+    };
+
+    postgres = {
+      exporterConfig = {
+        enable = true;
+        runAsLocalSuperUser = true;
+      };
+      metricProvider = {
+        services.postgresql.enable = true;
+      };
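+      # pg_up should drop to 0 while PostgreSQL is stopped and return to 1
+      # once the service is back.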
+      exporterTest = ''
+        wait_for_unit("prometheus-postgres-exporter.service")
+        wait_for_open_port(9187)
+        wait_for_unit("postgresql.service")
+        succeed(
+            "curl -sSf http://localhost:9187/metrics | grep -q 'pg_exporter_last_scrape_error 0'"
+        )
+        succeed("curl -sSf http://localhost:9187/metrics | grep -q 'pg_up 1'")
+        systemctl("stop postgresql.service")
+        succeed(
+            "curl -sSf http://localhost:9187/metrics | grep -qv 'pg_exporter_last_scrape_error 0'"
+        )
+        succeed("curl -sSf http://localhost:9187/metrics | grep -q 'pg_up 0'")
+        systemctl("start postgresql.service")
+        wait_for_unit("postgresql.service")
+        succeed(
+            "curl -sSf http://localhost:9187/metrics | grep -q 'pg_exporter_last_scrape_error 0'"
+        )
+        succeed("curl -sSf http://localhost:9187/metrics | grep -q 'pg_up 1'")
+      '';
+    };
+
+    rspamd = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.rspamd.enable = true;
+        virtualisation.memorySize = 1024;
+      };
+      exporterTest = ''
+        wait_for_unit("rspamd.service")
+        wait_for_unit("prometheus-rspamd-exporter.service")
+        wait_for_open_port(11334)
+        wait_for_open_port(7980)
+        wait_until_succeeds(
+            "curl -sSf localhost:7980/metrics | grep -q 'rspamd_scanned{host=\"rspamd\"} 0'"
+        )
+      '';
+    };
+
+    snmp = {
+      exporterConfig = {
+        enable = true;
+        configuration.default = {
+          version = 2;
+          auth.community = "public";
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-snmp-exporter.service")
+        wait_for_open_port(9116)
+        succeed("curl -sSf localhost:9116/metrics | grep -q 'snmp_request_errors_total 0'")
+      '';
+    };
+
+    surfboard = {
+      exporterConfig = {
+        enable = true;
+        modemAddress = "localhost";
+      };
+      metricProvider = {
+        systemd.services.prometheus-surfboard-exporter.after = [ "nginx.service" ];
+        services.nginx = {
+          enable = true;
+          virtualHosts.localhost.locations."/cgi-bin/status".extraConfig = ''
+            return 204;
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("nginx.service")
+        wait_for_open_port(80)
+        wait_for_unit("prometheus-surfboard-exporter.service")
+        wait_for_open_port(9239)
+        succeed("curl -sSf localhost:9239/metrics | grep -q 'surfboard_up 1'")
+      '';
+    };
+
+    tor = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        # Note: this does not connect the test environment to the Tor network.
+        # Client, relay, bridge or exit connectivity are disabled by default.
+        services.tor.enable = true;
+        services.tor.controlPort = 9051;
+      };
+      exporterTest = ''
+        wait_for_unit("tor.service")
+        wait_for_open_port(9051)
+        wait_for_unit("prometheus-tor-exporter.service")
+        wait_for_open_port(9130)
+        succeed("curl -sSf localhost:9130/metrics | grep -q 'tor_version{.\\+} 1'")
+      '';
+    };
+
+    varnish = {
+      exporterConfig = {
+        enable = true;
+        instance = "/var/spool/varnish/varnish";
+        group = "varnish";
+      };
+      metricProvider = {
+        systemd.services.prometheus-varnish-exporter.after = [
+          "varnish.service"
+        ];
+        services.varnish = {
+          enable = true;
+          config = ''
+            vcl 4.0;
+            backend default {
+              .host = "127.0.0.1";
+              .port = "80";
+            }
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-varnish-exporter.service")
+        wait_for_open_port(6081)
+        wait_for_open_port(9131)
+        succeed("curl -sSf http://localhost:9131/metrics | grep -q 'varnish_up 1'")
+      '';
+    };
+
+    wireguard = let snakeoil = import ./wireguard/snakeoil-keys.nix; in {
+      exporterConfig.enable = true;
+      metricProvider = {
+        networking.wireguard.interfaces.wg0 = {
+          ips = [ "10.23.42.1/32" "fc00::1/128" ];
+          listenPort = 23542;
+
+          inherit (snakeoil.peer0) privateKey;
+
+          peers = singleton {
+            allowedIPs = [ "10.23.42.2/32" "fc00::2/128" ];
+
+            inherit (snakeoil.peer1) publicKey;
+          };
+        };
+        systemd.services.prometheus-wireguard-exporter.after = [ "wireguard-wg0.service" ];
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-wireguard-exporter.service")
+        wait_for_open_port(9586)
+        wait_until_succeeds(
+            "curl -sSf http://localhost:9586/metrics | grep '${snakeoil.peer1.publicKey}'"
+        )
+      '';
+    };
+  };
+in
+mapAttrs (exporter: testConfig: (makeTest {
+  name = "prometheus-${exporter}-exporter";
+
+  nodes.${exporter} = mkMerge [{
+    services.prometheus.exporters.${exporter} = testConfig.exporterConfig;
+  } testConfig.metricProvider or {}];
+
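+  # Prefix each line of the exporter's test snippet with the node name, except
+  # for indented continuation lines and closing parens, so bare driver calls
+  # like wait_for_unit(...) run on the exporter's machine.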
+  testScript = ''
+    ${exporter}.start()
+    ${concatStringsSep "\n" (map (line:
+      if (builtins.substring 0 1 line == " " || builtins.substring 0 1 line == ")")
+      then line
+      else "${exporter}.${line}"
+    ) (splitString "\n" (removeSuffix "\n" testConfig.exporterTest)))}
+    ${exporter}.shutdown()
+  '';
+
+  meta = with maintainers; {
+    maintainers = [ willibutz ];
+  };
+})) exporterTests
diff --git a/nixpkgs/nixos/tests/prometheus.nix b/nixpkgs/nixos/tests/prometheus.nix
new file mode 100644
index 000000000000..8bfd0c131e61
--- /dev/null
+++ b/nixpkgs/nixos/tests/prometheus.nix
@@ -0,0 +1,245 @@
+let
+  grpcPort   = 19090;
+  queryPort  =  9090;
+  minioPort  =  9000;
+  pushgwPort =  9091;
+
+  s3 = {
+    accessKey = "BKIKJAA5BMMU2RHO6IBB";
+    secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
+  };
+
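+  # Object-store settings shared by the Thanos sidecar (which uploads TSDB
+  # blocks to Minio) and the store/compact services (which read them back).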
+  objstore.config = {
+    type = "S3";
+    config = {
+      bucket = "thanos-bucket";
+      endpoint = "s3:${toString minioPort}";
+      region = "us-east-1";
+      access_key = s3.accessKey;
+      secret_key = s3.secretKey;
+      insecure = true;
+      signature_version2 = false;
+      encrypt_sse = false;
+      put_user_metadata = {};
+      http_config = {
+        idle_conn_timeout = "0s";
+        insecure_skip_verify = false;
+      };
+      trace = {
+        enable = false;
+      };
+    };
+  };
+
+in import ./make-test-python.nix {
+  name = "prometheus";
+
+  nodes = {
+    prometheus = { pkgs, ... }: {
+      virtualisation.diskSize = 2 * 1024;
+      environment.systemPackages = [ pkgs.jq ];
+      networking.firewall.allowedTCPPorts = [ grpcPort ];
+      services.prometheus = {
+        enable = true;
+        scrapeConfigs = [
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [ "127.0.0.1:${toString queryPort}" ];
+                labels = { instance = "localhost"; };
+              }
+            ];
+          }
+          {
+            job_name = "pushgateway";
+            scrape_interval = "1s";
+            static_configs = [
+              {
+                targets = [ "127.0.0.1:${toString pushgwPort}" ];
+              }
+            ];
+          }
+        ];
+        rules = [
+          ''
+            groups:
+              - name: test
+                rules:
+                  - record: testrule
+                    expr: count(up{job="prometheus"})
+          ''
+        ];
+        globalConfig = {
+          external_labels = {
+            some_label = "required by thanos";
+          };
+        };
+        extraFlags = [
+          # Required by thanos: equal min/max block durations disable local
+          # compaction, and the short 5s blocks become available for upload
+          # quickly during the test.
+          "--storage.tsdb.min-block-duration=5s"
+          "--storage.tsdb.max-block-duration=5s"
+        ];
+      };
+      services.prometheus.pushgateway = {
+        enable = true;
+        web.listen-address = ":${toString pushgwPort}";
+        persistMetrics = true;
+        persistence.interval = "1s";
+        stateDir = "prometheus-pushgateway";
+      };
+      services.thanos = {
+        sidecar = {
+          enable = true;
+          grpc-address = "0.0.0.0:${toString grpcPort}";
+          inherit objstore;
+        };
+
+        # TODO: Add some tests for these services:
+        #rule = {
+        #  enable = true;
+        #  http-address = "0.0.0.0:19194";
+        #  grpc-address = "0.0.0.0:19193";
+        #  query.addresses = [
+        #    "localhost:19191"
+        #  ];
+        #  labels = {
+        #    just = "some";
+        #    nice = "labels";
+        #  };
+        #};
+        #
+        #receive = {
+        #  http-address = "0.0.0.0:19195";
+        #  enable = true;
+        #  labels = {
+        #    just = "some";
+        #    nice = "labels";
+        #  };
+        #};
+      };
+    };
+
+    query = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      services.thanos.query = {
+        enable = true;
+        http-address = "0.0.0.0:${toString queryPort}";
+        store.addresses = [
+          "prometheus:${toString grpcPort}"
+        ];
+      };
+    };
+
+    store = { pkgs, ... }: {
+      virtualisation.diskSize = 2 * 1024;
+      environment.systemPackages = with pkgs; [ jq thanos ];
+      services.thanos.store = {
+        enable = true;
+        http-address = "0.0.0.0:10902";
+        grpc-address = "0.0.0.0:${toString grpcPort}";
+        inherit objstore;
+        sync-block-duration = "1s";
+      };
+      services.thanos.compact = {
+        enable = true;
+        http-address = "0.0.0.0:10903";
+        inherit objstore;
+        consistency-delay = "5s";
+      };
+      services.thanos.query = {
+        enable = true;
+        http-address = "0.0.0.0:${toString queryPort}";
+        store.addresses = [
+          "localhost:${toString grpcPort}"
+        ];
+      };
+    };
+
+    s3 = { pkgs, ... } : {
+      # Minio requires at least 1GiB of free disk space to run.
+      virtualisation.diskSize = 2 * 1024;
+      networking.firewall.allowedTCPPorts = [ minioPort ];
+
+      services.minio = {
+        enable = true;
+        inherit (s3) accessKey secretKey;
+      };
+
+      environment.systemPackages = [ pkgs.minio-client ];
+    };
+  };
+
+  testScript = { nodes, ... } : ''
+    # Before starting the other machines we first make sure that our S3 service is online
+    # and has a bucket added for thanos:
+    s3.start()
+    s3.wait_for_unit("minio.service")
+    s3.wait_for_open_port(${toString minioPort})
+    s3.succeed(
+        "mc config host add minio "
+        + "http://localhost:${toString minioPort} "
+        + "${s3.accessKey} ${s3.secretKey} S3v4",
+        "mc mb minio/thanos-bucket",
+    )
+
+    # Now that s3 has started we can start the other machines:
+    for machine in prometheus, query, store:
+        machine.start()
+
+    # Check if prometheus responds to requests:
+    prometheus.wait_for_unit("prometheus.service")
+    prometheus.wait_for_open_port(${toString queryPort})
+    prometheus.succeed("curl -s http://127.0.0.1:${toString queryPort}/metrics")
+
+    # Let's test if pushing a metric to the pushgateway succeeds:
+    prometheus.wait_for_unit("pushgateway.service")
+    prometheus.succeed(
+        "echo 'some_metric 3.14' | "
+        + "curl --data-binary \@- "
+        + "http://127.0.0.1:${toString pushgwPort}/metrics/job/some_job"
+    )
+
+    # Now check whether that metric gets ingested by prometheus.
+    # Since we'll check for the metric several times on different machines
+    # we abstract the test using the following function:
+
+    # Check that the metric "some_metric" has been received and has the correct value.
+    def wait_for_metric(machine):
+        return machine.wait_until_succeeds(
+            "curl -sf 'http://127.0.0.1:${toString queryPort}/api/v1/query?query=some_metric' | "
+            + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
+        )
+
+
+    wait_for_metric(prometheus)
+
+    # Let's test if the pushgateway persists metrics to the configured location.
+    prometheus.wait_until_succeeds("test -e /var/lib/prometheus-pushgateway/metrics")
+
+    # Test thanos
+    prometheus.wait_for_unit("thanos-sidecar.service")
+
+    # Test if the Thanos query service can correctly retrieve the metric that was sent above.
+    query.wait_for_unit("thanos-query.service")
+    wait_for_metric(query)
+
+    # Test if the Thanos sidecar has correctly uploaded its TSDB to S3, if the
+    # Thanos storage service has correctly downloaded it from S3 and if the Thanos
+    # query service running on the store node can correctly retrieve the metric:
+    store.wait_for_unit("thanos-store.service")
+    wait_for_metric(store)
+
+    store.wait_for_unit("thanos-compact.service")
+
+    # Test if the Thanos bucket command is able to retrieve blocks from the S3 bucket
+    # and check if the blocks have the correct labels:
+    store.succeed(
+        "thanos bucket ls "
+        + "--objstore.config-file=${nodes.store.config.services.thanos.store.objstore.config-file} "
+        + "--output=json | "
+        + "jq .thanos.labels.some_label | "
+        + "grep 'required by thanos'"
+    )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/proxy.nix b/nixpkgs/nixos/tests/proxy.nix
new file mode 100644
index 000000000000..6a14a9af59ae
--- /dev/null
+++ b/nixpkgs/nixos/tests/proxy.nix
@@ -0,0 +1,90 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  backend = { pkgs, ... }: {
+    services.httpd = {
+      enable = true;
+      adminAddr = "foo@example.org";
+      virtualHosts.localhost.documentRoot = "${pkgs.valgrind.doc}/share/doc/valgrind/html";
+    };
+    networking.firewall.allowedTCPPorts = [ 80 ];
+  };
+in {
+  name = "proxy";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes = {
+    proxy = { nodes, ... }: {
+      services.httpd = {
+        enable = true;
+        adminAddr = "bar@example.org";
+        extraModules = [ "proxy_balancer" "lbmethod_byrequests" ];
+        extraConfig = ''
+          ExtendedStatus on
+        '';
+        virtualHosts.localhost = {
+          extraConfig = ''
+            <Location /server-status>
+              Require all granted
+              SetHandler server-status
+            </Location>
+
+            <Proxy balancer://cluster>
+              Require all granted
+              BalancerMember http://${nodes.backend1.config.networking.hostName} retry=0
+              BalancerMember http://${nodes.backend2.config.networking.hostName} retry=0
+            </Proxy>
+
+            ProxyStatus       full
+            ProxyPass         /server-status !
+            ProxyPass         /       balancer://cluster/
+            ProxyPassReverse  /       balancer://cluster/
+
+            # For testing; don't want to wait forever for dead backend servers.
+            ProxyTimeout      5
+          '';
+        };
+      };
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+
+    backend1 = backend;
+    backend2 = backend;
+
+    client = { ... }: { };
+  };
+
+  testScript = ''
+    start_all()
+
+    proxy.wait_for_unit("httpd")
+    backend1.wait_for_unit("httpd")
+    backend2.wait_for_unit("httpd")
+    client.wait_for_unit("network.target")
+
+    # With the back-ends up, the proxy should work.
+    client.succeed("curl --fail http://proxy/")
+
+    client.succeed("curl --fail http://proxy/server-status")
+
+    # Block the first back-end.
+    backend1.block()
+
+    # The proxy should still work.
+    client.succeed("curl --fail http://proxy/")
+    client.succeed("curl --fail http://proxy/")
+
+    # Block the second back-end.
+    backend2.block()
+
+    # Now the proxy should fail as well.
+    client.fail("curl --fail http://proxy/")
+
+    # But if the second back-end comes back, the proxy should start
+    # working again.
+    backend2.unblock()
+    client.succeed("curl --fail http://proxy/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/quagga.nix b/nixpkgs/nixos/tests/quagga.nix
new file mode 100644
index 000000000000..04590aa0eb38
--- /dev/null
+++ b/nixpkgs/nixos/tests/quagga.nix
@@ -0,0 +1,96 @@
+# This test runs Quagga and checks if OSPF routing works.
+#
+# Network topology:
+#   [ client ]--net1--[ router1 ]--net2--[ router2 ]--net3--[ server ]
+#
+# All interfaces are in OSPF Area 0.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+
+    ifAddr = node: iface: (pkgs.lib.head node.config.networking.interfaces.${iface}.ipv4.addresses).address;
+
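+    # Aggressive hello/dead intervals so OSPF adjacencies form quickly in the test.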
+    ospfConf = ''
+      interface eth2
+        ip ospf hello-interval 1
+        ip ospf dead-interval 5
+      !
+      router ospf
+        network 192.168.0.0/16 area 0
+    '';
+
+  in
+    {
+      name = "quagga";
+
+      meta = with pkgs.stdenv.lib.maintainers; {
+        maintainers = [ tavyc ];
+      };
+
+      nodes = {
+
+        client =
+          { nodes, ... }:
+          {
+            virtualisation.vlans = [ 1 ];
+            networking.defaultGateway = ifAddr nodes.router1 "eth1";
+          };
+
+        router1 =
+          { ... }:
+          {
+            virtualisation.vlans = [ 1 2 ];
+            boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
+            networking.firewall.extraCommands = "iptables -A nixos-fw -i eth2 -p ospf -j ACCEPT";
+            services.quagga.ospf = {
+              enable = true;
+              config = ospfConf;
+            };
+          };
+
+        router2 =
+          { ... }:
+          {
+            virtualisation.vlans = [ 3 2 ];
+            boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
+            networking.firewall.extraCommands = "iptables -A nixos-fw -i eth2 -p ospf -j ACCEPT";
+            services.quagga.ospf = {
+              enable = true;
+              config = ospfConf;
+            };
+          };
+
+        server =
+          { nodes, ... }:
+          {
+            virtualisation.vlans = [ 3 ];
+            networking.defaultGateway = ifAddr nodes.router2 "eth1";
+            networking.firewall.allowedTCPPorts = [ 80 ];
+            services.httpd.enable = true;
+            services.httpd.adminAddr = "foo@example.com";
+          };
+      };
+
+      testScript =
+        { ... }:
+        ''
+          start_all()
+
+          # Wait for the networking to start on all machines
+          for machine in client, router1, router2, server:
+              machine.wait_for_unit("network.target")
+
+          with subtest("Wait for OSPF to form adjacencies"):
+              for gw in router1, router2:
+                  gw.wait_for_unit("ospfd")
+                  gw.wait_until_succeeds("vtysh -c 'show ip ospf neighbor' | grep Full")
+                  gw.wait_until_succeeds("vtysh -c 'show ip route' | grep '^O>'")
+
+          with subtest("Test ICMP"):
+              client.wait_until_succeeds("ping -c 3 server >&2")
+
+          with subtest("Test whether HTTP works"):
+              server.wait_for_unit("httpd")
+              client.succeed("curl --fail http://server/ >&2")
+        '';
+    })
diff --git a/nixpkgs/nixos/tests/quorum.nix b/nixpkgs/nixos/tests/quorum.nix
new file mode 100644
index 000000000000..846d2a930188
--- /dev/null
+++ b/nixpkgs/nixos/tests/quorum.nix
@@ -0,0 +1,79 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "quorum";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mmahut ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.quorum = {
+        enable = true;
+        permissioned = false;
+        staticNodes = [ "enode://dd333ec28f0a8910c92eb4d336461eea1c20803eed9cf2c056557f986e720f8e693605bba2f4e8f289b1162e5ac7c80c914c7178130711e393ca76abc1d92f57@0.0.0.0:30303?discport=0" ];
+        genesis = {
+          alloc = {
+            "189d23d201b03ae1cf9113672df29a5d672aefa3" = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+            "44b07d2c28b8ed8f02b45bd84ac7d9051b3349e6" = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+            "4c1ccd426833b9782729a212c857f2f03b7b4c0d" = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+            "7ae555d0f6faad7930434abdaac2274fd86ab516" = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+            c1056df7c02b6f1a353052eaf0533cc7cb743b52 = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+          };
+          coinbase = "0x0000000000000000000000000000000000000000";
+          config = {
+            byzantiumBlock = 1;
+            chainId = 10;
+            eip150Block = 1;
+            eip150Hash =
+              "0x0000000000000000000000000000000000000000000000000000000000000000";
+            eip155Block = 1;
+            eip158Block = 1;
+            isQuorum = true;
+            istanbul = {
+              epoch = 30000;
+              policy = 0;
+            };
+          };
+          difficulty = "0x1";
+          extraData =
+            "0x0000000000000000000000000000000000000000000000000000000000000000f8aff869944c1ccd426833b9782729a212c857f2f03b7b4c0d94189d23d201b03ae1cf9113672df29a5d672aefa39444b07d2c28b8ed8f02b45bd84ac7d9051b3349e694c1056df7c02b6f1a353052eaf0533cc7cb743b52947ae555d0f6faad7930434abdaac2274fd86ab516b8410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0";
+          gasLimit = "0xe0000000";
+          gasUsed = "0x0";
+          mixHash =
+            "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365";
+          nonce = "0x0";
+          number = "0x0";
+          parentHash =
+            "0x0000000000000000000000000000000000000000000000000000000000000000";
+          timestamp = "0x5cffc201";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
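+    # Provision a pre-generated account keystore and node key, then restart
+    # quorum so the daemon picks them up.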
+    machine.wait_until_succeeds("mkdir -p /var/lib/quorum/keystore")
+    machine.wait_until_succeeds(
+        'echo \{\\"address\\":\\"9377bc3936de934c497e22917b81aa8774ac3bb0\\",\\"crypto\\":\{\\"cipher\\":\\"aes-128-ctr\\",\\"ciphertext\\":\\"ad8341d8ef225650403fd366c955f41095e438dd966a3c84b3d406818c1e366c\\",\\"cipherparams\\":\{\\"iv\\":\\"2a09f7a72fd6dff7c43150ff437e6ac2\\"\},\\"kdf\\":\\"scrypt\\",\\"kdfparams\\":\{\\"dklen\\":32,\\"n\\":262144,\\"p\\":1,\\"r\\":8,\\"salt\\":\\"d1a153845bb80cd6274c87c5bac8ac09fdfac5ff131a6f41b5ed319667f12027\\"\},\\"mac\\":\\"a9621ad88fa1d042acca6fc2fcd711f7e05bfbadea3f30f379235570c8e270d3\\"\},\\"id\\":\\"89e847a3-1527-42f6-a321-77de0a14ce02\\",\\"version\\":3\}\\" > /var/lib/quorum/keystore/UTC--2020-03-23T11-08-34.144812212Z--9377bc3936de934c497e22917b81aa8774ac3bb0'
+    )
+    machine.wait_until_succeeds(
+        "echo fe2725c4e8f7617764b845e8d939a65c664e7956eb47ed7d934573f16488efc1 > /var/lib/quorum/nodekey"
+    )
+    machine.wait_until_succeeds("systemctl restart quorum")
+    machine.wait_for_unit("quorum.service")
+    machine.sleep(15)
+    machine.wait_until_succeeds(
+        'geth attach /var/lib/quorum/geth.ipc --exec "eth.accounts" | grep 0x9377bc3936de934c497e22917b81aa8774ac3bb0'
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/rabbitmq.nix b/nixpkgs/nixos/tests/rabbitmq.nix
new file mode 100644
index 000000000000..f403e4ac2edc
--- /dev/null
+++ b/nixpkgs/nixos/tests/rabbitmq.nix
@@ -0,0 +1,21 @@
+# This test runs rabbitmq and checks if rabbitmq is up and running.
+
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "rabbitmq";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco offline ];
+  };
+
+  machine = {
+    services.rabbitmq.enable = true;
+  };
+
+  testScript = ''
+    machine.start()
+
+    machine.wait_for_unit("rabbitmq.service")
+    machine.wait_until_succeeds(
+        'su -s ${pkgs.runtimeShell} rabbitmq -c "rabbitmqctl status"'
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/radarr.nix b/nixpkgs/nixos/tests/radarr.nix
new file mode 100644
index 000000000000..ed90025ac420
--- /dev/null
+++ b/nixpkgs/nixos/tests/radarr.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "radarr";
+  meta.maintainers = with maintainers; [ etu ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.radarr.enable = true; };
+
+  testScript = ''
+    machine.wait_for_unit("radarr.service")
+    machine.wait_for_open_port(7878)
+    machine.succeed("curl --fail http://localhost:7878/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/radicale.nix b/nixpkgs/nixos/tests/radicale.nix
new file mode 100644
index 000000000000..c81e78a8f994
--- /dev/null
+++ b/nixpkgs/nixos/tests/radicale.nix
@@ -0,0 +1,122 @@
+let
+  user = "someuser";
+  password = "some_password";
+  port = builtins.toString 5232;
+
+  common = { pkgs, ... }: {
+    services.radicale = {
+      enable = true;
+      config = ''
+        [auth]
+        type = htpasswd
+        htpasswd_filename = /etc/radicale/htpasswd
+        htpasswd_encryption = bcrypt
+
+        [storage]
+        filesystem_folder = /tmp/collections
+
+        [logging]
+        debug = True
+      '';
+    };
+    # WARNING: DON'T DO THIS IN PRODUCTION!
+    # This puts unhashed secrets directly into the Nix store for ease of testing.
+    environment.etc."radicale/htpasswd".source = pkgs.runCommand "htpasswd" {} ''
+      ${pkgs.apacheHttpd}/bin/htpasswd -bcB "$out" ${user} ${password}
+    '';
+  };
+
+in
+
+  import ./make-test-python.nix ({ lib, ... }@args: {
+    name = "radicale";
+    meta.maintainers = with lib.maintainers; [ aneeshusa infinisil ];
+
+    nodes = rec {
+      radicale = radicale1; # Make the test script read more nicely
+      radicale1 = lib.recursiveUpdate (common args) {
+        nixpkgs.overlays = [
+          (self: super: {
+            radicale1 = super.radicale1.overrideAttrs (oldAttrs: {
+              propagatedBuildInputs = with self.pythonPackages;
+                (oldAttrs.propagatedBuildInputs or []) ++ [ passlib ];
+            });
+          })
+        ];
+        system.stateVersion = "17.03";
+      };
+      radicale1_export = lib.recursiveUpdate radicale1 {
+        services.radicale.extraArgs = [
+          "--export-storage" "/tmp/collections-new"
+        ];
+      };
+      radicale2_verify = lib.recursiveUpdate radicale2 {
+        services.radicale.extraArgs = [ "--verify-storage" ];
+      };
+      radicale2 = lib.recursiveUpdate (common args) {
+        system.stateVersion = "17.09";
+      };
+    };
+
+    # This tests whether the web interface is accessible to an authenticated user
+    testScript = { nodes }: let
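+      # Activate another node's configuration in place, so a single VM can be
+      # stepped through the Radicale 1 -> 2 migration.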
+      switchToConfig = nodeName: let
+        newSystem = nodes.${nodeName}.config.system.build.toplevel;
+      in "${newSystem}/bin/switch-to-configuration test";
+    in ''
+      with subtest("Check Radicale 1 functionality"):
+          radicale.succeed(
+              "${switchToConfig "radicale1"} >&2"
+          )
+          radicale.wait_for_unit("radicale.service")
+          radicale.wait_for_open_port(${port})
+          radicale.succeed(
+              "curl --fail http://${user}:${password}@localhost:${port}/someuser/calendar.ics/"
+          )
+
+      with subtest("Export data in Radicale 2 format"):
+          radicale.succeed("systemctl stop radicale")
+          radicale.succeed("ls -al /tmp/collections")
+          radicale.fail("ls -al /tmp/collections-new")
+
+      with subtest("Radicale exits immediately after exporting storage"):
+          radicale.succeed(
+              "${switchToConfig "radicale1_export"} >&2"
+          )
+          radicale.wait_until_fails("systemctl status radicale")
+          radicale.succeed("ls -al /tmp/collections")
+          radicale.succeed("ls -al /tmp/collections-new")
+
+      with subtest("Verify data in Radicale 2 format"):
+          radicale.succeed("rm -r /tmp/collections/${user}")
+          radicale.succeed("mv /tmp/collections-new/collection-root /tmp/collections")
+          radicale.succeed(
+              "${switchToConfig "radicale2_verify"} >&2"
+          )
+          radicale.wait_until_fails("systemctl status radicale")
+
+          (retcode, logs) = radicale.execute("journalctl -u radicale -n 10")
+          assert (
+              retcode == 0 and "Verifying storage" in logs
+          ), "Radicale 2 didn't verify storage"
+          assert (
+              "failed" not in logs and "exception" not in logs
+          ), "storage verification failed"
+
+      with subtest("Check Radicale 2 functionality"):
+          radicale.succeed(
+              "${switchToConfig "radicale2"} >&2"
+          )
+          radicale.wait_for_unit("radicale.service")
+          radicale.wait_for_open_port(${port})
+
+          (retcode, output) = radicale.execute(
+              "curl --fail http://${user}:${password}@localhost:${port}/someuser/calendar.ics/"
+          )
+          assert (
+              retcode == 0 and "VCALENDAR" in output
+          ), "Could not read calendar from Radicale 2"
+
+      radicale.succeed("curl --fail http://${user}:${password}@localhost:${port}/.web/")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/redis.nix b/nixpkgs/nixos/tests/redis.nix
new file mode 100644
index 000000000000..529965d7acde
--- /dev/null
+++ b/nixpkgs/nixos/tests/redis.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "redis";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ flokli ];
+  };
+
+  nodes = {
+    machine =
+      { pkgs, ... }:
+
+      {
+        services.redis.enable = true;
+        services.redis.unixSocket = "/run/redis/redis.sock";
+      };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("redis")
+    machine.wait_for_open_port(6379)
+    machine.succeed("redis-cli ping | grep PONG")
+    machine.succeed("redis-cli -s /run/redis/redis.sock ping | grep PONG")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/redmine.nix b/nixpkgs/nixos/tests/redmine.nix
new file mode 100644
index 000000000000..73eb684f33a9
--- /dev/null
+++ b/nixpkgs/nixos/tests/redmine.nix
@@ -0,0 +1,76 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  mysqlTest = package: makeTest {
+    machine =
+      { config, pkgs, ... }:
+      { services.redmine.enable = true;
+        services.redmine.package = package;
+        services.redmine.database.type = "mysql2";
+        services.redmine.plugins = {
+          redmine_env_auth = pkgs.fetchurl {
+            url = "https://github.com/Intera/redmine_env_auth/archive/0.7.zip";
+            sha256 = "1xb8lyarc7mpi86yflnlgyllh9hfwb9z304f19dx409gqpia99sc";
+          };
+        };
+        services.redmine.themes = {
+          dkuk-redmine_alex_skin = pkgs.fetchurl {
+            url = "https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip";
+            sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
+          };
+        };
+      };
+
+    testScript = ''
+      startAll;
+      $machine->waitForUnit('redmine.service');
+      $machine->waitForOpenPort('3000');
+      $machine->succeed("curl --fail http://localhost:3000/");
+    '';
+  };
+
+  pgsqlTest = package: makeTest {
+    machine =
+      { config, pkgs, ... }:
+      { services.redmine.enable = true;
+        services.redmine.package = package;
+        services.redmine.database.type = "postgresql";
+        services.redmine.plugins = {
+          redmine_env_auth = pkgs.fetchurl {
+            url = "https://github.com/Intera/redmine_env_auth/archive/0.7.zip";
+            sha256 = "1xb8lyarc7mpi86yflnlgyllh9hfwb9z304f19dx409gqpia99sc";
+          };
+        };
+        services.redmine.themes = {
+          dkuk-redmine_alex_skin = pkgs.fetchurl {
+            url = "https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip";
+            sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
+          };
+        };
+      };
+
+    testScript = ''
+      startAll;
+      $machine->waitForUnit('redmine.service');
+      $machine->waitForOpenPort('3000');
+      $machine->succeed("curl --fail http://localhost:3000/");
+    '';
+  };
+in
+{
+  mysql = mysqlTest pkgs.redmine // {
+    name = "mysql";
+    meta.maintainers = [ maintainers.aanderse ];
+  };
+
+  pgsql = pgsqlTest pkgs.redmine // {
+    name = "pgsql";
+    meta.maintainers = [ maintainers.aanderse ];
+  };
+}
diff --git a/nixpkgs/nixos/tests/resolv.nix b/nixpkgs/nixos/tests/resolv.nix
new file mode 100644
index 000000000000..b506f87451ee
--- /dev/null
+++ b/nixpkgs/nixos/tests/resolv.nix
@@ -0,0 +1,46 @@
+# Test whether DNS resolving returns multiple records and all address families.
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "resolv";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ckauhaus ];
+  };
+
+  nodes.resolv = { ... }: {
+    networking.extraHosts = ''
+      # IPv4 only
+      192.0.2.1 host-ipv4.example.net
+      192.0.2.2 host-ipv4.example.net
+      # IPv6 only
+      2001:db8::2:1 host-ipv6.example.net
+      2001:db8::2:2 host-ipv6.example.net
+      # dual stack
+      192.0.2.1 host-dual.example.net
+      192.0.2.2 host-dual.example.net
+      2001:db8::2:1 host-dual.example.net
+      2001:db8::2:2 host-dual.example.net
+    '';
+  };
+
+  testScript = ''
+    def addrs_in(hostname, addrs):
+        res = resolv.succeed("getent ahosts {}".format(hostname))
+        for addr in addrs:
+            assert addr in res, "Expected output '{}' not found in\n{}".format(addr, res)
+
+
+    start_all()
+    resolv.wait_for_unit("nscd")
+
+    ipv4 = ["192.0.2.1", "192.0.2.2"]
+    ipv6 = ["2001:db8::2:1", "2001:db8::2:2"]
+
+    with subtest("IPv4 resolves"):
+        addrs_in("host-ipv4.example.net", ipv4)
+
+    with subtest("IPv6 resolves"):
+        addrs_in("host-ipv6.example.net", ipv6)
+
+    with subtest("Dual stack resolves"):
+        addrs_in("host-dual.example.net", ipv4 + ipv6)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/restic.nix b/nixpkgs/nixos/tests/restic.nix
new file mode 100644
index 000000000000..67bb7f1933d6
--- /dev/null
+++ b/nixpkgs/nixos/tests/restic.nix
@@ -0,0 +1,63 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+
+    let
+      repository = "/tmp/restic-backup";
+      passwordFile = pkgs.writeText "password" "correcthorsebatterystaple";
+    in
+      {
+        name = "restic";
+
+        meta = with pkgs.stdenv.lib.maintainers; {
+          maintainers = [ bbigras ];
+        };
+
+        nodes = {
+          server =
+            { ... }:
+              {
+                services.restic.backups = {
+                  remotebackup = {
+                    inherit repository;
+                    passwordFile = "${passwordFile}";
+                    initialize = true;
+                    paths = [ "/opt" ];
+                    pruneOpts = [
+                      "--keep-daily 2"
+                      "--keep-weekly 1"
+                      "--keep-monthly 1"
+                      "--keep-yearly 99"
+                    ];
+                  };
+                };
+              };
+        };
+
+        testScript = ''
+          server.start()
+          server.wait_for_unit("dbus.socket")
+          server.fail(
+              "${pkgs.restic}/bin/restic -r ${repository} -p ${passwordFile} snapshots"
+          )
+          server.succeed(
+              "mkdir -p /opt",
+              "touch /opt/some_file",
+              "timedatectl set-time '2016-12-13 13:45'",
+              "systemctl start restic-backups-remotebackup.service",
+              '${pkgs.restic}/bin/restic -r ${repository} -p ${passwordFile} snapshots -c | grep -e "^1 snapshot"',
+              "timedatectl set-time '2017-12-13 13:45'",
+              "systemctl start restic-backups-remotebackup.service",
+              "timedatectl set-time '2018-12-13 13:45'",
+              "systemctl start restic-backups-remotebackup.service",
+              "timedatectl set-time '2018-12-14 13:45'",
+              "systemctl start restic-backups-remotebackup.service",
+              "timedatectl set-time '2018-12-15 13:45'",
+              "systemctl start restic-backups-remotebackup.service",
+              "timedatectl set-time '2018-12-16 13:45'",
+              "systemctl start restic-backups-remotebackup.service",
+              '${pkgs.restic}/bin/restic -r ${repository} -p ${passwordFile} snapshots -c | grep -e "^4 snapshot"',
+          )
+        '';
+      }
+)
diff --git a/nixpkgs/nixos/tests/riak.nix b/nixpkgs/nixos/tests/riak.nix
new file mode 100644
index 000000000000..6915779e7e9c
--- /dev/null
+++ b/nixpkgs/nixos/tests/riak.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "riak";
+  meta = with lib.maintainers; {
+    maintainers = [ filalex77 ];
+  };
+
+  machine = {
+    services.riak.enable = true;
+    services.riak.package = pkgs.riak;
+  };
+
+  testScript = ''
+    machine.start()
+
+    machine.wait_for_unit("riak")
+    machine.wait_until_succeeds("riak ping 2>&1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/roundcube.nix b/nixpkgs/nixos/tests/roundcube.nix
new file mode 100644
index 000000000000..97e1125694b6
--- /dev/null
+++ b/nixpkgs/nixos/tests/roundcube.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "roundcube";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ globin ];
+  };
+
+  nodes = {
+    roundcube = { config, pkgs, ... }: {
+      services.roundcube = {
+        enable = true;
+        hostName = "roundcube";
+        database.password = "not production";
+        package = pkgs.roundcube.withPlugins (plugins: [ plugins.persistent_login ]);
+        plugins = [ "persistent_login" ];
+        dicts = with pkgs.aspellDicts; [ en fr de ];
+      };
+      services.nginx.virtualHosts.roundcube = {
+        forceSSL = false;
+        enableACME = false;
+      };
+    };
+  };
+
+  testScript = ''
+    roundcube.start()
+    roundcube.wait_for_unit("postgresql.service")
+    roundcube.wait_for_unit("phpfpm-roundcube.service")
+    roundcube.wait_for_unit("nginx.service")
+    roundcube.succeed("curl -sSfL http://roundcube/ | grep 'Keep me logged in'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/rspamd.nix b/nixpkgs/nixos/tests/rspamd.nix
new file mode 100644
index 000000000000..bf3f0de62044
--- /dev/null
+++ b/nixpkgs/nixos/tests/rspamd.nix
@@ -0,0 +1,304 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  initMachine = ''
+    start_all()
+    machine.wait_for_unit("rspamd.service")
+    machine.succeed("id rspamd >/dev/null")
+  '';
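+  # Emits Python assertions checking a socket's owner, group and permission
+  # bits via stat(1).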
+  checkSocket = socket: user: group: mode: ''
+    machine.succeed("ls ${socket} >/dev/null")
+    machine.succeed('[[ "$(stat -c %U ${socket})" == "${user}" ]]')
+    machine.succeed('[[ "$(stat -c %G ${socket})" == "${group}" ]]')
+    machine.succeed('[[ "$(stat -c %a ${socket})" == "${mode}" ]]')
+  '';
+  simple = name: enableIPv6: makeTest {
+    name = "rspamd-${name}";
+    machine = {
+      services.rspamd.enable = true;
+      networking.enableIPv6 = enableIPv6;
+    };
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_open_port(11334)
+      machine.wait_for_unit("rspamd.service")
+      machine.succeed("id rspamd >/dev/null")
+      ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "660" }
+      machine.sleep(10)
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.conf"))
+      machine.log(
+          machine.succeed("grep 'CONFDIR/worker-controller.inc' /etc/rspamd/rspamd.conf")
+      )
+      machine.log(machine.succeed("grep 'CONFDIR/worker-normal.inc' /etc/rspamd/rspamd.conf"))
+      machine.log(machine.succeed("systemctl cat rspamd.service"))
+      machine.log(machine.succeed("curl http://localhost:11334/auth"))
+      machine.log(machine.succeed("curl http://127.0.0.1:11334/auth"))
+      ${optionalString enableIPv6 ''machine.log(machine.succeed("curl http://[::1]:11334/auth"))''}
+    '';
+  };
+in
+{
+  simple = simple "simple" true;
+  ipv4only = simple "ipv4only" false;
+  deprecated = makeTest {
+    name = "rspamd-deprecated";
+    machine = {
+      services.rspamd = {
+        enable = true;
+        workers.normal.bindSockets = [{
+          socket = "/run/rspamd.sock";
+          mode = "0600";
+          owner = "root";
+          group = "root";
+        }];
+        workers.controller.bindSockets = [{
+          socket = "/run/rspamd-worker.sock";
+          mode = "0666";
+          owner = "root";
+          group = "root";
+        }];
+      };
+    };
+
+    testScript = ''
+      ${initMachine}
+      machine.wait_for_file("/run/rspamd.sock")
+      ${checkSocket "/run/rspamd.sock" "root" "root" "600" }
+      ${checkSocket "/run/rspamd-worker.sock" "root" "root" "666" }
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.conf"))
+      machine.log(
+          machine.succeed("grep 'CONFDIR/worker-controller.inc' /etc/rspamd/rspamd.conf")
+      )
+      machine.log(machine.succeed("grep 'CONFDIR/worker-normal.inc' /etc/rspamd/rspamd.conf"))
+      machine.log(machine.succeed("rspamc -h /run/rspamd-worker.sock stat"))
+      machine.log(
+          machine.succeed("curl --unix-socket /run/rspamd-worker.sock http://localhost/ping")
+      )
+    '';
+  };
+
+  bindports = makeTest {
+    name = "rspamd-bindports";
+    machine = {
+      services.rspamd = {
+        enable = true;
+        workers.normal.bindSockets = [{
+          socket = "/run/rspamd.sock";
+          mode = "0600";
+          owner = "root";
+          group = "root";
+        }];
+        workers.controller.bindSockets = [{
+          socket = "/run/rspamd-worker.sock";
+          mode = "0666";
+          owner = "root";
+          group = "root";
+        }];
+        workers.controller2 = {
+          type = "controller";
+          bindSockets = [ "0.0.0.0:11335" ];
+          extraConfig = ''
+            static_dir = "''${WWWDIR}";
+            secure_ip = null;
+            password = "verysecretpassword";
+          '';
+        };
+      };
+    };
+
+    testScript = ''
+      ${initMachine}
+      machine.wait_for_file("/run/rspamd.sock")
+      ${checkSocket "/run/rspamd.sock" "root" "root" "600" }
+      ${checkSocket "/run/rspamd-worker.sock" "root" "root" "666" }
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.conf"))
+      machine.log(
+          machine.succeed("grep 'CONFDIR/worker-controller.inc' /etc/rspamd/rspamd.conf")
+      )
+      machine.log(machine.succeed("grep 'CONFDIR/worker-normal.inc' /etc/rspamd/rspamd.conf"))
+      machine.log(
+          machine.succeed(
+              "grep 'LOCAL_CONFDIR/override.d/worker-controller2.inc' /etc/rspamd/rspamd.conf"
+          )
+      )
+      machine.log(
+          machine.succeed(
+              "grep 'verysecretpassword' /etc/rspamd/override.d/worker-controller2.inc"
+          )
+      )
+      machine.wait_until_succeeds(
+          "journalctl -u rspamd | grep -i 'starting controller process' >&2"
+      )
+      machine.log(machine.succeed("rspamc -h /run/rspamd-worker.sock stat"))
+      machine.log(
+          machine.succeed("curl --unix-socket /run/rspamd-worker.sock http://localhost/ping")
+      )
+      machine.log(machine.succeed("curl http://localhost:11335/ping"))
+    '';
+  };
+  customLuaRules = makeTest {
+    name = "rspamd-custom-lua-rules";
+    machine = {
+      environment.etc."tests/no-muh.eml".text = ''
+        From: Sheep1<bah@example.com>
+        To: Sheep2<mah@example.com>
+        Subject: Evil cows
+
+        I find cows to be evil don't you?
+      '';
+      environment.etc."tests/muh.eml".text = ''
+        From: Cow<cow@example.com>
+        To: Sheep2<mah@example.com>
+        Subject: Evil cows
+
+        Cows are majestic creatures don't Muh agree?
+      '';
+      services.rspamd = {
+        enable = true;
+        locals = {
+          "antivirus.conf" = mkIf false { text = ''
+              clamav {
+                action = "reject";
+                symbol = "CLAM_VIRUS";
+                type = "clamav";
+                log_clean = true;
+                servers = "/run/clamav/clamd.ctl";
+              }
+            '';};
+          "redis.conf" = {
+            enable = false;
+            text = ''
+              servers = "127.0.0.1";
+            '';
+          };
+          "groups.conf".text = ''
+            group "cows" {
+              symbol {
+                NO_MUH = {
+                  weight = 1.0;
+                  description = "Mails should not muh";
+                }
+              }
+            }
+          '';
+        };
+        localLuaRules = pkgs.writeText "rspamd.local.lua" ''
+          local rspamd_logger = require "rspamd_logger"
+          rspamd_config.NO_MUH = {
+            callback = function (task)
+              local parts = task:get_text_parts()
+              if parts then
+                for _,part in ipairs(parts) do
+                  local content = tostring(part:get_content())
+                  rspamd_logger.infox(rspamd_config, 'Found content %s', content)
+                  local found = string.find(content, "Muh");
+                  rspamd_logger.infox(rspamd_config, 'Found muh %s', tostring(found))
+                  if found then
+                    return true
+                  end
+                end
+              end
+              return false
+            end,
+            score = 5.0,
+            description = 'Allow no cows',
+            group = "cows",
+          }
+          rspamd_logger.infox(rspamd_config, 'Work dammit!!!')
+        '';
+      };
+    };
+    testScript = ''
+      ${initMachine}
+      machine.wait_for_open_port(11334)
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.conf"))
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.local.lua"))
+      machine.log(machine.succeed("cat /etc/rspamd/local.d/groups.conf"))
+      # Verify that redis.conf was not written
+      machine.fail("cat /etc/rspamd/local.d/redis.conf >&2")
+      # Verify that antivirus.conf was not written
+      machine.fail("cat /etc/rspamd/local.d/antivirus.conf >&2")
+      ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "660" }
+      machine.log(
+          machine.succeed("curl --unix-socket /run/rspamd/rspamd.sock http://localhost/ping")
+      )
+      machine.log(machine.succeed("rspamc -h 127.0.0.1:11334 stat"))
+      machine.log(machine.succeed("cat /etc/tests/no-muh.eml | rspamc -h 127.0.0.1:11334"))
+      machine.log(
+          machine.succeed("cat /etc/tests/muh.eml | rspamc -h 127.0.0.1:11334 symbols")
+      )
+      machine.wait_until_succeeds("journalctl -u rspamd | grep -i muh >&2")
+      machine.log(
+          machine.fail(
+              "cat /etc/tests/no-muh.eml | rspamc -h 127.0.0.1:11334 symbols | grep NO_MUH"
+          )
+      )
+      machine.log(
+          machine.succeed(
+              "cat /etc/tests/muh.eml | rspamc -h 127.0.0.1:11334 symbols | grep NO_MUH"
+          )
+      )
+    '';
+  };
+  postfixIntegration = makeTest {
+    name = "rspamd-postfix-integration";
+    machine = {
+      environment.systemPackages = with pkgs; [ msmtp ];
+      environment.etc."tests/gtube.eml".text = ''
+        From: Sheep1<bah@example.com>
+        To: Sheep2<tester@example.com>
+        Subject: Evil cows
+
+        I find cows to be evil don't you?
+
+        XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-EMAIL*C.34X
+      '';
+      environment.etc."tests/example.eml".text = ''
+        From: Sheep1<bah@example.com>
+        To: Sheep2<tester@example.com>
+        Subject: Evil cows
+
+        I find cows to be evil don't you?
+      '';
+      users.users.tester.password = "test";
+      services.postfix = {
+        enable = true;
+        destination = ["example.com"];
+      };
+      services.rspamd = {
+        enable = true;
+        postfix.enable = true;
+        workers.rspamd_proxy.type = "rspamd_proxy";
+      };
+    };
+    testScript = ''
+      ${initMachine}
+      machine.wait_for_open_port(11334)
+      machine.wait_for_open_port(25)
+      ${checkSocket "/run/rspamd/rspamd-milter.sock" "rspamd" "postfix" "660" }
+      machine.log(machine.succeed("rspamc -h 127.0.0.1:11334 stat"))
+      machine.log(
+          machine.succeed(
+              "msmtp --host=localhost -t --read-envelope-from < /etc/tests/example.eml"
+          )
+      )
+      machine.log(
+          machine.fail(
+              "msmtp --host=localhost -t --read-envelope-from < /etc/tests/gtube.eml"
+          )
+      )
+
+      machine.wait_until_fails('[ "$(postqueue -p)" != "Mail queue is empty" ]')
+      machine.fail("journalctl -u postfix | grep -i error >&2")
+      machine.fail("journalctl -u postfix | grep -i warning >&2")
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/rss2email.nix b/nixpkgs/nixos/tests/rss2email.nix
new file mode 100644
index 000000000000..d62207a417b8
--- /dev/null
+++ b/nixpkgs/nixos/tests/rss2email.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix {
+  name = "opensmtpd";
+
+  nodes = {
+    server = { pkgs, ... }: {
+      imports = [ common/user-account.nix ];
+      services.nginx = {
+        enable = true;
+        virtualHosts."127.0.0.1".root = ./common/webroot;
+      };
+      services.rss2email = {
+        enable = true;
+        to = "alice@localhost";
+        interval = "1";
+        config.from = "test@example.org";
+        feeds = {
+          nixos = { url = "http://127.0.0.1/news-rss.xml"; };
+        };
+      };
+      services.opensmtpd = {
+        enable = true;
+        extraServerArgs = [ "-v" ];
+        serverConfiguration = ''
+          listen on 127.0.0.1
+          action dovecot_deliver mda \
+            "${pkgs.dovecot}/libexec/dovecot/deliver -d %{user.username}"
+          match from any for local action dovecot_deliver
+        '';
+      };
+      services.dovecot2 = {
+        enable = true;
+        enableImap = true;
+        mailLocation = "maildir:~/mail";
+        protocols = [ "imap" ];
+      };
+      environment.systemPackages = let
+        checkMailLanded = pkgs.writeScriptBin "check-mail-landed" ''
+          #!${pkgs.python3.interpreter}
+          import imaplib
+
+          with imaplib.IMAP4('127.0.0.1', 143) as imap:
+            imap.login('alice', 'foobar')
+            imap.select()
+            status, refs = imap.search(None, 'ALL')
+            print("=====> Result of search for all:", status, refs)
+            assert status == 'OK'
+            assert len(refs) > 0
+            status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+            assert status == 'OK'
+        '';
+      in [ pkgs.opensmtpd checkMailLanded ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("network-online.target")
+    server.wait_for_unit("opensmtpd")
+    server.wait_for_unit("dovecot2")
+    server.wait_for_unit("nginx")
+    server.wait_for_unit("rss2email")
+
+    server.wait_until_succeeds("check-mail-landed >&2")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/rsyslogd.nix b/nixpkgs/nixos/tests/rsyslogd.nix
new file mode 100644
index 000000000000..50523920c60b
--- /dev/null
+++ b/nixpkgs/nixos/tests/rsyslogd.nix
@@ -0,0 +1,40 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+{
+  test1 = makeTest {
+    name = "rsyslogd-test1";
+    meta.maintainers = [ pkgs.stdenv.lib.maintainers.aanderse ];
+
+    machine = { config, pkgs, ... }: {
+      services.rsyslogd.enable = true;
+      services.journald.forwardToSyslog = false;
+    };
+
+    # ensure rsyslogd doesn't receive messages from journald when forwarding is explicitly disabled
+    testScript = ''
+      machine.wait_for_unit("default.target")
+      machine.fail("test -f /var/log/messages")
+    '';
+  };
+
+  test2 = makeTest {
+    name = "rsyslogd-test2";
+    meta.maintainers = [ pkgs.stdenv.lib.maintainers.aanderse ];
+
+    machine = { config, pkgs, ... }: {
+      services.rsyslogd.enable = true;
+    };
+
+    # ensure rsyslogd is receiving messages from journald
+    testScript = ''
+      machine.wait_for_unit("default.target")
+      machine.succeed("test -f /var/log/messages")
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/run-in-machine.nix b/nixpkgs/nixos/tests/run-in-machine.nix
new file mode 100644
index 000000000000..67840f3e9fe7
--- /dev/null
+++ b/nixpkgs/nixos/tests/run-in-machine.nix
@@ -0,0 +1,23 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  output = runInMachine {
+    drv = pkgs.hello;
+    machine = { ... }: { /* services.sshd.enable = true; */ };
+  };
+
+  test = pkgs.runCommand "verify-output" { inherit output; } ''
+    if [ ! -e "$output/bin/hello" ]; then
+      echo "Derivation built using runInMachine produced incorrect output:" >&2
+      ls -laR "$output" >&2
+      exit 1
+    fi
+    "$output/bin/hello" > "$out"
+  '';
+
+in test // { inherit test; } # To emulate behaviour of makeTest
diff --git a/nixpkgs/nixos/tests/rxe.nix b/nixpkgs/nixos/tests/rxe.nix
new file mode 100644
index 000000000000..10753c4ed0c8
--- /dev/null
+++ b/nixpkgs/nixos/tests/rxe.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ ... } :
+
+let
+  node = { pkgs, ... } : {
+    networking = {
+      firewall = {
+        allowedUDPPorts = [ 4791 ]; # open RoCE port
+        allowedTCPPorts = [ 4800 ]; # port for test utils
+      };
+      rxe = {
+        enable = true;
+        interfaces = [ "eth1" ];
+      };
+    };
+
+    environment.systemPackages = with pkgs; [ rdma-core screen ];
+  };
+
+in {
+  name = "rxe";
+
+  nodes = {
+    server = node;
+    client = node;
+  };
+
+  testScript = ''
+    # Test if rxe interface comes up
+    server.wait_for_unit("default.target")
+    server.succeed("systemctl status rxe.service")
+    server.succeed("ibv_devices | grep rxe_eth1")
+
+    client.wait_for_unit("default.target")
+
+    # Ping-pong tests: for each RDMA transport (rc, uc, ud, srq), start the
+    # server side detached in a screen session, then run the matching client.
+    for proto in "rc", "uc", "ud", "srq":
+        server.succeed(
+            "screen -dmS {0}_pingpong ibv_{0}_pingpong -p 4800 -s 1024 -g0".format(proto)
+        )
+        client.succeed("sleep 2; ibv_{}_pingpong -p 4800 -s 1024 -g0 server".format(proto))
+
+    server.succeed("screen -dmS rping rping -s -a server -C 10")
+    client.succeed("sleep 2; rping -c -a server -C 10")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/samba.nix b/nixpkgs/nixos/tests/samba.nix
new file mode 100644
index 000000000000..142269752b34
--- /dev/null
+++ b/nixpkgs/nixos/tests/samba.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "samba";
+
+  meta.maintainers = [ pkgs.lib.maintainers.eelco ];
+
+  nodes =
+    { client =
+        { pkgs, ... }:
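+        # mkVMOverride is needed so this CIFS mount takes effect alongside
+        # the fileSystems the test VM infrastructure already defines.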
+        { fileSystems = pkgs.lib.mkVMOverride
+            { "/public" = {
+                fsType = "cifs";
+                device = "//server/public";
+                options = [ "guest" ];
+              };
+            };
+        };
+
+      server =
+        { ... }:
+        { services.samba.enable = true;
+          services.samba.shares.public =
+            { path = "/public";
+              "read only" = true;
+              browseable = "yes";
+              "guest ok" = "yes";
+              comment = "Public samba share.";
+            };
+          networking.firewall.allowedTCPPorts = [ 139 445 ];
+          networking.firewall.allowedUDPPorts = [ 137 138 ];
+        };
+    };
+
+  # client# [    4.542997] mount[777]: sh: systemd-ask-password: command not found
+
+  testScript =
+    ''
+      server.start()
+      server.wait_for_unit("samba.target")
+      server.succeed("mkdir -p /public; echo bar > /public/foo")
+
+      client.start()
+      client.wait_for_unit("remote-fs.target")
+      client.succeed("[[ $(cat /public/foo) = bar ]]")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/sanoid.nix b/nixpkgs/nixos/tests/sanoid.nix
new file mode 100644
index 000000000000..284b38932cce
--- /dev/null
+++ b/nixpkgs/nixos/tests/sanoid.nix
@@ -0,0 +1,90 @@
+import ./make-test-python.nix ({ pkgs, ... }: let
+  inherit (import ./ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+
+  commonConfig = { pkgs, ... }: {
+    virtualisation.emptyDiskImages = [ 2048 ];
+    boot.supportedFilesystems = [ "zfs" ];
+    environment.systemPackages = [ pkgs.parted ];
+  };
+in {
+  name = "sanoid";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lopsided98 ];
+  };
+
+  nodes = {
+    source = { ... }: {
+      imports = [ commonConfig ];
+      networking.hostId = "daa82e91";
+
+      programs.ssh.extraConfig = ''
+        UserKnownHostsFile=/dev/null
+        StrictHostKeyChecking=no
+      '';
+
+      services.sanoid = {
+        enable = true;
+        templates.test = {
+          hourly = 12;
+          daily = 1;
+          monthly = 1;
+          yearly = 1;
+
+          autosnap = true;
+        };
+        datasets."pool/test".useTemplate = [ "test" ];
+      };
+
+      services.syncoid = {
+        enable = true;
+        sshKey = "/root/.ssh/id_ecdsa";
+        commonArgs = [ "--no-sync-snap" ];
+        commands."pool/test".target = "root@target:pool/test";
+      };
+    };
+    target = { ... }: {
+      imports = [ commonConfig ];
+      networking.hostId = "dcf39d36";
+
+      services.openssh.enable = true;
+      users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+    };
+  };
+
+  testScript = ''
+    source.succeed(
+        "mkdir /tmp/mnt",
+        "parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s",
+        "udevadm settle",
+        "zpool create pool /dev/vdb1",
+        "zfs create -o mountpoint=legacy pool/test",
+        "mount -t zfs pool/test /tmp/mnt",
+        "udevadm settle",
+    )
+    target.succeed(
+        "parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s",
+        "udevadm settle",
+        "zpool create pool /dev/vdb1",
+        "udevadm settle",
+    )
+
+    source.succeed("mkdir -m 700 /root/.ssh")
+    source.succeed(
+        "cat '${snakeOilPrivateKey}' > /root/.ssh/id_ecdsa"
+    )
+    source.succeed("chmod 600 /root/.ssh/id_ecdsa")
+
+    source.succeed("touch /tmp/mnt/test.txt")
+    source.systemctl("start --wait sanoid.service")
+
+    target.wait_for_open_port(22)
+    source.systemctl("start --wait syncoid.service")
+    target.succeed(
+        "mkdir /tmp/mnt",
+        "zfs set mountpoint=legacy pool/test",
+        "mount -t zfs pool/test /tmp/mnt",
+    )
+    target.succeed("cat /tmp/mnt/test.txt")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/sddm.nix b/nixpkgs/nixos/tests/sddm.nix
new file mode 100644
index 000000000000..a145705250f7
--- /dev/null
+++ b/nixpkgs/nixos/tests/sddm.nix
@@ -0,0 +1,69 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  inherit (pkgs) lib;
+
+  tests = {
+    default = {
+      name = "sddm";
+
+      machine = { ... }: {
+        imports = [ ./common/user-account.nix ];
+        services.xserver.enable = true;
+        services.xserver.displayManager.sddm.enable = true;
+        services.xserver.displayManager.defaultSession = "none+icewm";
+        services.xserver.windowManager.icewm.enable = true;
+      };
+
+      enableOCR = true;
+
+      testScript = { nodes, ... }: let
+        user = nodes.machine.config.users.users.alice;
+      in ''
+        start_all()
+        machine.wait_for_text("(?i)select your user")
+        machine.screenshot("sddm")
+        machine.send_chars("${user.password}\n")
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+        machine.wait_for_window("^IceWM ")
+      '';
+    };
+
+    autoLogin = {
+      name = "sddm-autologin";
+      meta = with pkgs.stdenv.lib.maintainers; {
+        maintainers = [ ttuegel ];
+      };
+
+      machine = { ... }: {
+        imports = [ ./common/user-account.nix ];
+        services.xserver.enable = true;
+        services.xserver.displayManager.sddm = {
+          enable = true;
+          autoLogin = {
+            enable = true;
+            user = "alice";
+          };
+        };
+        services.xserver.displayManager.defaultSession = "none+icewm";
+        services.xserver.windowManager.icewm.enable = true;
+      };
+
+      testScript = { nodes, ... }: let
+        user = nodes.machine.config.users.users.alice;
+      in ''
+        start_all()
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+        machine.wait_for_window("^IceWM ")
+      '';
+    };
+  };
+in
+  lib.mapAttrs (lib.const makeTest) tests
diff --git a/nixpkgs/nixos/tests/service-runner.nix b/nixpkgs/nixos/tests/service-runner.nix
new file mode 100644
index 000000000000..adb3fcd36d7a
--- /dev/null
+++ b/nixpkgs/nixos/tests/service-runner.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "service-runner";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ roberth ];
+  };
+
+  nodes = {
+    machine = { pkgs, lib, ... }: {
+      services.nginx.enable = true;
+      services.nginx.virtualHosts.machine.root = pkgs.runCommand "webroot" {} ''
+        mkdir $out
+        echo 'yay' >$out/index.html
+      '';
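+      # Disable the generated unit so the only way nginx can come up is via
+      # the standalone runner script exercised in the test script below.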
+      systemd.services.nginx.enable = false;
+    };
+
+  };
+
+  testScript = { nodes, ... }: ''
+    url = "http://localhost/index.html"
+
+    with subtest("check systemd.services.nginx.runner"):
+        machine.fail(f"curl {url}")
+        machine.succeed(
+            """
+            mkdir -p /run/nginx /var/spool/nginx/logs
+            ${nodes.machine.config.systemd.services.nginx.runner} &
+            echo $!>my-nginx.pid
+            """
+        )
+        machine.wait_for_open_port(80)
+        machine.succeed(f"curl {url}")
+        machine.succeed("kill -INT $(cat my-nginx.pid)")
+        machine.wait_for_closed_port(80)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/shiori.nix b/nixpkgs/nixos/tests/shiori.nix
new file mode 100644
index 000000000000..a5771262c6f2
--- /dev/null
+++ b/nixpkgs/nixos/tests/shiori.nix
@@ -0,0 +1,81 @@
+import ./make-test-python.nix ({ pkgs, lib, ...}:
+
+{
+  name = "shiori";
+  meta.maintainers = with lib.maintainers; [ minijackson ];
+
+  machine =
+    { ... }:
+    { services.shiori.enable = true; };
+
+  testScript = let
+    authJSON = pkgs.writeText "auth.json" (builtins.toJSON {
+      username = "shiori";
+      password = "gopher";
+      remember = 1; # hour
+      owner = true;
+    });
+
+    insertBookmark = {
+      url = "http://example.org";
+      title = "Example Bookmark";
+    };
+
+    insertBookmarkJSON = pkgs.writeText "insertBookmark.json" (builtins.toJSON insertBookmark);
+  in ''
+    import json
+
+    machine.wait_for_unit("shiori.service")
+    machine.wait_for_open_port(8080)
+    machine.succeed("curl --fail http://localhost:8080/")
+    machine.succeed("curl --fail --location http://localhost:8080/ | grep -qi shiori")
+
+    with subtest("login"):
+        auth_json = machine.succeed(
+            "curl --fail --location http://localhost:8080/api/login "
+            "-X POST -H 'Content-Type:application/json' -d @${authJSON}"
+        )
+        auth_ret = json.loads(auth_json)
+        session_id = auth_ret["session"]
+
+    with subtest("bookmarks"):
+        with subtest("first use no bookmarks"):
+            bookmarks_json = machine.succeed(
+                (
+                    "curl --fail --location http://localhost:8080/api/bookmarks "
+                    "-H 'X-Session-Id:{}'"
+                ).format(session_id)
+            )
+
+            if json.loads(bookmarks_json)["bookmarks"] != []:
+                raise Exception("Shiori have a bookmark on first use")
+
+        with subtest("insert bookmark"):
+            machine.succeed(
+                (
+                    "curl --fail --location http://localhost:8080/api/bookmarks "
+                    "-X POST -H 'X-Session-Id:{}' "
+                    "-H 'Content-Type:application/json' -d @${insertBookmarkJSON}"
+                ).format(session_id)
+            )
+
+        with subtest("get inserted bookmark"):
+            bookmarks_json = machine.succeed(
+                (
+                    "curl --fail --location http://localhost:8080/api/bookmarks "
+                    "-H 'X-Session-Id:{}'"
+                ).format(session_id)
+            )
+
+            bookmarks = json.loads(bookmarks_json)["bookmarks"]
+            if len(bookmarks) != 1:
+                raise Exception("Shiori didn't save the bookmark")
+
+            bookmark = bookmarks[0]
+            if (
+                bookmark["url"] != "${insertBookmark.url}"
+                or bookmark["title"] != "${insertBookmark.title}"
+            ):
+                raise Exception("Inserted bookmark doesn't have same URL or title")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/signal-desktop.nix b/nixpkgs/nixos/tests/signal-desktop.nix
new file mode 100644
index 000000000000..e4b830e9e237
--- /dev/null
+++ b/nixpkgs/nixos/tests/signal-desktop.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "signal-desktop";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ flokli ];
+  };
+
+  machine = { ... }:
+
+  {
+    imports = [
+      ./common/user-account.nix
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    test-support.displayManager.auto.user = "alice";
+    environment.systemPackages = [ pkgs.signal-desktop ];
+    virtualisation.memorySize = 1024;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    start_all()
+    machine.wait_for_x()
+
+    # start signal desktop
+    machine.execute("su - alice -c signal-desktop &")
+
+    # wait for the "Link your phone to Signal Desktop" message
+    machine.wait_for_text("Link your phone to Signal Desktop")
+    machine.screenshot("signal_desktop")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/simple.nix b/nixpkgs/nixos/tests/simple.nix
new file mode 100644
index 000000000000..3810a2cd3a58
--- /dev/null
+++ b/nixpkgs/nixos/tests/simple.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "simple";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.shutdown()
+    '';
+})
diff --git a/nixpkgs/nixos/tests/slurm.nix b/nixpkgs/nixos/tests/slurm.nix
new file mode 100644
index 000000000000..d0e62d15437c
--- /dev/null
+++ b/nixpkgs/nixos/tests/slurm.nix
@@ -0,0 +1,141 @@
+import ./make-test-python.nix ({ lib, ... }:
+let
+    mungekey = "mungeverryweakkeybuteasytointegratoinatest";
+
+    slurmconfig = {
+      controlMachine = "control";
+      nodeName = [ "node[1-3] CPUs=1 State=UNKNOWN" ];
+      partitionName = [ "debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP" ];
+      extraConfig = ''
+        AccountingStorageHost=dbd
+        AccountingStorageType=accounting_storage/slurmdbd
+      '';
+    };
+in {
+  name = "slurm";
+
+  meta.maintainers = [ lib.maintainers.markuskowa ];
+
+  nodes =
+    let
+    computeNode =
+      { ...}:
+      {
+        # TODO: the slurmd and slurmctld ports should be configurable and
+        # automatically opened in the firewall.
+        networking.firewall.enable = false;
+        services.slurm = {
+          client.enable = true;
+        } // slurmconfig;
+      };
+    in {
+
+    control =
+      { ...}:
+      {
+        networking.firewall.enable = false;
+        services.slurm = {
+          server.enable = true;
+        } // slurmconfig;
+      };
+
+    submit =
+      { ...}:
+      {
+        networking.firewall.enable = false;
+        services.slurm = {
+          enableStools = true;
+        } // slurmconfig;
+      };
+
+    dbd =
+      { pkgs, ... } :
+      {
+        networking.firewall.enable = false;
+        services.slurm.dbdserver = {
+          enable = true;
+          storagePass = "password123";
+        };
+        services.mysql = {
+          enable = true;
+          package = pkgs.mariadb;
+          initialScript = pkgs.writeText "mysql-init.sql" ''
+            CREATE USER 'slurm'@'localhost' IDENTIFIED BY 'password123';
+            GRANT ALL PRIVILEGES ON slurm_acct_db.* TO 'slurm'@'localhost';
+          '';
+          ensureDatabases = [ "slurm_acct_db" ];
+          ensureUsers = [{
+            ensurePermissions = { "slurm_acct_db.*" = "ALL PRIVILEGES"; };
+            name = "slurm";
+          }];
+          extraOptions = ''
+            # recommendations from: https://slurm.schedmd.com/accounting.html#mysql-configuration
+            innodb_buffer_pool_size=1024M
+            innodb_log_file_size=64M
+            innodb_lock_wait_timeout=900
+          '';
+        };
+      };
+
+    node1 = computeNode;
+    node2 = computeNode;
+    node3 = computeNode;
+  };
+
+
+  testScript =
+  ''
+  start_all()
+
+  # Set up authentication across the cluster
+  for node in [submit, control, dbd, node1, node2, node3]:
+
+      node.wait_for_unit("default.target")
+
+      node.succeed("mkdir /etc/munge")
+      node.succeed(
+          "echo '${mungekey}' > /etc/munge/munge.key"
+      )
+      node.succeed("chmod 0400 /etc/munge/munge.key")
+      node.succeed("chown munge:munge /etc/munge/munge.key")
+      node.succeed("systemctl restart munged")
+
+      node.wait_for_unit("munged")
+
+
+  # Restart the services since they have probably failed due to the munge init
+  # failure
+  with subtest("can_start_slurmdbd"):
+      dbd.succeed("systemctl restart slurmdbd")
+      dbd.wait_for_unit("slurmdbd.service")
+      dbd.wait_for_open_port(6819)
+
+  # there needs to be an entry for the current
+  # cluster in the database before slurmctld is restarted
+  with subtest("add_account"):
+      control.succeed("sacctmgr -i add cluster default")
+      # check for cluster entry
+      control.succeed("sacctmgr list cluster | awk '{ print $1 }' | grep default")
+
+  with subtest("can_start_slurmctld"):
+      control.succeed("systemctl restart slurmctld")
+      control.wait_for_unit("slurmctld.service")
+
+  with subtest("can_start_slurmd"):
+      for node in [node1, node2, node3]:
+          node.succeed("systemctl restart slurmd.service")
+          node.wait_for_unit("slurmd")
+
+  # Test that the cluster works and can distribute jobs.
+
+  with subtest("run_distributed_command"):
+      # Run `hostname` on 3 nodes of the partition (so on all the 3 nodes).
+      # The output must contain the 3 different names
+      submit.succeed("srun -N 3 hostname | sort | uniq | wc -l | xargs test 3 -eq")
+
+      with subtest("check_slurm_dbd"):
+          # find the srun job from above in the database
+          control.succeed("sleep 5")
+          control.succeed("sacct | grep hostname")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/smokeping.nix b/nixpkgs/nixos/tests/smokeping.nix
new file mode 100644
index 000000000000..4f8f0fcc9fe2
--- /dev/null
+++ b/nixpkgs/nixos/tests/smokeping.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "smokeping";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ cransom ];
+  };
+
+  nodes = {
+    sm =
+      { ... }:
+      {
+        services.smokeping = {
+          enable = true;
+          port = 8081;
+          mailHost = "127.0.0.2";
+          probeConfig = ''
+            + FPing
+            binary = /run/wrappers/bin/fping
+            offset = 0%
+          '';
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    sm.wait_for_unit("smokeping")
+    sm.wait_for_unit("thttpd")
+    sm.wait_for_file("/var/lib/smokeping/data/Local/LocalMachine.rrd")
+    sm.succeed("curl -s -f localhost:8081/smokeping.fcgi?target=Local")
+    sm.succeed("ls /var/lib/smokeping/cache/Local/LocalMachine_mini.png")
+    sm.succeed("ls /var/lib/smokeping/cache/index.html")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/snapper.nix b/nixpkgs/nixos/tests/snapper.nix
new file mode 100644
index 000000000000..018102d7f640
--- /dev/null
+++ b/nixpkgs/nixos/tests/snapper.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ ... }:
+{
+  name = "snapper";
+
+  machine = { pkgs, lib, ... }: {
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.btrfs-progs}/bin/mkfs.btrfs -f -L aux /dev/vdb
+    '';
+
+    virtualisation.emptyDiskImages = [ 4096 ];
+
+    fileSystems = lib.mkVMOverride {
+      "/home" = {
+        device = "/dev/disk/by-label/aux";
+        fsType = "btrfs";
+      };
+    };
+    services.snapper.configs.home.subvolume = "/home";
+    services.snapper.filters = "/nix";
+  };
+
+  testScript = ''
+    machine.succeed("btrfs subvolume create /home/.snapshots")
+    machine.succeed("snapper -c home list")
+    machine.succeed("snapper -c home create --description empty")
+    machine.succeed("echo test > /home/file")
+    machine.succeed("snapper -c home create --description file")
+    machine.succeed("snapper -c home status 1..2")
+    machine.succeed("snapper -c home undochange 1..2")
+    machine.fail("ls /home/file")
+    machine.succeed("snapper -c home delete 2")
+    machine.succeed("systemctl --wait start snapper-timeline.service")
+    machine.succeed("systemctl --wait start snapper-cleanup.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/solr.nix b/nixpkgs/nixos/tests/solr.nix
new file mode 100644
index 000000000000..dc5770e16bc7
--- /dev/null
+++ b/nixpkgs/nixos/tests/solr.nix
@@ -0,0 +1,56 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "solr";
+  meta.maintainers = [ pkgs.stdenv.lib.maintainers.aanderse ];
+
+  machine =
+    { config, pkgs, ... }:
+    {
+      # Ensure the virtual machine has enough memory for Solr to avoid the following error:
+      #
+      #   OpenJDK 64-Bit Server VM warning:
+      #     INFO: os::commit_memory(0x00000000e8000000, 402653184, 0)
+      #     failed; error='Cannot allocate memory' (errno=12)
+      #
+      #   There is insufficient memory for the Java Runtime Environment to continue.
+      #   Native memory allocation (mmap) failed to map 402653184 bytes for committing reserved memory.
+      virtualisation.memorySize = 2000;
+
+      services.solr.enable = true;
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("solr.service")
+    machine.wait_for_open_port(8983)
+    machine.succeed("curl --fail http://localhost:8983/solr/")
+
+    # adapted from pkgs.solr/examples/films/README.txt
+    machine.succeed("sudo -u solr solr create -c films")
+    assert '"status":0' in machine.succeed(
+        """
+      curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:application/json' --data-binary '{
+        "add-field" : {
+          "name":"name",
+          "type":"text_general",
+          "multiValued":false,
+          "stored":true
+        },
+        "add-field" : {
+          "name":"initial_release_date",
+          "type":"pdate",
+          "stored":true
+        }
+      }'
+    """
+    )
+    machine.succeed(
+        "sudo -u solr post -c films ${pkgs.solr}/example/films/films.json"
+    )
+    assert '"name":"Batman Begins"' in machine.succeed(
+        "curl http://localhost:8983/solr/films/query?q=name:batman"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/sonarr.nix b/nixpkgs/nixos/tests/sonarr.nix
new file mode 100644
index 000000000000..764a4d05b381
--- /dev/null
+++ b/nixpkgs/nixos/tests/sonarr.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "sonarr";
+  meta.maintainers = with maintainers; [ etu ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.sonarr.enable = true; };
+
+  testScript = ''
+    machine.wait_for_unit("sonarr.service")
+    machine.wait_for_open_port("8989")
+    machine.succeed("curl --fail http://localhost:8989/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/spacecookie.nix b/nixpkgs/nixos/tests/spacecookie.nix
new file mode 100644
index 000000000000..6eff32a2e75d
--- /dev/null
+++ b/nixpkgs/nixos/tests/spacecookie.nix
@@ -0,0 +1,51 @@
+let
+  gopherRoot  = "/tmp/gopher";
+  gopherHost  = "gopherd";
+  fileContent = "Hello Gopher!";
+  fileName    = "file.txt";
+in
+  import ./make-test-python.nix ({...}: {
+    name = "spacecookie";
+    nodes = {
+      ${gopherHost} = {
+        networking.firewall.allowedTCPPorts = [ 70 ];
+        systemd.services.spacecookie = {
+          preStart = ''
+            mkdir -p ${gopherRoot}/directory
+            echo "${fileContent}" > ${gopherRoot}/${fileName}
+          '';
+        };
+
+        services.spacecookie = {
+          enable = true;
+          root = gopherRoot;
+          hostname = gopherHost;
+        };
+      };
+
+      client = {};
+    };
+
+    testScript = ''
+      start_all()
+      ${gopherHost}.wait_for_open_port(70)
+      ${gopherHost}.wait_for_unit("spacecookie.service")
+      client.wait_for_unit("network.target")
+
+      fileResponse = client.succeed("curl -s gopher://${gopherHost}//${fileName}")
+
+      # the file response should return our created file exactly
+      if not (fileResponse == "${fileContent}\n"):
+          raise Exception("Unexpected file response")
+
+      # sanity check on the directory listing: we serve a directory and a file
+      # via gopher, so the directory listing should have exactly two entries,
+      # one with gopher file type 0 (file) and one with file type 1 (directory).
+      dirResponse = client.succeed("curl -s gopher://${gopherHost}")
+      dirEntries = [l[0] for l in dirResponse.split("\n") if len(l) > 0]
+      dirEntries.sort()
+
+      if not (["0", "1"] == dirEntries):
+          raise Exception("Unexpected directory response")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/specialisation.nix b/nixpkgs/nixos/tests/specialisation.nix
new file mode 100644
index 000000000000..b8d4b8279f4d
--- /dev/null
+++ b/nixpkgs/nixos/tests/specialisation.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix {
+  name = "specialisation";
+  nodes = {
+    inheritconf = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.cowsay ];
+      specialisation.inheritconf.configuration = { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.hello ];
+      };
+    };
+    noinheritconf = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.cowsay ];
+      specialisation.noinheritconf = {
+        inheritParentConfig = false;
+        configuration = { pkgs, ... }: {
+          environment.systemPackages = [ pkgs.hello ];
+        };
+      };
+    };
+  };
+  testScript = ''
+    inheritconf.wait_for_unit("default.target")
+    inheritconf.succeed("cowsay hey")
+    inheritconf.fail("hello")
+
+    with subtest("Nested clones do inherit from parent"):
+        inheritconf.succeed(
+            "/run/current-system/specialisation/inheritconf/bin/switch-to-configuration test"
+        )
+        inheritconf.succeed("cowsay hey")
+        inheritconf.succeed("hello")
+
+        noinheritconf.wait_for_unit("default.target")
+        noinheritconf.succeed("cowsay hey")
+        noinheritconf.fail("hello")
+
+    with subtest("Nested children do not inherit from parent"):
+        noinheritconf.succeed(
+            "/run/current-system/specialisation/noinheritconf/bin/switch-to-configuration test"
+        )
+        noinheritconf.fail("cowsay hey")
+        noinheritconf.succeed("hello")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/spike.nix b/nixpkgs/nixos/tests/spike.nix
new file mode 100644
index 000000000000..47763e75ffa2
--- /dev/null
+++ b/nixpkgs/nixos/tests/spike.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
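+  # Cross-import nixpkgs for bare-metal riscv64 to obtain the proxy kernel
+  # (riscv-pk) and a RISC-V hello binary to run under Spike.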
+  riscvPkgs = import ../.. { crossSystem = pkgs.stdenv.lib.systems.examples.riscv64-embedded; };
+in
+{
+  name = "spike";
+  meta = with pkgs.stdenv.lib.maintainers; { maintainers = [ blitz ]; };
+
+  machine = { pkgs, lib, ... }: {
+    environment.systemPackages = [ pkgs.spike riscvPkgs.riscv-pk riscvPkgs.hello ];
+  };
+
+  # Run the RISC-V hello applications using the proxy kernel on the
+  # Spike emulator and see whether we get the expected output.
+  testScript =
+    ''
+      machine.wait_for_unit("multi-user.target")
+      output = machine.succeed("spike -m64 $(which pk) $(which hello)")
+      assert output == "Hello, world!\n"
+    '';
+})
diff --git a/nixpkgs/nixos/tests/ssh-keys.nix b/nixpkgs/nixos/tests/ssh-keys.nix
new file mode 100644
index 000000000000..07d422196efa
--- /dev/null
+++ b/nixpkgs/nixos/tests/ssh-keys.nix
@@ -0,0 +1,15 @@
+pkgs:
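+# A deliberately public ("snake oil") key pair shared by tests that need
+# passwordless SSH between VMs; never use it outside of test VMs.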
+{ snakeOilPrivateKey = pkgs.writeText "privkey.snakeoil" ''
+    -----BEGIN EC PRIVATE KEY-----
+    MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
+    AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
+    r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
+    -----END EC PRIVATE KEY-----
+  '';
+
+  snakeOilPublicKey = pkgs.lib.concatStrings [
+    "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
+    "yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
+    "9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
+  ];
+}
diff --git a/nixpkgs/nixos/tests/strongswan-swanctl.nix b/nixpkgs/nixos/tests/strongswan-swanctl.nix
new file mode 100644
index 000000000000..152c0d61c543
--- /dev/null
+++ b/nixpkgs/nixos/tests/strongswan-swanctl.nix
@@ -0,0 +1,148 @@
+# This strongswan-swanctl test is based on:
+# https://www.strongswan.org/testing/testresults/swanctl/rw-psk-ipv4/index.html
+# https://github.com/strongswan/strongswan/tree/master/testing/tests/swanctl/rw-psk-ipv4
+#
+# The roadwarrior carol sets up a connection to gateway moon. The authentication
+# is based on pre-shared keys and IPv4 addresses. Upon the successful
+# establishment of the IPsec tunnels, the specified updown script automatically
+# inserts iptables-based firewall rules that let pass the tunneled traffic. In
+# order to test both tunnel and firewall, carol pings the client alice behind
+# the gateway moon.
+#
+#     alice                       moon                        carol
+#      eth1------vlan_0------eth1        eth2------vlan_1------eth1
+#   192.168.0.1         192.168.0.3  192.168.1.3           192.168.1.2
+#
+# See the NixOS manual for how to run this test:
+# https://nixos.org/nixos/manual/index.html#sec-running-nixos-tests-interactively
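+#
+# A typical invocation (a sketch; paths assume a nixpkgs checkout) is:
+#   nix-build nixos/tests/strongswan-swanctl.nix -A driver
+#   ./result/bin/nixos-test-driver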
+
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  allowESP = "iptables --insert INPUT --protocol ESP --jump ACCEPT";
+
+  # Shared VPN settings:
+  vlan0         = "192.168.0.0/24";
+  carolIp       = "192.168.1.2";
+  moonIp        = "192.168.1.3";
+  version       = 2;
+  secret        = "0sFpZAZqEN6Ti9sqt4ZP5EWcqx";
+  esp_proposals = [ "aes128gcm128-x25519" ];
+  proposals     = [ "aes128-sha256-x25519" ];
+in {
+  name = "strongswan-swanctl";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ basvandijk ];
+  nodes = {
+
+    alice = { ... } : {
+      virtualisation.vlans = [ 0 ];
+      networking = {
+        dhcpcd.enable = false;
+        defaultGateway = "192.168.0.3";
+      };
+    };
+
+    moon = { config, ...} :
+      let strongswan = config.services.strongswan-swanctl.package;
+      in {
+        virtualisation.vlans = [ 0 1 ];
+        networking = {
+          dhcpcd.enable = false;
+          firewall = {
+            allowedUDPPorts = [ 4500 500 ];
+            extraCommands = allowESP;
+          };
+          nat = {
+            enable             = true;
+            internalIPs        = [ vlan0 ];
+            internalInterfaces = [ "eth1" ];
+            externalIP         = moonIp;
+            externalInterface  = "eth2";
+          };
+        };
+        environment.systemPackages = [ strongswan ];
+        services.strongswan-swanctl = {
+          enable = true;
+          swanctl = {
+            connections = {
+              rw = {
+                local_addrs = [ moonIp ];
+                local.main = {
+                  auth = "psk";
+                };
+                remote.main = {
+                  auth = "psk";
+                };
+                children = {
+                  net = {
+                    local_ts = [ vlan0 ];
+                    updown = "${strongswan}/libexec/ipsec/_updown iptables";
+                    inherit esp_proposals;
+                  };
+                };
+                inherit version;
+                inherit proposals;
+              };
+            };
+            secrets = {
+              ike.carol = {
+                id.main = carolIp;
+                inherit secret;
+              };
+            };
+          };
+        };
+      };
+
+    carol = { config, ...} :
+      let strongswan = config.services.strongswan-swanctl.package;
+      in {
+        virtualisation.vlans = [ 1 ];
+        networking = {
+          dhcpcd.enable = false;
+          firewall.extraCommands = allowESP;
+        };
+        environment.systemPackages = [ strongswan ];
+        services.strongswan-swanctl = {
+          enable = true;
+          swanctl = {
+            connections = {
+              home = {
+                local_addrs = [ carolIp ];
+                remote_addrs = [ moonIp ];
+                local.main = {
+                  auth = "psk";
+                  id = carolIp;
+                };
+                remote.main = {
+                  auth = "psk";
+                  id = moonIp;
+                };
+                children = {
+                  home = {
+                    remote_ts = [ vlan0 ];
+                    start_action = "trap";
+                    updown = "${strongswan}/libexec/ipsec/_updown iptables";
+                    inherit esp_proposals;
+                  };
+                };
+                inherit version;
+                inherit proposals;
+              };
+            };
+            secrets = {
+              ike.moon = {
+                id.main = moonIp;
+                inherit secret;
+              };
+            };
+          };
+        };
+      };
+
+  };
+  testScript = ''
+    start_all()
+    carol.wait_until_succeeds("ping -c 1 alice")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/sudo.nix b/nixpkgs/nixos/tests/sudo.nix
new file mode 100644
index 000000000000..5bbec3d57269
--- /dev/null
+++ b/nixpkgs/nixos/tests/sudo.nix
@@ -0,0 +1,83 @@
+# Some tests to ensure sudo is working properly.
+
+let
+  password = "helloworld";
+
+in
+  import ./make-test-python.nix ({ pkgs, ...} : {
+    name = "sudo";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ lschuermann ];
+    };
+
+    machine =
+      { lib, ... }:
+      with lib;
+      {
+        users.groups = { foobar = {}; barfoo = {}; baz = { gid = 1337; }; };
+        users.users = {
+          test0 = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+          test1 = { isNormalUser = true; password = password; };
+          test2 = { isNormalUser = true; extraGroups = [ "foobar" ]; password = password; };
+          test3 = { isNormalUser = true; extraGroups = [ "barfoo" ]; };
+          test4 = { isNormalUser = true; extraGroups = [ "baz" ]; };
+          test5 = { isNormalUser = true; };
+        };
+
+        security.sudo = {
+          enable = true;
+          wheelNeedsPassword = false;
+
+          extraRules = [
+            # SUDOERS SYNTAX CHECK (test whether the module produces valid
+            # output; errors are detected by the visudo checks)
+
+            # These should not create any entries
+            { users = [ "notest1" ]; commands = [ ]; }
+            { commands = [ { command = "ALL"; options = [ ]; } ]; }
+
+            # Test defining commands with the options syntax, though not setting any options
+            { users = [ "notest2" ]; commands = [ { command = "ALL"; options = [ ]; } ]; }
+
+
+            # CONFIGURATION FOR TEST CASES
+            { users = [ "test1" ]; groups = [ "foobar" ]; commands = [ "ALL" ]; }
+            { groups = [ "barfoo" 1337 ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" "NOSETENV" ]; } ]; }
+            { users = [ "test5" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" "SETENV" ]; } ]; runAs = "test1:barfoo"; }
+          ];
+        };
+      };
+
+    testScript =
+      ''
+        with subtest("users in wheel group should have passwordless sudo"):
+            machine.succeed('su - test0 -c "sudo -u root true"')
+
+        with subtest("test1 user should have sudo with password"):
+            machine.succeed('su - test1 -c "echo ${password} | sudo -S -u root true"')
+
+        with subtest("test1 user should not be able to use sudo without password"):
+            machine.fail('su - test1 -c "sudo -n -u root true"')
+
+        with subtest("users in group 'foobar' should be able to use sudo with password"):
+            machine.succeed("sudo -u test2 echo ${password} | sudo -S -u root true")
+
+        with subtest("users in group 'barfoo' should be able to use sudo without password"):
+            machine.succeed("sudo -u test3 sudo -n -u root true")
+
+        with subtest("users in group 'baz' (GID 1337)"):
+            machine.succeed("sudo -u test4 sudo -n -u root echo true")
+
+        with subtest("test5 user should be able to run commands under test1"):
+            machine.succeed("sudo -u test5 sudo -n -u test1 true")
+
+        with subtest("test5 user should not be able to run commands under root"):
+            machine.fail("sudo -u test5 sudo -n -u root true")
+
+        with subtest("test5 user should be able to keep his environment"):
+            machine.succeed("sudo -u test5 sudo -n -E -u test1 true")
+
+        with subtest("users in group 'barfoo' should not be able to keep their environment"):
+            machine.fail("sudo -u test3 sudo -n -E -u root true")
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/switch-test.nix b/nixpkgs/nixos/tests/switch-test.nix
new file mode 100644
index 000000000000..9ef96cec5ef3
--- /dev/null
+++ b/nixpkgs/nixos/tests/switch-test.nix
@@ -0,0 +1,38 @@
+# Test configuration switching.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "switch-test";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ gleber ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      users.mutableUsers = false;
+    };
+    other = { ... }: {
+      users.mutableUsers = true;
+    };
+  };
+
+  testScript = {nodes, ...}: let
+    originalSystem = nodes.machine.config.system.build.toplevel;
+    otherSystem = nodes.other.config.system.build.toplevel;
+
+    # Ensures failures pass through using pipefail, otherwise failing to
+    # switch-to-configuration is hidden by the success of `tee`.
+    stderrRunner = pkgs.writeScript "stderr-runner" ''
+      #! ${pkgs.runtimeShell}
+      set -e
+      set -o pipefail
+      exec env -i "$@" | tee /dev/stderr
+    '';
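+    # A minimal illustration (plain shell, for exposition only) of why
+    # pipefail matters here:
+    #
+    #   false | tee /dev/stderr; echo $?   # prints 0: tee masks the failure
+    #   set -o pipefail
+    #   false | tee /dev/stderr; echo $?   # prints 1: the failure propagates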
+  in ''
+    machine.succeed(
+        "${stderrRunner} ${originalSystem}/bin/switch-to-configuration test"
+    )
+    machine.succeed(
+        "${stderrRunner} ${otherSystem}/bin/switch-to-configuration test"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/sympa.nix b/nixpkgs/nixos/tests/sympa.nix
new file mode 100644
index 000000000000..280691f7cb40
--- /dev/null
+++ b/nixpkgs/nixos/tests/sympa.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "sympa";
+  meta.maintainers = with lib.maintainers; [ mmilata ];
+
+  machine =
+    { ... }:
+    {
+      virtualisation.memorySize = 1024;
+
+      services.sympa = {
+        enable = true;
+        domains = {
+          "lists.example.org" = {
+            webHost = "localhost";
+          };
+        };
+        listMasters = [ "joe@example.org" ];
+        web.enable = true;
+        web.https = false;
+        database = {
+          type = "PostgreSQL";
+          createLocally = true;
+        };
+      };
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("sympa.service")
+    machine.wait_for_unit("wwsympa.service")
+    assert "Mailing lists service" in machine.succeed(
+        "curl --insecure -L http://localhost/"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/syncthing-init.nix b/nixpkgs/nixos/tests/syncthing-init.nix
new file mode 100644
index 000000000000..9c8e0a3d087e
--- /dev/null
+++ b/nixpkgs/nixos/tests/syncthing-init.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: let
+
+  testId = "7CFNTQM-IMTJBHJ-3UWRDIU-ZGQJFR6-VCXZ3NB-XUH3KZO-N52ITXR-LAIYUAU";
+
+in {
+  name = "syncthing-init";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ lassulus ];
+
+  machine = {
+    services.syncthing = {
+      enable = true;
+      declarative = {
+        devices.testDevice = {
+          id = testId;
+        };
+        folders.testFolder = {
+          path = "/tmp/test";
+          devices = [ "testDevice" ];
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("syncthing-init.service")
+    config = machine.succeed("cat /var/lib/syncthing/.config/syncthing/config.xml")
+
+    assert "testFolder" in config
+    assert "${testId}" in config
+  '';
+})
+
diff --git a/nixpkgs/nixos/tests/syncthing-relay.nix b/nixpkgs/nixos/tests/syncthing-relay.nix
new file mode 100644
index 000000000000..cd72ef1cbe1d
--- /dev/null
+++ b/nixpkgs/nixos/tests/syncthing-relay.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "syncthing-relay";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ delroth ];
+
+  machine = {
+    environment.systemPackages = [ pkgs.jq ];
+    services.syncthing.relay = {
+      enable = true;
+      providedBy = "nixos-test";
+      pools = [];  # Don't connect to any pool while testing.
+      port = 12345;
+      statusPort = 12346;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("syncthing-relay.service")
+    machine.wait_for_open_port(12345)
+    machine.wait_for_open_port(12346)
+
+    out = machine.succeed(
+        "curl -sS http://localhost:12346/status | jq -r '.options.\"provided-by\"'"
+    )
+    assert "nixos-test" in out
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-analyze.nix b/nixpkgs/nixos/tests/systemd-analyze.nix
new file mode 100644
index 000000000000..a78ba08cd55c
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-analyze.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
+
+{
+  name = "systemd-analyze";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ raskin ];
+  };
+
+  machine =
+    { pkgs, lib, ... }:
+    { boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest;
+      sound.enable = true; # needed for the factl test: /dev/snd/* exists without this, but udev does not pick it up then
+    };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+
+    # We create a special output directory to copy it as a whole
+    with subtest("Prepare output dir"):
+        machine.succeed("mkdir systemd-analyze")
+
+
+    # Save the output into a file with given name inside the common
+    # output directory
+    def run_systemd_analyze(args, name):
+        tgt_dir = "systemd-analyze"
+        machine.succeed(
+            "systemd-analyze {} > {}/{} 2> {}/{}.err".format(
+                " ".join(args), tgt_dir, name, tgt_dir, name
+            )
+        )
+
+
+    with subtest("Print statistics"):
+        run_systemd_analyze(["blame"], "blame.txt")
+        run_systemd_analyze(["critical-chain"], "critical-chain.txt")
+        run_systemd_analyze(["dot"], "dependencies.dot")
+        run_systemd_analyze(["plot"], "systemd-analyze.svg")
+
+    # We copy the main graph into the $out (toplevel), and we also copy
+    # the entire output directory with additional data
+    with subtest("Copying the resulting data into $out"):
+        machine.copy_from_vm("systemd-analyze/", "")
+        machine.copy_from_vm("systemd-analyze/systemd-analyze.svg", "")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-confinement.nix b/nixpkgs/nixos/tests/systemd-confinement.nix
new file mode 100644
index 000000000000..f22836e227b0
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-confinement.nix
@@ -0,0 +1,168 @@
+import ./make-test.nix {
+  name = "systemd-confinement";
+
+  machine = { pkgs, lib, ... }: let
+    testServer = pkgs.writeScript "testserver.sh" ''
+      #!${pkgs.runtimeShell}
+      export PATH=${lib.escapeShellArg "${pkgs.coreutils}/bin"}
+      ${lib.escapeShellArg pkgs.runtimeShell} 2>&1
+      echo "exit-status:$?"
+    '';
+
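+    # The chroot-exec client below talks to the per-step test service through
+    # its Unix socket (selected via /teststep) and converts the server's
+    # trailing "exit-status:" line back into its own exit code.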
+    testClient = pkgs.writeScriptBin "chroot-exec" ''
+      #!${pkgs.runtimeShell} -e
+      output="$(echo "$@" | nc -NU "/run/test$(< /teststep).sock")"
+      ret="$(echo "$output" | sed -nre '$s/^exit-status:([0-9]+)$/\1/p')"
+      echo "$output" | head -n -1
+      exit "''${ret:-1}"
+    '';
+
+    mkTestStep = num: { description, config ? {}, testScript }: {
+      systemd.sockets."test${toString num}" = {
+        description = "Socket for Test Service ${toString num}";
+        wantedBy = [ "sockets.target" ];
+        socketConfig.ListenStream = "/run/test${toString num}.sock";
+        socketConfig.Accept = true;
+      };
+
+      systemd.services."test${toString num}@" = {
+        description = "Confined Test Service ${toString num}";
+        confinement = (config.confinement or {}) // { enable = true; };
+        serviceConfig = (config.serviceConfig or {}) // {
+          ExecStart = testServer;
+          StandardInput = "socket";
+        };
+      } // removeAttrs config [ "confinement" "serviceConfig" ];
+
+      __testSteps = lib.mkOrder num ''
+        subtest '${lib.escape ["\\" "'"] description}', sub {
+          $machine->succeed('echo ${toString num} > /teststep');
+          ${testScript}
+        };
+      '';
+    };
+
+  in {
+    imports = lib.imap1 mkTestStep [
+      { description = "chroot-only confinement";
+        config.confinement.mode = "chroot-only";
+        testScript = ''
+          $machine->succeed(
+            'test "$(chroot-exec ls -1 / | paste -sd,)" = bin,nix',
+            'test "$(chroot-exec id -u)" = 0',
+            'chroot-exec chown 65534 /bin',
+          );
+        '';
+      }
+      { description = "full confinement with APIVFS";
+        testScript = ''
+          $machine->fail(
+            'chroot-exec ls -l /etc',
+            'chroot-exec ls -l /run',
+            'chroot-exec chown 65534 /bin',
+          );
+          $machine->succeed(
+            'test "$(chroot-exec id -u)" = 0',
+            'chroot-exec chown 0 /bin',
+          );
+        '';
+      }
+      { description = "check existence of bind-mounted /etc";
+        config.serviceConfig.BindReadOnlyPaths = [ "/etc" ];
+        testScript = ''
+          $machine->succeed('test -n "$(chroot-exec cat /etc/passwd)"');
+        '';
+      }
+      { description = "check if User/Group really runs as non-root";
+        config.serviceConfig.User = "chroot-testuser";
+        config.serviceConfig.Group = "chroot-testgroup";
+        testScript = ''
+          $machine->succeed('chroot-exec ls -l /dev');
+          $machine->succeed('test "$(chroot-exec id -u)" != 0');
+          $machine->fail('chroot-exec touch /bin/test');
+        '';
+      }
+      (let
+        symlink = pkgs.runCommand "symlink" {
+          target = pkgs.writeText "symlink-target" "got me\n";
+        } "ln -s \"$target\" \"$out\"";
+      in {
+        description = "check if symlinks are properly bind-mounted";
+        config.confinement.packages = lib.singleton symlink;
+        testScript = ''
+          $machine->fail('chroot-exec test -e /etc');
+          $machine->succeed('chroot-exec cat ${symlink} >&2');
+          $machine->succeed('test "$(chroot-exec cat ${symlink})" = "got me"');
+        '';
+      })
+      { description = "check if StateDirectory works";
+        config.serviceConfig.User = "chroot-testuser";
+        config.serviceConfig.Group = "chroot-testgroup";
+        config.serviceConfig.StateDirectory = "testme";
+        testScript = ''
+          $machine->succeed('chroot-exec touch /tmp/canary');
+          $machine->succeed('chroot-exec "echo works > /var/lib/testme/foo"');
+          $machine->succeed('test "$(< /var/lib/testme/foo)" = works');
+          $machine->succeed('test ! -e /tmp/canary');
+        '';
+      }
+      { description = "check if /bin/sh works";
+        testScript = ''
+          $machine->succeed(
+            'chroot-exec test -e /bin/sh',
+            'test "$(chroot-exec \'/bin/sh -c "echo bar"\')" = bar',
+          );
+        '';
+      }
+      { description = "check if suppressing /bin/sh works";
+        config.confinement.binSh = null;
+        testScript = ''
+          $machine->succeed(
+            'chroot-exec test ! -e /bin/sh',
+            'test "$(chroot-exec \'/bin/sh -c "echo foo"\')" != foo',
+          );
+        '';
+      }
+      { description = "check if we can set /bin/sh to something different";
+        config.confinement.binSh = "${pkgs.hello}/bin/hello";
+        testScript = ''
+          $machine->succeed(
+            'chroot-exec test -e /bin/sh',
+            'test "$(chroot-exec /bin/sh -g foo)" = foo',
+          );
+        '';
+      }
+      { description = "check if only Exec* dependencies are included";
+        config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
+        testScript = ''
+          $machine->succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" != eek');
+        '';
+      }
+      { description = "check if all unit dependencies are included";
+        config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
+        config.confinement.fullUnit = true;
+        testScript = ''
+          $machine->succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" = eek');
+        '';
+      }
+    ];
+
+    options.__testSteps = lib.mkOption {
+      type = lib.types.lines;
+      description = "All of the test steps combined as a single script.";
+    };
+
+    config.environment.systemPackages = lib.singleton testClient;
+
+    config.users.groups.chroot-testgroup = {};
+    config.users.users.chroot-testuser = {
+      description = "Chroot Test User";
+      group = "chroot-testgroup";
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    $machine->waitForUnit('multi-user.target');
+    ${nodes.machine.config.__testSteps}
+  '';
+}
diff --git a/nixpkgs/nixos/tests/systemd-networkd-vrf.nix b/nixpkgs/nixos/tests/systemd-networkd-vrf.nix
new file mode 100644
index 000000000000..af7813a2e604
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-networkd-vrf.nix
@@ -0,0 +1,221 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
+  inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+in {
+  name = "systemd-networkd-vrf";
+  meta.maintainers = with lib.maintainers; [ ma27 ];
+
+  nodes = {
+    client = { pkgs, ... }: {
+      virtualisation.vlans = [ 1 2 ];
+
+      networking = {
+        useDHCP = false;
+        useNetworkd = true;
+        firewall.checkReversePath = "loose";
+      };
+
+      systemd.network = {
+        enable = true;
+
+        netdevs."10-vrf1" = {
+          netdevConfig = {
+            Kind = "vrf";
+            Name = "vrf1";
+            MTUBytes = "1300";
+          };
+          vrfConfig.Table = 23;
+        };
+        netdevs."10-vrf2" = {
+          netdevConfig = {
+            Kind = "vrf";
+            Name = "vrf2";
+            MTUBytes = "1300";
+          };
+          vrfConfig.Table = 42;
+        };
+
+        networks."10-vrf1" = {
+          matchConfig.Name = "vrf1";
+          networkConfig.IPForward = "yes";
+          routes = [
+            { routeConfig = { Destination = "192.168.1.2"; Metric = "100"; }; }
+          ];
+        };
+        networks."10-vrf2" = {
+          matchConfig.Name = "vrf2";
+          networkConfig.IPForward = "yes";
+          routes = [
+            { routeConfig = { Destination = "192.168.2.3"; Metric = "100"; }; }
+          ];
+        };
+
+        networks."10-eth1" = {
+          matchConfig.Name = "eth1";
+          linkConfig.RequiredForOnline = "no";
+          networkConfig = {
+            VRF = "vrf1";
+            Address = "192.168.1.1";
+            IPForward = "yes";
+          };
+        };
+        networks."10-eth2" = {
+          matchConfig.Name = "eth2";
+          linkConfig.RequiredForOnline = "no";
+          networkConfig = {
+            VRF = "vrf2";
+            Address = "192.168.2.1";
+            IPForward = "yes";
+          };
+        };
+      };
+    };
+
+    node1 = { pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      networking = {
+        useDHCP = false;
+        useNetworkd = true;
+      };
+
+      services.openssh.enable = true;
+      users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+
+      systemd.network = {
+        enable = true;
+
+        networks."10-eth1" = {
+          matchConfig.Name = "eth1";
+          linkConfig.RequiredForOnline = "no";
+          networkConfig = {
+            Address = "192.168.1.2";
+            IPForward = "yes";
+          };
+        };
+      };
+    };
+
+    node2 = { pkgs, ... }: {
+      virtualisation.vlans = [ 2 ];
+      networking = {
+        useDHCP = false;
+        useNetworkd = true;
+      };
+
+      systemd.network = {
+        enable = true;
+
+        networks."10-eth2" = {
+          matchConfig.Name = "eth2";
+          linkConfig.RequiredForOnline = "no";
+          networkConfig = {
+            Address = "192.168.2.3";
+            IPForward = "yes";
+          };
+        };
+      };
+    };
+
+    node3 = { pkgs, ... }: {
+      virtualisation.vlans = [ 2 ];
+      networking = {
+        useDHCP = false;
+        useNetworkd = true;
+      };
+
+      systemd.network = {
+        enable = true;
+
+        networks."10-eth2" = {
+          matchConfig.Name = "eth2";
+          linkConfig.RequiredForOnline = "no";
+          networkConfig = {
+            Address = "192.168.2.4";
+            IPForward = "yes";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    def compare_tables(expected, actual):
+        assert (
+            expected == actual
+        ), """
+        Routing tables don't match!
+        Expected:
+          {}
+        Actual:
+          {}
+        """.format(
+            expected, actual
+        )
+
+
+    start_all()
+
+    client.wait_for_unit("network.target")
+    node1.wait_for_unit("network.target")
+    node2.wait_for_unit("network.target")
+    node3.wait_for_unit("network.target")
+
+    client_ipv4_table = """
+    192.168.1.2 dev vrf1 proto static metric 100 
+    192.168.2.3 dev vrf2 proto static metric 100
+    """.strip()
+    vrf1_table = """
+    broadcast 192.168.1.0 dev eth1 proto kernel scope link src 192.168.1.1 
+    192.168.1.0/24 dev eth1 proto kernel scope link src 192.168.1.1 
+    local 192.168.1.1 dev eth1 proto kernel scope host src 192.168.1.1 
+    broadcast 192.168.1.255 dev eth1 proto kernel scope link src 192.168.1.1
+    """.strip()
+    vrf2_table = """
+    broadcast 192.168.2.0 dev eth2 proto kernel scope link src 192.168.2.1 
+    192.168.2.0/24 dev eth2 proto kernel scope link src 192.168.2.1 
+    local 192.168.2.1 dev eth2 proto kernel scope host src 192.168.2.1 
+    broadcast 192.168.2.255 dev eth2 proto kernel scope link src 192.168.2.1
+    """.strip()
+
+    # Check that networkd properly configures the main routing table
+    # and the routing tables for the VRF.
+    with subtest("check vrf routing tables"):
+        compare_tables(
+            client_ipv4_table, client.succeed("ip -4 route list | head -n2").strip()
+        )
+        compare_tables(
+            vrf1_table, client.succeed("ip -4 route list table 23 | head -n4").strip()
+        )
+        compare_tables(
+            vrf2_table, client.succeed("ip -4 route list table 42 | head -n4").strip()
+        )
+
+    # Ensure that other nodes are reachable via ICMP through the VRF.
+    with subtest("icmp through vrf works"):
+        client.succeed("ping -c5 192.168.1.2")
+        client.succeed("ping -c5 192.168.2.3")
+
+    # Test whether SSH through a VRF IP is possible.
+    # (Note: this seems to be broken on Linux 5.x, so the test is kept here,
+    # commented out, to catch the regression when updating the default kernel.)
+    # with subtest("tcp traffic through vrf works"):
+    #     node1.wait_for_open_port(22)
+    #     client.succeed(
+    #         "cat ${snakeOilPrivateKey} > privkey.snakeoil"
+    #     )
+    #     client.succeed("chmod 600 privkey.snakeoil")
+    #     client.succeed(
+    #         "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil root@192.168.1.2 true"
+    #     )
+
+    # Only configured routes through the VRF from the main routing table should
+    # work. Additional IPs are only reachable when binding to the vrf interface.
+    with subtest("only routes from main routing table work by default"):
+        client.fail("ping -c5 192.168.2.4")
+        client.succeed("ping -I vrf2 -c5 192.168.2.4")
+
+    client.shutdown()
+    node1.shutdown()
+    node2.shutdown()
+    node3.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-networkd.nix b/nixpkgs/nixos/tests/systemd-networkd.nix
new file mode 100644
index 000000000000..319e5e94eceb
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-networkd.nix
@@ -0,0 +1,113 @@
+let generateNodeConf = { lib, pkgs, config, privk, pubk, peerId, nodeId, ...}: {
+      imports = [ common/user-account.nix ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking.useNetworkd = true;
+      networking.useDHCP = false;
+      networking.firewall.enable = false;
+      virtualisation.vlans = [ 1 ];
+      environment.systemPackages = with pkgs; [ wireguard-tools ];
+      boot.extraModulePackages = [ config.boot.kernelPackages.wireguard ];
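+      # Provision the WireGuard private key via systemd-tmpfiles so that it is
+      # in place before networkd starts and is readable by the systemd-network
+      # group that networkd runs under.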
+      systemd.tmpfiles.rules = [
+        "f /run/wg_priv 0640 root systemd-network - ${privk}"
+      ];
+      systemd.network = {
+        enable = true;
+        netdevs = {
+          "90-wg0" = {
+            netdevConfig = { Kind = "wireguard"; Name = "wg0"; };
+            wireguardConfig = {
+              PrivateKeyFile = "/run/wg_priv";
+              ListenPort = 51820;
+              FwMark = 42;
+            };
+            wireguardPeers = [ {wireguardPeerConfig={
+              Endpoint = "192.168.1.${peerId}:51820";
+              PublicKey = pubk;
+              PresharedKeyFile = pkgs.writeText "psk.key" "yTL3sCOL33Wzi6yCnf9uZQl/Z8laSE+zwpqOHC4HhFU=";
+              AllowedIPs = [ "10.0.0.${peerId}/32" ];
+              PersistentKeepalive = 15;
+            };}];
+          };
+        };
+        networks = {
+          "99-nope" = {
+            matchConfig.Name = "eth*";
+            linkConfig.Unmanaged = true;
+          };
+          "90-wg0" = {
+            matchConfig = { Name = "wg0"; };
+            address = [ "10.0.0.${nodeId}/32" ];
+            routes = [
+              { routeConfig = { Gateway = "10.0.0.${nodeId}"; Destination = "10.0.0.0/24"; }; }
+            ];
+          };
+          "30-eth1" = {
+            matchConfig = { Name = "eth1"; };
+            address = [
+              "192.168.1.${nodeId}/24"
+              "fe80::${nodeId}/64"
+            ];
+            routingPolicyRules = [
+              { routingPolicyRuleConfig = { Table = 10; IncomingInterface = "eth1"; Family = "both"; };}
+              { routingPolicyRuleConfig = { Table = 20; OutgoingInterface = "eth1"; };}
+              { routingPolicyRuleConfig = { Table = 30; From = "192.168.1.1"; To = "192.168.1.2"; SourcePort = 666 ; DestinationPort = 667; };}
+              { routingPolicyRuleConfig = { Table = 40; IPProtocol = "tcp"; InvertRule = true; };}
+              { routingPolicyRuleConfig = { Table = 50; IncomingInterface = "eth1"; Family = "ipv4"; };}
+            ];
+          };
+        };
+      };
+    };
+in import ./make-test-python.nix ({pkgs, ... }: {
+  name = "networkd";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ninjatrappeur ];
+  };
+  nodes = {
+    node1 = { pkgs, ... }@attrs:
+    let localConf = {
+        privk = "GDiXWlMQKb379XthwX0haAbK6hTdjblllpjGX0heP00=";
+        pubk = "iRxpqj42nnY0Qz8MAQbSm7bXxXP5hkPqWYIULmvW+EE=";
+        nodeId = "1";
+        peerId = "2";
+    };
+    in generateNodeConf (attrs // localConf);
+
+    node2 = { pkgs, ... }@attrs:
+    let localConf = {
+        privk = "eHxSI2jwX/P4AOI0r8YppPw0+4NZnjOxfbS5mt06K2k=";
+        pubk = "27s0OvaBBdHoJYkH9osZpjpgSOVNw+RaKfboT/Sfq0g=";
+        nodeId = "2";
+        peerId = "1";
+    };
+    in generateNodeConf (attrs // localConf);
+  };
+  testScript = ''
+    start_all()
+    node1.wait_for_unit("systemd-networkd-wait-online.service")
+    node2.wait_for_unit("systemd-networkd-wait-online.service")
+
+    # ================================
+    # Wireguard
+    # ================================
+    node1.succeed("ping -c 5 10.0.0.2")
+    node2.succeed("ping -c 5 10.0.0.1")
+    # Is the fwmark set?
+    node2.succeed("wg | grep -q 42")
+
+    # ================================
+    # Routing Policies
+    # ================================
+    # Testing all the routingPolicyRuleConfig members:
+    # Table + IncomingInterface
+    node1.succeed("sudo ip rule | grep 'from all iif eth1 lookup 10'")
+    # OutgoingInterface
+    node1.succeed("sudo ip rule | grep 'from all oif eth1 lookup 20'")
+    # From + To + SourcePort + DestinationPort
+    node1.succeed(
+        "sudo ip rule | grep 'from 192.168.1.1 to 192.168.1.2 sport 666 dport 667 lookup 30'"
+    )
+    # IPProtocol + InvertRule
+    node1.succeed("sudo ip rule | grep 'not from all ipproto tcp lookup 40'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-nspawn.nix b/nixpkgs/nixos/tests/systemd-nspawn.nix
new file mode 100644
index 000000000000..5bf55060d2e0
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-nspawn.nix
@@ -0,0 +1,60 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+let
+  gpgKeyring = (pkgs.runCommand "gpg-keyring" { buildInputs = [ pkgs.gnupg ]; } ''
+    mkdir -p $out
+    export GNUPGHOME=$out
+    cat > foo <<EOF
+      %echo Generating a basic OpenPGP key
+      %no-protection
+      Key-Type: DSA
+      Key-Length: 1024
+      Subkey-Type: ELG-E
+      Subkey-Length: 1024
+      Name-Real: Joe Tester
+      Name-Email: joe@foo.bar
+      Expire-Date: 0
+      # Do a commit here, so that we can later print "done"
+      %commit
+      %echo done
+    EOF
+    gpg --batch --generate-key foo
+    rm $out/S.gpg-agent $out/S.gpg-agent.*
+    gpg --export joe@foo.bar -a > $out/pubkey.gpg
+  '');
+
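+  # Assemble the directory layout that `machinectl pull-raw --verify=signature`
+  # expects: the raw image, a SHA256SUMS manifest, and a detached signature
+  # SHA256SUMS.gpg made with the key generated above.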
+  nspawnImages = (pkgs.runCommand "localhost" { buildInputs = [ pkgs.coreutils pkgs.gnupg ]; } ''
+    mkdir -p $out
+    cd $out
+    dd if=/dev/urandom of=$out/testimage.raw bs=$((1024*1024+7)) count=5
+    sha256sum testimage.raw > SHA256SUMS
+    export GNUPGHOME="$(mktemp -d)"
+    cp -R ${gpgKeyring}/* $GNUPGHOME
+    gpg --batch --sign --detach-sign --output SHA256SUMS.gpg SHA256SUMS
+  '');
+in {
+  name = "systemd-nspawn";
+
+  nodes = {
+    server = { pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+      services.nginx = {
+        enable = true;
+        virtualHosts."server".root = nspawnImages;
+      };
+    };
+    client = { pkgs, ... }: {
+      environment.etc."systemd/import-pubring.gpg".source = "${gpgKeyring}/pubkey.gpg";
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("nginx.service")
+    client.wait_for_unit("network-online.target")
+    client.succeed("machinectl pull-raw --verify=signature http://server/testimage.raw")
+    client.succeed(
+        "cmp /var/lib/machines/testimage.raw ${nspawnImages}/testimage.raw"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-timesyncd.nix b/nixpkgs/nixos/tests/systemd-timesyncd.nix
new file mode 100644
index 000000000000..ad5b9a47383b
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-timesyncd.nix
@@ -0,0 +1,52 @@
+# Regression test for systemd-timesyncd having moved the state directory without
+# upstream providing a migration path. https://github.com/systemd/systemd/issues/12131
+
+import ./make-test-python.nix (let
+  common = { lib, ... }: {
+    # override the `false` value from the qemu-vm base profile
+    services.timesyncd.enable = lib.mkForce true;
+  };
+  mkVM = conf: { imports = [ conf common ]; };
+in {
+  name = "systemd-timesyncd";
+  nodes = {
+    current = mkVM {};
+    pre1909 = mkVM ({lib, ... }: with lib; {
+      # create the path that should be migrated by our activation script when
+      # upgrading to a newer nixos version
+      system.stateVersion = "19.03";
+      system.activationScripts.simulate-old-timesync-state-dir = mkBefore ''
+        rm -f /var/lib/systemd/timesync
+        mkdir -p /var/lib/systemd /var/lib/private/systemd/timesync
+        ln -s /var/lib/private/systemd/timesync /var/lib/systemd/timesync
+        chown systemd-timesync: /var/lib/private/systemd/timesync
+      '';
+    });
+  };
+
+  testScript = ''
+    start_all()
+    current.succeed("systemctl status systemd-timesyncd.service")
+    # on a new install with a recent systemd there should not be any
+    # leftovers from the dynamic user mess
+    current.succeed("test -e /var/lib/systemd/timesync")
+    current.succeed("test ! -L /var/lib/systemd/timesync")
+
+    # timesyncd should be running on the upgrading system since we fixed the
+    # file bits in the activation script
+    pre1909.succeed("systemctl status systemd-timesyncd.service")
+
+    # the path should be gone after the migration
+    pre1909.succeed("test ! -e /var/lib/private/systemd/timesync")
+
+    # and the new path should no longer be a symlink
+    pre1909.succeed("test -e /var/lib/systemd/timesync")
+    pre1909.succeed("test ! -L /var/lib/systemd/timesync")
+
+    # after a restart things should still work; the activation scripts must not
+    # fail and break the boot
+    pre1909.shutdown()
+    pre1909.start()
+    pre1909.succeed("systemctl status systemd-timesyncd.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd.nix b/nixpkgs/nixos/tests/systemd.nix
new file mode 100644
index 000000000000..ca2e36a443e9
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd.nix
@@ -0,0 +1,121 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "systemd";
+
+  machine = { lib, ... }: {
+    imports = [ common/user-account.nix common/x11.nix ];
+
+    virtualisation.emptyDiskImages = [ 512 ];
+
+    fileSystems = lib.mkVMOverride {
+      "/test-x-initrd-mount" = {
+        device = "/dev/vdb";
+        fsType = "ext2";
+        autoFormat = true;
+        noCheck = true;
+        options = [ "x-initrd.mount" ];
+      };
+    };
+
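+      # Decompress the test image into the VM state directory so that it can
+      # later be attached to the running machine as a USB stick.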
+    systemd.extraConfig = "DefaultEnvironment=\"XXX_SYSTEM=foo\"";
+    systemd.user.extraConfig = "DefaultEnvironment=\"XXX_USER=bar\"";
+    services.journald.extraConfig = "Storage=volatile";
+    test-support.displayManager.auto.user = "alice";
+
+    systemd.shutdown.test = pkgs.writeScript "test.shutdown" ''
+      #!${pkgs.runtimeShell}
+      PATH=${lib.makeBinPath (with pkgs; [ utillinux coreutils ])}
+      mount -t 9p shared -o trans=virtio,version=9p2000.L /tmp/shared
+      touch /tmp/shared/shutdown-test
+      umount /tmp/shared
+    '';
+
+    systemd.services.testservice1 = {
+      description = "Test Service 1";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.Type = "oneshot";
+      script = ''
+        if [ "$XXX_SYSTEM" = foo ]; then
+          touch /system_conf_read
+        fi
+      '';
+    };
+
+    systemd.user.services.testservice2 = {
+      description = "Test Service 2";
+      wantedBy = [ "default.target" ];
+      serviceConfig.Type = "oneshot";
+      script = ''
+        if [ "$XXX_USER" = bar ]; then
+          touch "$HOME/user_conf_read"
+        fi
+      '';
+    };
+  };
+
+  testScript = ''
+    import re
+    import subprocess
+
+    machine.wait_for_x()
+    # wait for user services
+    machine.wait_for_unit("default.target", "alice")
+
+    # Regression test for https://github.com/NixOS/nixpkgs/issues/35415
+    with subtest("configuration files are recognized by systemd"):
+        machine.succeed("test -e /system_conf_read")
+        machine.succeed("test -e /home/alice/user_conf_read")
+        machine.succeed("test -z $(ls -1 /var/log/journal)")
+
+    # Regression test for https://github.com/NixOS/nixpkgs/issues/50273
+    with subtest("DynamicUser actually allocates a user"):
+        assert "iamatest" in machine.succeed(
+            "systemd-run --pty --property=Type=oneshot --property=DynamicUser=yes --property=User=iamatest whoami"
+        )
+
+    # Regression test for https://github.com/NixOS/nixpkgs/issues/35268
+    with subtest("file system with x-initrd.mount is not unmounted"):
+        machine.succeed("mountpoint -q /test-x-initrd-mount")
+        machine.shutdown()
+
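+        # To check that the initrd unmounted the filesystem cleanly, convert
+        # the backing qcow2 image to raw and inspect the ext2 superblock; an
+        # unclean unmount would leave the filesystem state as "not clean".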
+        subprocess.check_call(
+            [
+                "qemu-img",
+                "convert",
+                "-O",
+                "raw",
+                "vm-state-machine/empty0.qcow2",
+                "x-initrd-mount.raw",
+            ]
+        )
+        extinfo = subprocess.check_output(
+            [
+                "${pkgs.e2fsprogs}/bin/dumpe2fs",
+                "x-initrd-mount.raw",
+            ]
+        ).decode("utf-8")
+        assert (
+            re.search(r"^Filesystem state: *clean$", extinfo, re.MULTILINE) is not None
+        ), ("File system was not cleanly unmounted: " + extinfo)
+
+    with subtest("systemd-shutdown works"):
+        machine.shutdown()
+        machine.wait_for_unit("multi-user.target")
+        machine.succeed("test -e /tmp/shared/shutdown-test")
+
+    # Test settings from /etc/sysctl.d/50-default.conf are applied
+    with subtest("systemd sysctl settings are applied"):
+        machine.wait_for_unit("multi-user.target")
+        assert "fq_codel" in machine.succeed("sysctl net.core.default_qdisc")
+
+    # Test cgroup accounting is enabled
+    with subtest("systemd cgroup accounting is enabled"):
+        machine.wait_for_unit("multi-user.target")
+        assert "yes" in machine.succeed(
+            "systemctl show testservice1.service -p IOAccounting"
+        )
+
+        retcode, output = machine.execute("systemctl status testservice1.service")
+        assert retcode in [0, 3]  # https://bugs.freedesktop.org/show_bug.cgi?id=77507
+        assert "CPU:" in output
+  '';
+})
diff --git a/nixpkgs/nixos/tests/taskserver.nix b/nixpkgs/nixos/tests/taskserver.nix
new file mode 100644
index 000000000000..ab9b589f8593
--- /dev/null
+++ b/nixpkgs/nixos/tests/taskserver.nix
@@ -0,0 +1,290 @@
+import ./make-test.nix ({ pkgs, ... }: let
+  snakeOil = pkgs.runCommand "snakeoil-certs" {
+    outputs = [ "out" "cacert" "cert" "key" "crl" ];
+    buildInputs = [ pkgs.gnutls.bin ];
+    caTemplate = pkgs.writeText "snakeoil-ca.template" ''
+      cn = server
+      expiration_days = -1
+      cert_signing_key
+      ca
+    '';
+    certTemplate = pkgs.writeText "snakeoil-cert.template" ''
+      cn = server
+      expiration_days = -1
+      tls_www_server
+      encryption_key
+      signing_key
+    '';
+    crlTemplate = pkgs.writeText "snakeoil-crl.template" ''
+      expiration_days = -1
+    '';
+    userCertTemplate = pkgs.writeText "snakeoil-user-cert.template" ''
+      organization = snakeoil
+      cn = server
+      expiration_days = -1
+      tls_www_client
+      encryption_key
+      signing_key
+    '';
+  } ''
+    certtool -p --bits 4096 --outfile ca.key
+    certtool -s --template "$caTemplate" --load-privkey ca.key \
+                --outfile "$cacert"
+    certtool -p --bits 4096 --outfile "$key"
+    certtool -c --template "$certTemplate" \
+                --load-ca-privkey ca.key \
+                --load-ca-certificate "$cacert" \
+                --load-privkey "$key" \
+                --outfile "$cert"
+    certtool --generate-crl --template "$crlTemplate" \
+                            --load-ca-privkey ca.key \
+                            --load-ca-certificate "$cacert" \
+                            --outfile "$crl"
+
+    mkdir "$out"
+
+    # Stripping key information before the actual PEM-encoded values is solely
+    # to make test output a bit less verbose when copying the client key to the
+    # actual client.
+    certtool -p --bits 4096 | sed -n \
+      -e '/^----* *BEGIN/,/^----* *END/p' > "$out/alice.key"
+
+    certtool -c --template "$userCertTemplate" \
+                --load-privkey "$out/alice.key" \
+                --load-ca-privkey ca.key \
+                --load-ca-certificate "$cacert" \
+                --outfile "$out/alice.cert"
+  '';
+
+in {
+  name = "taskserver";
+
+  nodes = rec {
+    server = {
+      services.taskserver.enable = true;
+      services.taskserver.listenHost = "::";
+      services.taskserver.fqdn = "server";
+      services.taskserver.organisations = {
+        testOrganisation.users = [ "alice" "foo" ];
+        anotherOrganisation.users = [ "bob" ];
+      };
+    };
+
+    # New generation of the server with manual config
+    newServer = { lib, nodes, ... }: {
+      imports = [ server ];
+      services.taskserver.pki.manual = {
+        ca.cert = snakeOil.cacert;
+        server.cert = snakeOil.cert;
+        server.key = snakeOil.key;
+        server.crl = snakeOil.crl;
+      };
+      # This is to avoid assigning a different network address to the new
+      # generation.
+      networking = lib.mapAttrs (lib.const lib.mkForce) {
+        interfaces.eth1.ipv4 = nodes.server.config.networking.interfaces.eth1.ipv4;
+        inherit (nodes.server.config.networking)
+          hostName primaryIPAddress extraHosts;
+      };
+    };
+
+    client1 = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.taskwarrior pkgs.gnutls ];
+      users.users.alice.isNormalUser = true;
+      users.users.bob.isNormalUser = true;
+      users.users.foo.isNormalUser = true;
+      users.users.bar.isNormalUser = true;
+    };
+
+    client2 = client1;
+  };
+
+  testScript = { nodes, ... }: let
+    cfg = nodes.server.config.services.taskserver;
+    portStr = toString cfg.listenPort;
+    newServerSystem = nodes.newServer.config.system.build.toplevel;
+    switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test";
+  in ''
+    sub su ($$) {
+      my ($user, $cmd) = @_;
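+      # The Nix interpolation in the pattern below yields a literal single
+      # quote, so the substitution rewrites each quote in $cmd into the
+      # standard shell idiom for embedding a quote inside a single-quoted
+      # string.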
+      my $esc = $cmd =~ s/'/'\\${"'"}'/gr;
+      return "su - $user -c '$esc'";
+    }
+
+    sub setupClientsFor ($$;$) {
+      my ($org, $user, $extraInit) = @_;
+
+      for my $client ($client1, $client2) {
+        $client->nest("initialize client for user $user", sub {
+          $client->succeed(
+            (su $user, "rm -rf /home/$user/.task"),
+            (su $user, "task rc.confirmation=no config confirmation no")
+          );
+
+          my $exportinfo = $server->succeed(
+            "nixos-taskserver user export $org $user"
+          );
+
+          $exportinfo =~ s/'/'\\'''/g;
+
+          $client->nest("importing taskwarrior configuration", sub {
+            my $cmd = su $user, "eval '$exportinfo' >&2";
+            my ($status, $out) = $client->execute_($cmd);
+            if ($status != 0) {
+              $client->log("output: $out");
+              die "command `$cmd' did not succeed (exit code $status)\n";
+            }
+          });
+
+          eval { &$extraInit($client, $org, $user) };
+
+          $client->succeed(su $user,
+            "task config taskd.server server:${portStr} >&2"
+          );
+
+          $client->succeed(su $user, "task sync init >&2");
+        });
+      }
+    }
+
+    sub restartServer {
+      $server->succeed("systemctl restart taskserver.service");
+      $server->waitForOpenPort(${portStr});
+    }
+
+    sub readdImperativeUser {
+      $server->nest("(re-)add imperative user bar", sub {
+        $server->execute("nixos-taskserver org remove imperativeOrg");
+        $server->succeed(
+          "nixos-taskserver org add imperativeOrg",
+          "nixos-taskserver user add imperativeOrg bar"
+        );
+        setupClientsFor "imperativeOrg", "bar";
+      });
+    }
+
+    sub testSync ($) {
+      my $user = $_[0];
+      subtest "sync for user $user", sub {
+        $client1->succeed(su $user, "task add foo >&2");
+        $client1->succeed(su $user, "task sync >&2");
+        $client2->fail(su $user, "task list >&2");
+        $client2->succeed(su $user, "task sync >&2");
+        $client2->succeed(su $user, "task list >&2");
+      };
+    }
+
+    sub checkClientCert ($) {
+      my $user = $_[0];
+      my $cmd = "gnutls-cli".
+        " --x509cafile=/home/$user/.task/keys/ca.cert".
+        " --x509keyfile=/home/$user/.task/keys/private.key".
+        " --x509certfile=/home/$user/.task/keys/public.cert".
+        " --port=${portStr} server < /dev/null";
+      return su $user, $cmd;
+    }
+
+    # Explicitly start the VMs so that we don't accidentally start newServer
+    $server->start;
+    $client1->start;
+    $client2->start;
+
+    $server->waitForUnit("taskserver.service");
+
+    $server->succeed(
+      "nixos-taskserver user list testOrganisation | grep -qxF alice",
+      "nixos-taskserver user list testOrganisation | grep -qxF foo",
+      "nixos-taskserver user list anotherOrganisation | grep -qxF bob"
+    );
+
+    $server->waitForOpenPort(${portStr});
+
+    $client1->waitForUnit("multi-user.target");
+    $client2->waitForUnit("multi-user.target");
+
+    setupClientsFor "testOrganisation", "alice";
+    setupClientsFor "testOrganisation", "foo";
+    setupClientsFor "anotherOrganisation", "bob";
+
+    testSync $_ for ("alice", "bob", "foo");
+
+    $server->fail("nixos-taskserver user add imperativeOrg bar");
+    readdImperativeUser;
+
+    testSync "bar";
+
+    subtest "checking certificate revocation of user bar", sub {
+      $client1->succeed(checkClientCert "bar");
+
+      $server->succeed("nixos-taskserver user remove imperativeOrg bar");
+      restartServer;
+
+      $client1->fail(checkClientCert "bar");
+
+      $client1->succeed(su "bar", "task add destroy everything >&2");
+      $client1->fail(su "bar", "task sync >&2");
+    };
+
+    readdImperativeUser;
+
+    subtest "checking certificate revocation of org imperativeOrg", sub {
+      $client1->succeed(checkClientCert "bar");
+
+      $server->succeed("nixos-taskserver org remove imperativeOrg");
+      restartServer;
+
+      $client1->fail(checkClientCert "bar");
+
+      $client1->succeed(su "bar", "task add destroy even more >&2");
+      $client1->fail(su "bar", "task sync >&2");
+    };
+
+    readdImperativeUser;
+
+    subtest "check whether declarative config overrides user bar", sub {
+      restartServer;
+      testSync "bar";
+    };
+
+    subtest "check manual configuration", sub {
+      # Remove the keys from automatic CA creation, to make sure the new
+      # generation doesn't use keys from before.
+      $server->succeed('rm -rf ${cfg.dataDir}/keys/* >&2');
+
+      $server->succeed('${switchToNewServer} >&2');
+      $server->waitForUnit("taskserver.service");
+      $server->waitForOpenPort(${portStr});
+
+      $server->succeed(
+        "nixos-taskserver org add manualOrg",
+        "nixos-taskserver user add manualOrg alice"
+      );
+
+      setupClientsFor "manualOrg", "alice", sub {
+        my ($client, $org, $user) = @_;
+        my $cfgpath = "/home/$user/.task";
+
+        $client->copyFileFromHost("${snakeOil.cacert}", "$cfgpath/ca.cert");
+        for my $file ("$user.key", "$user.cert") {
+          $client->copyFileFromHost("${snakeOil}/$file", "$cfgpath/$file");
+        }
+        $client->succeed(
+          (su "alice", "task config taskd.ca $cfgpath/ca.cert"),
+          (su "alice", "task config taskd.key $cfgpath/$user.key"),
+          (su $user, "task config taskd.certificate $cfgpath/$user.cert")
+        );
+      };
+
+      testSync "alice";
+    };
+  '';
+})
diff --git a/nixpkgs/nixos/tests/telegraf.nix b/nixpkgs/nixos/tests/telegraf.nix
new file mode 100644
index 000000000000..73f741b11357
--- /dev/null
+++ b/nixpkgs/nixos/tests/telegraf.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "telegraf";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mic92 ];
+  };
+
+  machine = { ... }: {
+    services.telegraf.enable = true;
+    services.telegraf.extraConfig = {
+      agent.interval = "1s";
+      agent.flush_interval = "1s";
+      inputs.exec = {
+        commands = [
+          "${pkgs.runtimeShell} -c 'echo example,tag=a i=42i'"
+        ];
+        timeout = "5s";
+        data_format = "influx";
+      };
+      outputs.file.files = ["/tmp/metrics.out"];
+      outputs.file.data_format = "influx";
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("telegraf.service")
+    machine.wait_until_succeeds("grep -q example /tmp/metrics.out")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/testdb.sql b/nixpkgs/nixos/tests/testdb.sql
new file mode 100644
index 000000000000..3c68c49ae82c
--- /dev/null
+++ b/nixpkgs/nixos/tests/testdb.sql
@@ -0,0 +1,11 @@
+create table tests
+( Id   INTEGER      NOT NULL,
+  Name VARCHAR(255) NOT NULL,
+  primary key(Id)
+);
+
+insert into tests values (1, 'a');
+insert into tests values (2, 'b');
+insert into tests values (3, 'c');
+insert into tests values (4, 'd');
+insert into tests values (5, 'hello');
diff --git a/nixpkgs/nixos/tests/tiddlywiki.nix b/nixpkgs/nixos/tests/tiddlywiki.nix
new file mode 100644
index 000000000000..cf45578b0f98
--- /dev/null
+++ b/nixpkgs/nixos/tests/tiddlywiki.nix
@@ -0,0 +1,69 @@
+import ./make-test-python.nix ({ ... }: {
+  name = "tiddlywiki";
+  nodes = {
+    default = {
+      services.tiddlywiki.enable = true;
+    };
+    configured = {
+      boot.postBootCommands = ''
+        echo "username,password
+        somelogin,somesecret" > /var/lib/wikiusers.csv
+      '';
+      services.tiddlywiki = {
+        enable = true;
+        listenOptions = {
+          port = 3000;
+          credentials="../wikiusers.csv";
+          readers="(authenticated)";
+        };
+      };
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      with subtest("by default works without configuration"):
+          default.wait_for_unit("tiddlywiki.service")
+
+      with subtest("by default available on port 8080 without auth"):
+          default.wait_for_unit("tiddlywiki.service")
+          default.wait_for_open_port(8080)
+          # we output to /dev/null here to avoid a python UTF-8 decode error
+          # but the check will still fail if the service doesn't respond
+          default.succeed("curl --fail -o /dev/null 127.0.0.1:8080")
+
+      with subtest("by default creates empty wiki"):
+          default.succeed("test -f /var/lib/tiddlywiki/tiddlywiki.info")
+
+      with subtest("configured on port 3000 with basic auth"):
+          configured.wait_for_unit("tiddlywiki.service")
+          configured.wait_for_open_port(3000)
+          configured.fail("curl --fail -o /dev/null 127.0.0.1:3000")
+          configured.succeed(
+              "curl --fail -o /dev/null 127.0.0.1:3000 --user somelogin:somesecret"
+          )
+
+      with subtest("restart preserves changes"):
+          # given running wiki
+          default.wait_for_unit("tiddlywiki.service")
+          # with some changes
+          default.succeed(
+              'curl --fail --request PUT --header \'X-Requested-With:TiddlyWiki\' \
+              --data \'{ "title": "title", "text": "content" }\' \
+              --url 127.0.0.1:8080/recipes/default/tiddlers/somepage '
+          )
+          default.succeed("sleep 2")
+
+          # when wiki is cycled
+          default.systemctl("restart tiddlywiki.service")
+          default.wait_for_unit("tiddlywiki.service")
+          default.wait_for_open_port(8080)
+
+          # the change is preserved
+          default.succeed(
+              "curl --fail -o /dev/null 127.0.0.1:8080/recipes/default/tiddlers/somepage"
+          )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/timezone.nix b/nixpkgs/nixos/tests/timezone.nix
new file mode 100644
index 000000000000..7fc9a5058eee
--- /dev/null
+++ b/nixpkgs/nixos/tests/timezone.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "timezone";
+  meta.maintainers = with pkgs.lib.maintainers; [ lheckemann ];
+
+  nodes = {
+    node_eutz = { pkgs, ... }: {
+      time.timeZone = "Europe/Amsterdam";
+    };
+
+    node_nulltz = { pkgs, ... }: {
+      time.timeZone = null;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+      node_eutz.wait_for_unit("dbus.socket")
+
+      with subtest("static - Ensure timezone change gives the correct result"):
+          node_eutz.fail("timedatectl set-timezone Asia/Tokyo")
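+          # Europe/Amsterdam was UTC+1 at the epoch, so rendering @0 in that
+          # zone must give 01:00:00.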
+          date_result = node_eutz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"')
+          assert date_result == "1970-01-01 01:00:00\n", "Timezone seems to be wrong"
+
+      node_nulltz.wait_for_unit("dbus.socket")
+
+      with subtest("imperative - Ensure timezone defaults to UTC"):
+          date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"')
+          print(date_result)
+          assert (
+              date_result == "1970-01-01 00:00:00\n"
+          ), "Timezone seems to be wrong (not UTC)"
+
+      with subtest("imperative - Ensure timezone adjustment produces expected result"):
+          node_nulltz.succeed("timedatectl set-timezone Asia/Tokyo")
+
+          # Adjustment should be taken into account
+          date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"')
+          print(date_result)
+          assert date_result == "1970-01-01 09:00:00\n", "Timezone was not adjusted"
+
+      with subtest("imperative - Ensure timezone adjustment persists across reboot"):
+          # Adjustment should persist across a reboot
+          node_nulltz.shutdown()
+          node_nulltz.wait_for_unit("dbus.socket")
+          date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"')
+          print(date_result)
+          assert (
+              date_result == "1970-01-01 09:00:00\n"
+          ), "Timezone adjustment was not persisted"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tinydns.nix b/nixpkgs/nixos/tests/tinydns.nix
new file mode 100644
index 000000000000..b80e3451700a
--- /dev/null
+++ b/nixpkgs/nixos/tests/tinydns.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ lib, ...} : {
+  name = "tinydns";
+  meta = {
+    maintainers = with lib.maintainers; [ basvandijk ];
+  };
+  nodes = {
+    nameserver = { config, lib, ... } : let
+      ip = (lib.head config.networking.interfaces.eth1.ipv4.addresses).address;
+    in {
+      networking.nameservers = [ ip ];
+      services.tinydns = {
+        enable = true;
+        inherit ip;
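+        # tinydns-data records: a "." line declares the zone (SOA, NS, and an A
+        # record for the nameserver), a "+" line adds an extra A record with an
+        # optional TTL.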
+        data = ''
+          .foo.bar:${ip}
+          +.bla.foo.bar:1.2.3.4:300
+        '';
+      };
+    };
+  };
+  testScript = ''
+    nameserver.start()
+    nameserver.wait_for_unit("tinydns.service")
+    nameserver.succeed("host bla.foo.bar 192.168.1.1 | grep '1\.2\.3\.4'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tor.nix b/nixpkgs/nixos/tests/tor.nix
new file mode 100644
index 000000000000..ad07231557c3
--- /dev/null
+++ b/nixpkgs/nixos/tests/tor.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ lib, ... }: with lib;
+
+rec {
+  name = "tor";
+  meta.maintainers = with maintainers; [ joachifm ];
+
+  common =
+    { ... }:
+    { boot.kernelParams = [ "audit=0" "apparmor=0" "quiet" ];
+      networking.firewall.enable = false;
+      networking.useDHCP = false;
+    };
+
+  nodes.client =
+    { pkgs, ... }:
+    { imports = [ common ];
+      environment.systemPackages = with pkgs; [ netcat ];
+      services.tor.enable = true;
+      services.tor.client.enable = true;
+      services.tor.controlPort = 9051;
+    };
+
+  testScript = ''
+    client.wait_for_unit("tor.service")
+    client.wait_for_open_port(9051)
+    assert "514 Authentication required." in client.succeed(
+        "echo GETINFO version | nc 127.0.0.1 9051"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/trac.nix b/nixpkgs/nixos/tests/trac.nix
new file mode 100644
index 000000000000..7953f8d41f77
--- /dev/null
+++ b/nixpkgs/nixos/tests/trac.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "trac";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mmahut ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.trac.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("trac.service")
+    machine.wait_for_open_port(8000)
+    machine.wait_until_succeeds("curl -L http://localhost:8000/ | grep 'Trac Powered'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/traefik.nix b/nixpkgs/nixos/tests/traefik.nix
new file mode 100644
index 000000000000..0e21a7cf8437
--- /dev/null
+++ b/nixpkgs/nixos/tests/traefik.nix
@@ -0,0 +1,87 @@
+# Test Traefik as a reverse proxy of a local web service
+# and a Docker container.
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "traefik";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ joko ];
+  };
+
+  nodes = {
+    client = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.curl ];
+    };
+    traefik = { config, pkgs, ... }: {
+      docker-containers.nginx = {
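+        # Since providers.docker.exposedByDefault is false (see below), these
+        # labels opt the container in and route Host(`nginx.traefik.test`)
+        # over the "web" entrypoint to it.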
+        extraDockerOptions = [
+          "-l" "traefik.enable=true"
+          "-l" "traefik.http.routers.nginx.entrypoints=web"
+          "-l" "traefik.http.routers.nginx.rule=Host(`nginx.traefik.test`)"
+        ];
+        image = "nginx-container";
+        imageFile = pkgs.dockerTools.examples.nginx;
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.traefik = {
+        enable = true;
+
+        dynamicConfigOptions = {
+          http.routers.simplehttp = {
+            rule = "Host(`simplehttp.traefik.test`)";
+            entryPoints = [ "web" ];
+            service = "simplehttp";
+          };
+
+          http.services.simplehttp = {
+            loadBalancer.servers = [{
+              url = "http://127.0.0.1:8000";
+            }];
+          };
+        };
+
+        staticConfigOptions = {
+          global = {
+            checkNewVersion = false;
+            sendAnonymousUsage = false;
+          };
+
+          entryPoints.web.address = ":80";
+
+          providers.docker.exposedByDefault = false;
+        };
+      };
+
+      systemd.services.simplehttp = {
+        script = "${pkgs.python3}/bin/python -m http.server 8000";
+        serviceConfig.Type = "simple";
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      users.users.traefik.extraGroups = [ "docker" ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    traefik.wait_for_unit("docker-nginx.service")
+    traefik.wait_until_succeeds("docker ps | grep nginx-container")
+    traefik.wait_for_unit("simplehttp.service")
+    traefik.wait_for_unit("traefik.service")
+    traefik.wait_for_open_port(80)
+    traefik.wait_for_unit("multi-user.target")
+
+    client.wait_for_unit("multi-user.target")
+
+    with subtest("Check that a container can be reached via Traefik"):
+        assert "Hello from NGINX" in client.succeed(
+            "curl -sSf -H Host:nginx.traefik.test http://traefik/"
+        )
+
+    with subtest("Check that dynamic configuration works"):
+        assert "Directory listing for " in client.succeed(
+            "curl -sSf -H Host:simplehttp.traefik.test http://traefik/"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/transmission.nix b/nixpkgs/nixos/tests/transmission.nix
new file mode 100644
index 000000000000..f4f2186be1ff
--- /dev/null
+++ b/nixpkgs/nixos/tests/transmission.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "transmission";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ coconnor ];
+  };
+
+  machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    networking.firewall.allowedTCPPorts = [ 9091 ];
+
+    services.transmission.enable = true;
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("transmission")
+      machine.shutdown()
+    '';
+})
diff --git a/nixpkgs/nixos/tests/trezord.nix b/nixpkgs/nixos/tests/trezord.nix
new file mode 100644
index 000000000000..8d908a522492
--- /dev/null
+++ b/nixpkgs/nixos/tests/trezord.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "trezord";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ mmahut "1000101" ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.trezord.enable = true;
+      services.trezord.emulator.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("trezord.service")
+    machine.wait_for_open_port(21325)
+    machine.wait_until_succeeds("curl -L http://localhost:21325/status/ | grep Version")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/trickster.nix b/nixpkgs/nixos/tests/trickster.nix
new file mode 100644
index 000000000000..e2ca00980d53
--- /dev/null
+++ b/nixpkgs/nixos/tests/trickster.nix
@@ -0,0 +1,37 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "trickster";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ "1000101" ];
+  };
+
+  nodes = {
+    prometheus = { ... }: {
+      services.prometheus.enable = true;
+      networking.firewall.allowedTCPPorts = [ 9090 ];
+    };
+    trickster = { ... }: {
+      services.trickster.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    prometheus.wait_for_unit("prometheus.service")
+    prometheus.wait_for_open_port(9090)
+    prometheus.wait_until_succeeds(
+        "curl -L http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'"
+    )
+    trickster.wait_for_unit("trickster.service")
+    trickster.wait_for_open_port(8082)
+    trickster.wait_for_open_port(9090)
+    trickster.wait_until_succeeds(
+        "curl -L http://localhost:8082/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'"
+    )
+    trickster.wait_until_succeeds(
+        "curl -L http://prometheus:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'"
+    )
+    trickster.wait_until_succeeds(
+        "curl -L http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'"
+    )
+  '';
+})
\ No newline at end of file
diff --git a/nixpkgs/nixos/tests/trilium-server.nix b/nixpkgs/nixos/tests/trilium-server.nix
new file mode 100644
index 000000000000..6346575b33da
--- /dev/null
+++ b/nixpkgs/nixos/tests/trilium-server.nix
@@ -0,0 +1,53 @@
+import ./make-test-python.nix ({ ... }: {
+  name = "trilium-server";
+  nodes = {
+    default = {
+      services.trilium-server.enable = true;
+    };
+    configured = {
+      services.trilium-server = {
+        enable = true;
+        dataDir = "/data/trilium";
+      };
+    };
+
+    nginx = {
+      services.trilium-server = {
+        enable = true;
+        nginx.enable = true;
+        nginx.hostName = "trilium.example.com";
+      };
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      with subtest("by default works without configuration"):
+          default.wait_for_unit("trilium-server.service")
+
+      with subtest("by default available on port 8080"):
+          default.wait_for_unit("trilium-server.service")
+          default.wait_for_open_port(8080)
+          # output goes to /dev/null to avoid a Python UTF-8 decode error;
+          # curl --fail still fails the check if the service doesn't respond
+          default.succeed("curl --fail -o /dev/null 127.0.0.1:8080")
+
+      with subtest("by default creates empty document"):
+          default.wait_for_unit("trilium-server.service")
+          default.succeed("test -f /var/lib/trilium/document.db")
+
+      with subtest("configured with custom data store"):
+          configured.wait_for_unit("trilium-server.service")
+          configured.succeed("test -f /data/trilium/document.db")
+
+      with subtest("nginx with custom host name"):
+          nginx.wait_for_unit("trilium-server.service")
+          nginx.wait_for_unit("nginx.service")
+
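+          # --resolve pins trilium.example.com to 127.0.0.1 without touching DNS.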
+          nginx.succeed(
+              "curl --resolve 'trilium.example.com:80:127.0.0.1' http://trilium.example.com/"
+          )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/tuptime.nix b/nixpkgs/nixos/tests/tuptime.nix
new file mode 100644
index 000000000000..36ce2b1ae192
--- /dev/null
+++ b/nixpkgs/nixos/tests/tuptime.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "tuptime";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ evils ];
+  };
+
+  machine = { pkgs, ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+    services.tuptime.enable = true;
+  };
+
+  testScript =
+    ''
+      # check that the service starts and reports the first boot
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("tuptime | grep 'System startups:[[:blank:]]*1'")
+      machine.succeed("tuptime | grep 'System uptime:[[:blank:]]*100.0%'")
+      machine.shutdown()
+
+      # restart machine and see if it correctly reports the reboot
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("tuptime | grep 'System startups:[[:blank:]]*2'")
+      machine.succeed("tuptime | grep 'System shutdowns:[[:blank:]]*1 ok'")
+      machine.shutdown()
+    '';
+})
+
diff --git a/nixpkgs/nixos/tests/udisks2.nix b/nixpkgs/nixos/tests/udisks2.nix
new file mode 100644
index 000000000000..64f5b6c40d20
--- /dev/null
+++ b/nixpkgs/nixos/tests/udisks2.nix
@@ -0,0 +1,69 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+
+  stick = pkgs.fetchurl {
+    url = "http://nixos.org/~eelco/nix/udisks-test.img.xz";
+    sha256 = "0was1xgjkjad91nipzclaz5biv3m4b2nk029ga6nk7iklwi19l8b";
+  };
+
+in
+
+{
+  name = "udisks2";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  machine =
+    { ... }:
+    { services.udisks2.enable = true;
+      imports = [ ./common/user-account.nix ];
+
+      security.polkit.extraConfig =
+        ''
+          polkit.addRule(function(action, subject) {
+            if (subject.user == "alice") return "yes";
+          });
+        '';
+    };
+
+  testScript =
+    ''
+      import lzma
+
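+      # Decompress the xz-compressed stick image into the VM state directory
+      # so QEMU can attach it below as a raw drive.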
+      with lzma.open(
+          "${stick}"
+      ) as data, open(machine.state_dir + "/usbstick.img", "wb") as stick:
+          stick.write(data.read())
+
+      machine.succeed("udisksctl info -b /dev/vda >&2")
+      machine.fail("udisksctl info -b /dev/sda1")
+
+      # Attach a USB stick and wait for it to show up.
+      machine.send_monitor_command(
+          f"drive_add 0 id=stick,if=none,file={stick.name},format=raw"
+      )
+      machine.send_monitor_command("device_add usb-storage,id=stick,drive=stick")
+      machine.wait_until_succeeds("udisksctl info -b /dev/sda1")
+      machine.succeed("udisksctl info -b /dev/sda1 | grep 'IdLabel:.*USBSTICK'")
+
+      # Mount the stick as a non-root user and do some stuff with it.
+      machine.succeed("su - alice -c 'udisksctl info -b /dev/sda1'")
+      machine.succeed("su - alice -c 'udisksctl mount -b /dev/sda1'")
+      machine.succeed(
+          "su - alice -c 'cat /run/media/alice/USBSTICK/test.txt' | grep -q 'Hello World'"
+      )
+      machine.succeed("su - alice -c 'echo foo > /run/media/alice/USBSTICK/bar.txt'")
+
+      # Unmounting the stick should make the mountpoint disappear.
+      machine.succeed("su - alice -c 'udisksctl unmount -b /dev/sda1'")
+      machine.fail("[ -d /run/media/alice/USBSTICK ]")
+
+      # Remove the USB stick.
+      machine.send_monitor_command("device_del stick")
+      machine.wait_until_fails("udisksctl info -b /dev/sda1")
+      machine.fail("[ -e /dev/sda ]")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/upnp.nix b/nixpkgs/nixos/tests/upnp.nix
new file mode 100644
index 000000000000..a7d837ea0708
--- /dev/null
+++ b/nixpkgs/nixos/tests/upnp.nix
@@ -0,0 +1,96 @@
+# This tests whether UPnP port mappings can be created using Miniupnpd
+# and Miniupnpc.
+# It runs a Miniupnpd service on one machine and verifies that a client
+# can create a port mapping through it using Miniupnpc. If that succeeds,
+# an external client tries to connect to the mapped port.
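+# The client requests the mapping via miniupnpc's CLI, roughly:
+#   upnpc -a <internal ip> <internal port> <external port> <protocol>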
+
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  internalRouterAddress = "192.168.3.1";
+  internalClient1Address = "192.168.3.2";
+  externalRouterAddress = "80.100.100.1";
+  externalClient2Address = "80.100.100.2";
+in
+{
+  name = "upnp";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ bobvanderlinden ];
+  };
+
+  nodes =
+    {
+      router =
+        { pkgs, nodes, ... }:
+        { virtualisation.vlans = [ 1 2 ];
+          networking.nat.enable = true;
+          networking.nat.internalInterfaces = [ "eth2" ];
+          networking.nat.externalInterface = "eth1";
+          networking.firewall.enable = true;
+          networking.firewall.trustedInterfaces = [ "eth2" ];
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = externalRouterAddress; prefixLength = 24; }
+          ];
+          networking.interfaces.eth2.ipv4.addresses = [
+            { address = internalRouterAddress; prefixLength = 24; }
+          ];
+          services.miniupnpd = {
+            enable = true;
+            externalInterface = "eth1";
+            internalIPs = [ "eth2" ];
+            appendConfig = ''
+              ext_ip=${externalRouterAddress}
+            '';
+          };
+        };
+
+      client1 =
+        { pkgs, nodes, ... }:
+        { environment.systemPackages = [ pkgs.miniupnpc_2 pkgs.netcat ];
+          virtualisation.vlans = [ 2 ];
+          networking.defaultGateway = internalRouterAddress;
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = internalClient1Address; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+
+          services.httpd.enable = true;
+          services.httpd.virtualHosts.localhost = {
+            listen = [{ ip = "*"; port = 9000; }];
+            adminAddr = "foo@example.org";
+            documentRoot = "/tmp";
+          };
+        };
+
+      client2 =
+        { pkgs, ... }:
+        { environment.systemPackages = [ pkgs.miniupnpc_2 ];
+          virtualisation.vlans = [ 1 ];
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = externalClient2Address; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+        };
+    };
+
+  testScript =
+    { nodes, ... }:
+    ''
+      start_all()
+
+      # Wait for network and miniupnpd.
+      router.wait_for_unit("network-online.target")
+      # $router.wait_for_unit("nat")
+      router.wait_for_unit("firewall.service")
+      router.wait_for_unit("miniupnpd")
+
+      client1.wait_for_unit("network-online.target")
+
+      client1.succeed("upnpc -a ${internalClient1Address} 9000 9000 TCP")
+
+      client1.wait_for_unit("httpd")
+      client2.wait_until_succeeds("curl http://${externalRouterAddress}:9000/")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/uwsgi.nix b/nixpkgs/nixos/tests/uwsgi.nix
new file mode 100644
index 000000000000..78a87147f55c
--- /dev/null
+++ b/nixpkgs/nixos/tests/uwsgi.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "uwsgi";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lnl7 ];
+  };
+  machine = { pkgs, ... }: {
+    services.uwsgi.enable = true;
+    services.uwsgi.plugins = [ "python3" ];
+    services.uwsgi.instance = {
+      type = "emperor";
+      vassals.hello = {
+        type = "normal";
+        master = true;
+        workers = 2;
+        http = ":8000";
+        module = "wsgi:application";
+        chdir = pkgs.writeTextDir "wsgi.py" ''
+          from flask import Flask
+          application = Flask(__name__)
+
+          @application.route("/")
+          def hello():
+              return "Hello World!"
+        '';
+        pythonPackages = self: with self; [ flask ];
+      };
+    };
+  };
+
+  testScript =
+    ''
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_unit("uwsgi.service")
+      machine.wait_for_open_port(8000)
+      assert "Hello World" in machine.succeed("curl -v 127.0.0.1:8000")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/vault.nix b/nixpkgs/nixos/tests/vault.nix
new file mode 100644
index 000000000000..ac8cf0703da5
--- /dev/null
+++ b/nixpkgs/nixos/tests/vault.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "vault";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lnl7 ];
+  };
+  machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.vault ];
+    environment.variables.VAULT_ADDR = "http://127.0.0.1:8200";
+    services.vault.enable = true;
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_unit("vault.service")
+      machine.wait_for_open_port(8200)
+      machine.succeed("vault operator init")
+      machine.succeed("vault status | grep Sealed | grep true")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/victoriametrics.nix b/nixpkgs/nixos/tests/victoriametrics.nix
new file mode 100644
index 000000000000..73ef8b728615
--- /dev/null
+++ b/nixpkgs/nixos/tests/victoriametrics.nix
@@ -0,0 +1,31 @@
+# This test runs VictoriaMetrics and checks that it is up and running
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "victoriametrics";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ yorickvp ];
+  };
+
+  nodes = {
+    one = { ... }: {
+      services.victoriametrics.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    one.wait_for_unit("victoriametrics.service")
+
+    # write some points and run a simple query
+    out = one.succeed(
+        "curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://localhost:8428/write'"
+    )
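+    # /write accepts InfluxDB line protocol; /api/v1/export returns stored series.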
+    cmd = """curl -s -G 'http://localhost:8428/api/v1/export' -d 'match={__name__!=""}'"""
+    # data takes a while to appear
+    one.wait_until_succeeds(f"[[ $({cmd} | wc -l) -ne 0 ]]")
+    out = one.succeed(cmd)
+    assert '"values":[123]' in out
+    assert '"values":[1.23]' in out
+  '';
+})
diff --git a/nixpkgs/nixos/tests/virtualbox.nix b/nixpkgs/nixos/tests/virtualbox.nix
new file mode 100644
index 000000000000..aec8da6a2af3
--- /dev/null
+++ b/nixpkgs/nixos/tests/virtualbox.nix
@@ -0,0 +1,526 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; },
+  debug ? false,
+  enableUnfree ? false,
+  # Nested KVM virtualization (https://www.linux-kvm.org/page/Nested_Guests)
+  # requires a modprobe option on the build machine (use kvm-amd on AMD CPUs):
+  #   boot.extraModprobeConfig = "options kvm-intel nested=Y";
+  # Without it, VirtualBox falls back to software virtualization and can only
+  # run 32-bit guests.
+  useKvmNestedVirt ? false,
+  # Whether to run 64-bit guests instead of 32-bit. Requires nested KVM.
+  use64bitGuest ? false
+}:
+
+assert use64bitGuest -> useKvmNestedVirt;
+
+with import ../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  testVMConfig = vmName: attrs: { config, pkgs, lib, ... }: let
+    guestAdditions = pkgs.linuxPackages.virtualboxGuestAdditions;
+
+    miniInit = ''
+      #!${pkgs.runtimeShell} -xe
+      export PATH="${lib.makeBinPath [ pkgs.coreutils pkgs.utillinux ]}"
+
+      mkdir -p /run/dbus
+      cat > /etc/passwd <<EOF
+      root:x:0:0::/root:/bin/false
+      messagebus:x:1:1::/run/dbus:/bin/false
+      EOF
+      cat > /etc/group <<EOF
+      root:x:0:
+      messagebus:x:1:
+      EOF
+
+      "${pkgs.dbus.daemon}/bin/dbus-daemon" --fork \
+        --config-file="${pkgs.dbus.daemon}/share/dbus-1/system.conf"
+
+      ${guestAdditions}/bin/VBoxService
+      ${(attrs.vmScript or (const "")) pkgs}
+
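+      # Wait for the host to drop a "shutdown" marker into the shared root.
+      # "fail" is not a real command, so with -xe the init aborts after 120s.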
+      i=0
+      while [ ! -e /mnt-root/shutdown ]; do
+        sleep 10
+        i=$(($i + 10))
+        [ $i -le 120 ] || fail
+      done
+
+      rm -f /mnt-root/boot-done /mnt-root/shutdown
+    '';
+  in {
+    boot.kernelParams = [
+      "console=tty0" "console=ttyS0" "ignore_loglevel"
+      "boot.trace" "panic=1" "boot.panic_on_fail"
+      "init=${pkgs.writeScript "mini-init.sh" miniInit}"
+    ];
+
+    fileSystems."/" = {
+      device = "vboxshare";
+      fsType = "vboxsf";
+    };
+
+    virtualisation.virtualbox.guest.enable = true;
+
+    boot.initrd.kernelModules = [
+      "af_packet" "vboxsf"
+      "virtio" "virtio_pci" "virtio_ring" "virtio_net" "vboxguest"
+    ];
+
+    boot.initrd.extraUtilsCommands = ''
+      copy_bin_and_libs "${guestAdditions}/bin/mount.vboxsf"
+      copy_bin_and_libs "${pkgs.utillinux}/bin/unshare"
+      ${(attrs.extraUtilsCommands or (const "")) pkgs}
+    '';
+
+    boot.initrd.postMountCommands = ''
+      touch /mnt-root/boot-done
+      hostname "${vmName}"
+      mkdir -p /nix/store
+      unshare -m ${escapeShellArg pkgs.runtimeShell} -c '
+        mount -t vboxsf nixstore /nix/store
+        exec "$stage2Init"
+      '
+      poweroff -f
+    '';
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isYes "SERIAL_8250_CONSOLE")
+      (isYes "SERIAL_8250")
+    ];
+  };
+
+  mkLog = logfile: tag: let
+    rotated = map (i: "${logfile}.${toString i}") (range 1 9);
+    all = concatMapStringsSep " " (f: "\"${f}\"") ([logfile] ++ rotated);
+    logcmd = "tail -F ${all} 2> /dev/null | logger -t \"${tag}\"";
+  in optionalString debug "$machine->execute(ru '${logcmd} & disown');";
+
+  testVM = vmName: vmScript: let
+    cfg = (import ../lib/eval-config.nix {
+      system = if use64bitGuest then "x86_64-linux" else "i686-linux";
+      modules = [
+        ../modules/profiles/minimal.nix
+        (testVMConfig vmName vmScript)
+      ];
+    }).config;
+  in pkgs.vmTools.runInLinuxVM (pkgs.runCommand "virtualbox-image" {
+    preVM = ''
+      mkdir -p "$out"
+      diskImage="$(pwd)/qimage"
+      ${pkgs.vmTools.qemu}/bin/qemu-img create -f raw "$diskImage" 100M
+    '';
+
+    postVM = ''
+      echo "creating VirtualBox disk image..."
+      ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -O vdi \
+        "$diskImage" "$out/disk.vdi"
+    '';
+
+    buildInputs = [ pkgs.utillinux pkgs.perl ];
+  } ''
+    ${pkgs.parted}/sbin/parted --script /dev/vda mklabel msdos
+    ${pkgs.parted}/sbin/parted --script /dev/vda -- mkpart primary ext2 1M -1s
+    ${pkgs.e2fsprogs}/sbin/mkfs.ext4 /dev/vda1
+    ${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
+    mkdir /mnt
+    mount /dev/vda1 /mnt
+    cp "${cfg.system.build.kernel}/bzImage" /mnt/linux
+    cp "${cfg.system.build.initialRamdisk}/initrd" /mnt/initrd
+
+    ${pkgs.grub2}/bin/grub-install --boot-directory=/mnt /dev/vda
+
+    cat > /mnt/grub/grub.cfg <<GRUB
+    set root=hd0,1
+    linux /linux ${concatStringsSep " " cfg.boot.kernelParams}
+    initrd /initrd
+    boot
+    GRUB
+    umount /mnt
+  '');
+
+  createVM = name: attrs: let
+    mkFlags = concatStringsSep " ";
+
+    sharePath = "/home/alice/vboxshare-${name}";
+
+    createFlags = mkFlags [
+      "--ostype ${if use64bitGuest then "Linux26_64" else "Linux26"}"
+      "--register"
+    ];
+
+    vmFlags = mkFlags ([
+      "--uart1 0x3F8 4"
+      "--uartmode1 client /run/virtualbox-log-${name}.sock"
+      "--memory 768"
+      "--audio none"
+    ] ++ (attrs.vmFlags or []));
+
+    controllerFlags = mkFlags [
+      "--name SATA"
+      "--add sata"
+      "--bootable on"
+      "--hostiocache on"
+    ];
+
+    diskFlags = mkFlags [
+      "--storagectl SATA"
+      "--port 0"
+      "--device 0"
+      "--type hdd"
+      "--mtype immutable"
+      "--medium ${testVM name attrs}/disk.vdi"
+    ];
+
+    sharedFlags = mkFlags [
+      "--name vboxshare"
+      "--hostpath ${sharePath}"
+    ];
+
+    nixstoreFlags = mkFlags [
+      "--name nixstore"
+      "--hostpath /nix/store"
+      "--readonly"
+    ];
+  in {
+    machine = {
+      systemd.sockets."vboxtestlog-${name}" = {
+        description = "VirtualBox Test Machine Log Socket For ${name}";
+        wantedBy = [ "sockets.target" ];
+        before = [ "multi-user.target" ];
+        socketConfig.ListenStream = "/run/virtualbox-log-${name}.sock";
+        socketConfig.Accept = true;
+      };
+
+      systemd.services."vboxtestlog-${name}@" = {
+        description = "VirtualBox Test Machine Log For ${name}";
+        serviceConfig.StandardInput = "socket";
+        serviceConfig.StandardOutput = "syslog";
+        serviceConfig.SyslogIdentifier = "GUEST-${name}";
+        serviceConfig.ExecStart = "${pkgs.coreutils}/bin/cat";
+      };
+    };
+
+    testSubs = ''
+      my ${"$" + name}_sharepath = '${sharePath}';
+
+      sub checkRunning_${name} {
+        my $cmd = 'VBoxManage list runningvms | grep -q "^\"${name}\""';
+        my ($status, $out) = $machine->execute(ru $cmd);
+        return $status == 0;
+      }
+
+      sub cleanup_${name} {
+        $machine->execute(ru "VBoxManage controlvm ${name} poweroff")
+          if checkRunning_${name};
+        $machine->succeed("rm -rf ${sharePath}");
+        $machine->succeed("mkdir -p ${sharePath}");
+        $machine->succeed("chown alice.users ${sharePath}");
+      }
+
+      sub createVM_${name} {
+        vbm("createvm --name ${name} ${createFlags}");
+        vbm("modifyvm ${name} ${vmFlags}");
+        vbm("setextradata ${name} VBoxInternal/PDM/HaltOnReset 1");
+        vbm("storagectl ${name} ${controllerFlags}");
+        vbm("storageattach ${name} ${diskFlags}");
+        vbm("sharedfolder add ${name} ${sharedFlags}");
+        vbm("sharedfolder add ${name} ${nixstoreFlags}");
+        cleanup_${name};
+
+        ${mkLog "$HOME/VirtualBox VMs/${name}/Logs/VBox.log" "HOST-${name}"}
+      }
+
+      sub destroyVM_${name} {
+        cleanup_${name};
+        vbm("unregistervm ${name} --delete");
+      }
+
+      sub waitForVMBoot_${name} {
+        $machine->execute(ru(
+          'set -e; i=0; '.
+          'while ! test -e ${sharePath}/boot-done; do '.
+          'sleep 10; i=$(($i + 10)); [ $i -le 3600 ]; '.
+          'VBoxManage list runningvms | grep -q "^\"${name}\""; '.
+          'done'
+        ));
+      }
+
+      sub waitForIP_${name} ($) {
+        my $property = "/VirtualBox/GuestInfo/Net/$_[0]/V4/IP";
+        my $getip = "VBoxManage guestproperty get ${name} $property | ".
+                    "sed -n -e 's/^Value: //p'";
+        my $ip = $machine->succeed(ru(
+          'for i in $(seq 1000); do '.
+          'if ipaddr="$('.$getip.')" && [ -n "$ipaddr" ]; then '.
+          'echo "$ipaddr"; exit 0; '.
+          'fi; '.
+          'sleep 1; '.
+          'done; '.
+          'echo "Could not get IPv4 address for ${name}!" >&2; '.
+          'exit 1'
+        ));
+        chomp $ip;
+        return $ip;
+      }
+
+      sub waitForStartup_${name} {
+        for (my $i = 0; $i <= 120; $i += 10) {
+          $machine->sleep(10);
+          return if checkRunning_${name};
+          eval { $_[0]->() } if defined $_[0];
+        }
+        die "VirtualBox VM didn't start up within 2 minutes";
+      }
+
+      sub waitForShutdown_${name} {
+        for (my $i = 0; $i <= 120; $i += 10) {
+          $machine->sleep(10);
+          return unless checkRunning_${name};
+        }
+        die "VirtualBox VM didn't shut down within 2 minutes";
+      }
+
+      sub shutdownVM_${name} {
+        $machine->succeed(ru "touch ${sharePath}/shutdown");
+        $machine->execute(
+          'set -e; i=0; '.
+          'while test -e ${sharePath}/shutdown '.
+          '        -o -e ${sharePath}/boot-done; do '.
+          'sleep 1; i=$(($i + 1)); [ $i -le 3600 ]; '.
+          'done'
+        );
+        waitForShutdown_${name};
+      }
+    '';
+  };
+
+  hostonlyVMFlags = [
+    "--nictype1 virtio"
+    "--nictype2 virtio"
+    "--nic2 hostonly"
+    "--hostonlyadapter2 vboxnet0"
+  ];
+
+  # The VirtualBox Oracle Extension Pack lets you use USB 3.0 (xHCI).
+  enableExtensionPackVMFlags = [
+    "--usbxhci on"
+  ];
+
+  dhcpScript = pkgs: ''
+    ${pkgs.dhcp}/bin/dhclient \
+      -lf /run/dhcp.leases \
+      -pf /run/dhclient.pid \
+      -v eth0 eth1
+
+    otherIP="$(${pkgs.netcat}/bin/nc -l 1234 || :)"
+    ${pkgs.iputils}/bin/ping -I eth1 -c1 "$otherIP"
+    echo "$otherIP reachable" | ${pkgs.netcat}/bin/nc -l 5678 || :
+  '';
+
+  sysdDetectVirt = pkgs: ''
+    ${pkgs.systemd}/bin/systemd-detect-virt > /mnt-root/result
+  '';
+
+  vboxVMs = mapAttrs createVM {
+    simple = {};
+
+    detectvirt.vmScript = sysdDetectVirt;
+
+    test1.vmFlags = hostonlyVMFlags;
+    test1.vmScript = dhcpScript;
+
+    test2.vmFlags = hostonlyVMFlags;
+    test2.vmScript = dhcpScript;
+
+    headless.virtualisation.virtualbox.headless = true;
+    headless.services.xserver.enable = false;
+  };
+
+  vboxVMsWithExtpack = mapAttrs createVM {
+    testExtensionPack.vmFlags = enableExtensionPackVMFlags;
+  };
+
+  mkVBoxTest = useExtensionPack: vms: name: testScript: makeTest {
+    name = "virtualbox-${name}";
+
+    machine = { lib, config, ... }: {
+      imports = let
+        mkVMConf = name: val: val.machine // { key = "${name}-config"; };
+        vmConfigs = mapAttrsToList mkVMConf vms;
+      in [ ./common/user-account.nix ./common/x11.nix ] ++ vmConfigs;
+      virtualisation.memorySize = 2048;
+      virtualisation.qemu.options =
+        if useKvmNestedVirt then ["-cpu" "kvm64,vmx=on"] else [];
+      virtualisation.virtualbox.host.enable = true;
+      test-support.displayManager.auto.user = "alice";
+      users.users.alice.extraGroups = let
+        inherit (config.virtualisation.virtualbox.host) enableHardening;
+      in lib.mkIf enableHardening (lib.singleton "vboxusers");
+      virtualisation.virtualbox.host.enableExtensionPack = useExtensionPack;
+      nixpkgs.config.allowUnfree = useExtensionPack;
+    };
+
+    testScript = ''
+      sub ru ($) {
+        my $esc = $_[0] =~ s/'/'\\${"'"}'/gr;
+        return "su - alice -c '$esc'";
+      }
+
+      sub vbm {
+        $machine->succeed(ru("VBoxManage ".$_[0]));
+      };
+
+      sub removeUUIDs {
+        return join("\n", grep { $_ !~ /^UUID:/ } split(/\n/, $_[0]))."\n";
+      }
+
+      ${concatStrings (mapAttrsToList (_: getAttr "testSubs") vms)}
+
+      $machine->waitForX;
+
+      ${mkLog "$HOME/.config/VirtualBox/VBoxSVC.log" "HOST-SVC"}
+
+      ${testScript}
+    '';
+
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ aszlig cdepillabout ];
+    };
+  };
+
+  unfreeTests = mapAttrs (mkVBoxTest true vboxVMsWithExtpack) {
+    enable-extension-pack = ''
+      createVM_testExtensionPack;
+      vbm("startvm testExtensionPack");
+      waitForStartup_testExtensionPack;
+      $machine->screenshot("cli_started");
+      waitForVMBoot_testExtensionPack;
+      $machine->screenshot("cli_booted");
+
+      $machine->nest("Checking for privilege escalation", sub {
+        $machine->fail("test -e '/root/VirtualBox VMs'");
+        $machine->fail("test -e '/root/.config/VirtualBox'");
+        $machine->succeed("test -e '/home/alice/VirtualBox VMs'");
+      });
+
+      shutdownVM_testExtensionPack;
+      destroyVM_testExtensionPack;
+    '';
+  };
+
+in mapAttrs (mkVBoxTest false vboxVMs) {
+  simple-gui = ''
+    createVM_simple;
+    $machine->succeed(ru "VirtualBox &");
+    $machine->waitUntilSucceeds(
+      ru "xprop -name 'Oracle VM VirtualBox Manager'"
+    );
+    $machine->sleep(5);
+    $machine->screenshot("gui_manager_started");
+    # Home to select Tools, down to move to the VM, enter to start it.
+    $machine->sendKeys("home");
+    $machine->sendKeys("down");
+    $machine->sendKeys("ret");
+    $machine->screenshot("gui_manager_sent_startup");
+    waitForStartup_simple (sub {
+      $machine->sendKeys("home");
+      $machine->sendKeys("down");
+      $machine->sendKeys("ret");
+    });
+    $machine->screenshot("gui_started");
+    waitForVMBoot_simple;
+    $machine->screenshot("gui_booted");
+    shutdownVM_simple;
+    $machine->sleep(5);
+    $machine->screenshot("gui_stopped");
+    $machine->sendKeys("ctrl-q");
+    $machine->sleep(5);
+    $machine->screenshot("gui_manager_stopped");
+    destroyVM_simple;
+  '';
+
+  simple-cli = ''
+    createVM_simple;
+    vbm("startvm simple");
+    waitForStartup_simple;
+    $machine->screenshot("cli_started");
+    waitForVMBoot_simple;
+    $machine->screenshot("cli_booted");
+
+    $machine->nest("Checking for privilege escalation", sub {
+      $machine->fail("test -e '/root/VirtualBox VMs'");
+      $machine->fail("test -e '/root/.config/VirtualBox'");
+      $machine->succeed("test -e '/home/alice/VirtualBox VMs'");
+    });
+
+    shutdownVM_simple;
+    destroyVM_simple;
+  '';
+
+  headless = ''
+    createVM_headless;
+    $machine->succeed(ru("VBoxHeadless --startvm headless & disown %1"));
+    waitForStartup_headless;
+    waitForVMBoot_headless;
+    shutdownVM_headless;
+    destroyVM_headless;
+  '';
+
+  host-usb-permissions = ''
+    my $userUSB = removeUUIDs vbm("list usbhost");
+    print STDERR $userUSB;
+    my $rootUSB = removeUUIDs $machine->succeed("VBoxManage list usbhost");
+    print STDERR $rootUSB;
+
+    die "USB host devices differ for root and normal user"
+      if $userUSB ne $rootUSB;
+    die "No USB host devices found" if $userUSB =~ /<none>/;
+  '';
+
+  systemd-detect-virt = ''
+    createVM_detectvirt;
+    vbm("startvm detectvirt");
+    waitForStartup_detectvirt;
+    waitForVMBoot_detectvirt;
+    shutdownVM_detectvirt;
+    my $result = $machine->succeed("cat '$detectvirt_sharepath/result'");
+    chomp $result;
+    destroyVM_detectvirt;
+    die "systemd-detect-virt returned \"$result\" instead of \"oracle\""
+      if $result ne "oracle";
+  '';
+
+  net-hostonlyif = ''
+    createVM_test1;
+    createVM_test2;
+
+    vbm("startvm test1");
+    waitForStartup_test1;
+    waitForVMBoot_test1;
+
+    vbm("startvm test2");
+    waitForStartup_test2;
+    waitForVMBoot_test2;
+
+    $machine->screenshot("net_booted");
+
+    my $test1IP = waitForIP_test1 1;
+    my $test2IP = waitForIP_test2 1;
+
+    $machine->succeed("echo '$test2IP' | nc -N '$test1IP' 1234");
+    $machine->succeed("echo '$test1IP' | nc -N '$test2IP' 1234");
+
+    $machine->waitUntilSucceeds("nc -N '$test1IP' 5678 < /dev/null >&2");
+    $machine->waitUntilSucceeds("nc -N '$test2IP' 5678 < /dev/null >&2");
+
+    shutdownVM_test1;
+    shutdownVM_test2;
+
+    destroyVM_test1;
+    destroyVM_test2;
+  '';
+} // (if enableUnfree then unfreeTests else {})
diff --git a/nixpkgs/nixos/tests/wireguard/default.nix b/nixpkgs/nixos/tests/wireguard/default.nix
new file mode 100644
index 000000000000..e3bc31c600f9
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/default.nix
@@ -0,0 +1,71 @@
+import ../make-test-python.nix ({ pkgs, lib, ...} :
+  let
+    wg-snakeoil-keys = import ./snakeoil-keys.nix;
+    peer = (import ./make-peer.nix) { inherit lib; };
+  in
+  {
+    name = "wireguard";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ ma27 ];
+    };
+
+    nodes = {
+      peer0 = peer {
+        ip4 = "192.168.0.1";
+        ip6 = "fd00::1";
+        extraConfig = {
+          networking.firewall.allowedUDPPorts = [ 23542 ];
+          networking.wireguard.interfaces.wg0 = {
+            ips = [ "10.23.42.1/32" "fc00::1/128" ];
+            listenPort = 23542;
+
+            inherit (wg-snakeoil-keys.peer0) privateKey;
+
+            peers = lib.singleton {
+              allowedIPs = [ "10.23.42.2/32" "fc00::2/128" ];
+
+              inherit (wg-snakeoil-keys.peer1) publicKey;
+            };
+          };
+        };
+      };
+
+      peer1 = peer {
+        ip4 = "192.168.0.2";
+        ip6 = "fd00::2";
+        extraConfig = {
+          networking.wireguard.interfaces.wg0 = {
+            ips = [ "10.23.42.2/32" "fc00::2/128" ];
+            listenPort = 23542;
+            allowedIPsAsRoutes = false;
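+            # Routes are added manually in postSetup below instead of being
+            # derived from allowedIPs.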
+
+            inherit (wg-snakeoil-keys.peer1) privateKey;
+
+            peers = lib.singleton {
+              allowedIPs = [ "0.0.0.0/0" "::/0" ];
+              endpoint = "192.168.0.1:23542";
+              persistentKeepalive = 25;
+
+              inherit (wg-snakeoil-keys.peer0) publicKey;
+            };
+
+            postSetup = let inherit (pkgs) iproute; in ''
+              ${iproute}/bin/ip route replace 10.23.42.1/32 dev wg0
+              ${iproute}/bin/ip route replace fc00::1/128 dev wg0
+            '';
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      peer0.wait_for_unit("wireguard-wg0.service")
+      peer1.wait_for_unit("wireguard-wg0.service")
+
+      peer1.succeed("ping -c5 fc00::1")
+      peer1.succeed("ping -c5 10.23.42.1")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/wireguard/generated.nix b/nixpkgs/nixos/tests/wireguard/generated.nix
new file mode 100644
index 000000000000..a29afd2d4666
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/generated.nix
@@ -0,0 +1,61 @@
+import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "wireguard-generated";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ma27 grahamc ];
+  };
+
+  nodes = {
+    peer1 = {
+      networking.firewall.allowedUDPPorts = [ 12345 ];
+      networking.wireguard.interfaces.wg0 = {
+        ips = [ "10.10.10.1/24" ];
+        listenPort = 12345;
+        privateKeyFile = "/etc/wireguard/private";
+        generatePrivateKeyFile = true;
+
+      };
+    };
+
+    peer2 = {
+      networking.firewall.allowedUDPPorts = [ 12345 ];
+      networking.wireguard.interfaces.wg0 = {
+        ips = [ "10.10.10.2/24" ];
+        listenPort = 12345;
+        privateKeyFile = "/etc/wireguard/private";
+        generatePrivateKeyFile = true;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    peer1.wait_for_unit("wireguard-wg0.service")
+    peer2.wait_for_unit("wireguard-wg0.service")
+
+    retcode, peer1pubkey = peer1.execute("wg pubkey < /etc/wireguard/private")
+    if retcode != 0:
+        raise Exception("Could not read public key from peer1")
+
+    retcode, peer2pubkey = peer2.execute("wg pubkey < /etc/wireguard/private")
+    if retcode != 0:
+        raise Exception("Could not read public key from peer2")
+
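+    # Private keys are generated at boot, so the peers exchange public keys and
+    # are wired up imperatively with "wg set" instead of static Nix config.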
+    peer1.succeed(
+        "wg set wg0 peer {} allowed-ips 10.10.10.2/32 endpoint 192.168.1.2:12345 persistent-keepalive 1".format(
+            peer2pubkey.strip()
+        )
+    )
+    peer1.succeed("ip route replace 10.10.10.2/32 dev wg0 table main")
+
+    peer2.succeed(
+        "wg set wg0 peer {} allowed-ips 10.10.10.1/32 endpoint 192.168.1.1:12345 persistent-keepalive 1".format(
+            peer1pubkey.strip()
+        )
+    )
+    peer2.succeed("ip route replace 10.10.10.1/32 dev wg0 table main")
+
+    peer1.succeed("ping -c1 10.10.10.2")
+    peer2.succeed("ping -c1 10.10.10.1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/wireguard/make-peer.nix b/nixpkgs/nixos/tests/wireguard/make-peer.nix
new file mode 100644
index 000000000000..d2740549738b
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/make-peer.nix
@@ -0,0 +1,23 @@
+{ lib, ... }: { ip4, ip6, extraConfig }:
+lib.mkMerge [
+  {
+    boot.kernel.sysctl = {
+      "net.ipv6.conf.all.forwarding" = "1";
+      "net.ipv6.conf.default.forwarding" = "1";
+      "net.ipv4.ip_forward" = "1";
+    };
+
+    networking.useDHCP = false;
+    networking.interfaces.eth1 = {
+      ipv4.addresses = [{
+        address = ip4;
+        prefixLength = 24;
+      }];
+      ipv6.addresses = [{
+        address = ip6;
+        prefixLength = 64;
+      }];
+    };
+  }
+  extraConfig
+]
diff --git a/nixpkgs/nixos/tests/wireguard/namespaces.nix b/nixpkgs/nixos/tests/wireguard/namespaces.nix
new file mode 100644
index 000000000000..c8a4e3bb52a1
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/namespaces.nix
@@ -0,0 +1,78 @@
+let
+  listenPort = 12345;
+  socketNamespace = "foo";
+  interfaceNamespace = "bar";
+  node = {
+    networking.wireguard.interfaces.wg0 = {
+      listenPort = listenPort;
+      ips = [ "10.10.10.1/24" ];
+      privateKeyFile = "/etc/wireguard/private";
+      generatePrivateKeyFile = true;
+    };
+  };
+
+in
+
+import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "wireguard-with-namespaces";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ asymmetric ];
+  };
+
+  nodes = {
+    # interface should be created in the socketNamespace
+    # and not moved from there
+    peer0 = pkgs.lib.attrsets.recursiveUpdate node {
+      networking.wireguard.interfaces.wg0 = {
+        preSetup = ''
+          ip netns add ${socketNamespace}
+        '';
+        inherit socketNamespace;
+      };
+    };
+    # interface should be created in the init namespace
+    # and moved to the interfaceNamespace
+    peer1 = pkgs.lib.attrsets.recursiveUpdate node {
+      networking.wireguard.interfaces.wg0 = {
+        preSetup = ''
+          ip netns add ${interfaceNamespace}
+        '';
+        inherit interfaceNamespace;
+      };
+    };
+    # interface should be created in the socketNamespace
+    # and moved to the interfaceNamespace
+    peer2 = pkgs.lib.attrsets.recursiveUpdate node {
+      networking.wireguard.interfaces.wg0 = {
+        preSetup = ''
+          ip netns add ${socketNamespace}
+          ip netns add ${interfaceNamespace}
+        '';
+        inherit socketNamespace interfaceNamespace;
+      };
+    };
+    # interface should be created in the socketNamespace
+    # and moved to the init namespace
+    peer3 = pkgs.lib.attrsets.recursiveUpdate node {
+      networking.wireguard.interfaces.wg0 = {
+        preSetup = ''
+          ip netns add ${socketNamespace}
+        '';
+        inherit socketNamespace;
+        interfaceNamespace = "init";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    for machine in peer0, peer1, peer2, peer3:
+        machine.wait_for_unit("wireguard-wg0.service")
+
+    peer0.succeed("ip -n ${socketNamespace} link show wg0")
+    peer1.succeed("ip -n ${interfaceNamespace} link show wg0")
+    peer2.succeed("ip -n ${interfaceNamespace} link show wg0")
+    peer3.succeed("ip link show wg0")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/wireguard/snakeoil-keys.nix b/nixpkgs/nixos/tests/wireguard/snakeoil-keys.nix
new file mode 100644
index 000000000000..55ad582d4059
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/snakeoil-keys.nix
@@ -0,0 +1,11 @@
+{
+  peer0 = {
+    privateKey = "OPuVRS2T0/AtHDp3PXkNuLQYDiqJaBEEnYe42BSnJnQ=";
+    publicKey = "IujkG119YPr2cVQzJkSLYCdjpHIDjvr/qH1w1tdKswY=";
+  };
+
+  peer1 = {
+    privateKey = "uO8JVo/sanx2DOM0L9GUEtzKZ82RGkRnYgpaYc7iXmg=";
+    publicKey = "Ks9yRJIi/0vYgRmn14mIOQRwkcUGBujYINbMpik2SBI=";
+  };
+}
diff --git a/nixpkgs/nixos/tests/wireguard/wg-quick.nix b/nixpkgs/nixos/tests/wireguard/wg-quick.nix
new file mode 100644
index 000000000000..7354dd01a34a
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/wg-quick.nix
@@ -0,0 +1,63 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }:
+  let
+    wg-snakeoil-keys = import ./snakeoil-keys.nix;
+    peer = (import ./make-peer.nix) { inherit lib; };
+  in
+  {
+    name = "wg-quick";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ xwvvvvwx ];
+    };
+
+    nodes = {
+      peer0 = peer {
+        ip4 = "192.168.0.1";
+        ip6 = "fd00::1";
+        extraConfig = {
+          networking.firewall.allowedUDPPorts = [ 23542 ];
+          networking.wg-quick.interfaces.wg0 = {
+            address = [ "10.23.42.1/32" "fc00::1/128" ];
+            listenPort = 23542;
+
+            inherit (wg-snakeoil-keys.peer0) privateKey;
+
+            peers = lib.singleton {
+              allowedIPs = [ "10.23.42.2/32" "fc00::2/128" ];
+
+              inherit (wg-snakeoil-keys.peer1) publicKey;
+            };
+          };
+        };
+      };
+
+      peer1 = peer {
+        ip4 = "192.168.0.2";
+        ip6 = "fd00::2";
+        extraConfig = {
+          networking.wg-quick.interfaces.wg0 = {
+            address = [ "10.23.42.2/32" "fc00::2/128" ];
+            inherit (wg-snakeoil-keys.peer1) privateKey;
+
+            peers = lib.singleton {
+              allowedIPs = [ "0.0.0.0/0" "::/0" ];
+              endpoint = "192.168.0.1:23542";
+              persistentKeepalive = 25;
+
+              inherit (wg-snakeoil-keys.peer0) publicKey;
+            };
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      peer0.wait_for_unit("wg-quick-wg0.service")
+      peer1.wait_for_unit("wg-quick-wg0.service")
+
+      peer1.succeed("ping -c5 fc00::1")
+      peer1.succeed("ping -c5 10.23.42.1")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/wordpress.nix b/nixpkgs/nixos/tests/wordpress.nix
new file mode 100644
index 000000000000..b7449859f7e6
--- /dev/null
+++ b/nixpkgs/nixos/tests/wordpress.nix
@@ -0,0 +1,57 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "wordpress";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [
+      flokli
+      grahamc # under duress!
+      mmilata
+    ];
+  };
+
+  machine =
+    { ... }:
+    { services.httpd.adminAddr = "webmaster@site.local";
+      services.httpd.logPerVirtualHost = true;
+
+      services.wordpress."site1.local" = {
+        database.tablePrefix = "site1_";
+      };
+
+      services.wordpress."site2.local" = {
+        database.tablePrefix = "site2_";
+      };
+
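+      # Map both virtual hosts to loopback so curl on the machine can reach them.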
+      networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+    };
+
+  testScript = ''
+    import re
+
+    start_all()
+
+    machine.wait_for_unit("httpd")
+
+    machine.wait_for_unit("phpfpm-wordpress-site1.local")
+    machine.wait_for_unit("phpfpm-wordpress-site2.local")
+
+    site_names = ["site1.local", "site2.local"]
+
+    with subtest("website returns welcome screen"):
+        for site_name in site_names:
+            assert "Welcome to the famous" in machine.succeed(f"curl -L {site_name}")
+
+    with subtest("wordpress-init went through"):
+        for site_name in site_names:
+            info = machine.get_unit_info(f"wordpress-init-{site_name}")
+            assert info["Result"] == "success"
+
+    with subtest("secret keys are set"):
+        pattern = re.compile(r"^define.*NONCE_SALT.{64,};$", re.MULTILINE)
+        for site_name in site_names:
+            assert pattern.search(
+                machine.succeed(f"cat /var/lib/wordpress/{site_name}/secret-keys.php")
+            )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xandikos.nix b/nixpkgs/nixos/tests/xandikos.nix
new file mode 100644
index 000000000000..0fded20ff1a9
--- /dev/null
+++ b/nixpkgs/nixos/tests/xandikos.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+
+    {
+      name = "xandikos";
+
+      meta.maintainers = [ lib.maintainers."0x4A6F" ];
+
+      nodes = {
+        xandikos_client = {};
+        xandikos_default = {
+          networking.firewall.allowedTCPPorts = [ 8080 ];
+          services.xandikos.enable = true;
+        };
+        xandikos_proxy = {
+          networking.firewall.allowedTCPPorts = [ 80 8080 ];
+          services.xandikos.enable = true;
+          services.xandikos.address = "localhost";
+          services.xandikos.port = 8080;
+          services.xandikos.routePrefix = "/xandikos/";
+          services.xandikos.extraOptions = [
+            "--defaults"
+          ];
+          services.nginx = {
+            enable = true;
+            recommendedProxySettings = true;
+            virtualHosts."xandikos" = {
+              serverName = "xandikos.local";
+              basicAuth.xandikos = "snakeOilPassword";
+              locations."/xandikos/" = {
+                proxyPass = "http://localhost:8080/";
+              };
+            };
+          };
+        };
+      };
+
+      testScript = ''
+        start_all()
+
+        with subtest("Xandikos default"):
+            xandikos_default.wait_for_unit("multi-user.target")
+            xandikos_default.wait_for_unit("xandikos.service")
+            xandikos_default.wait_for_open_port(8080)
+            xandikos_default.succeed("curl --fail http://localhost:8080/")
+            xandikos_default.succeed(
+                "curl -s --fail --location http://localhost:8080/ | grep -qi Xandikos"
+            )
+            xandikos_client.wait_for_unit("network.target")
+            xandikos_client.fail("curl --fail http://xandikos_default:8080/")
+
+        with subtest("Xandikos proxy"):
+            xandikos_proxy.wait_for_unit("multi-user.target")
+            xandikos_proxy.wait_for_unit("xandikos.service")
+            xandikos_proxy.wait_for_open_port(8080)
+            xandikos_proxy.succeed("curl --fail http://localhost:8080/")
+            xandikos_proxy.succeed(
+                "curl -s --fail --location http://localhost:8080/ | grep -qi Xandikos"
+            )
+            xandikos_client.wait_for_unit("network.target")
+            xandikos_client.fail("curl --fail http://xandikos_proxy:8080/")
+            xandikos_client.succeed(
+                "curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/ | grep -qi Xandikos"
+            )
+            xandikos_client.succeed(
+                "curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/user/ | grep -qi Xandikos"
+            )
+      '';
+    }
+)
diff --git a/nixpkgs/nixos/tests/xautolock.nix b/nixpkgs/nixos/tests/xautolock.nix
new file mode 100644
index 000000000000..4a8d3f4cebf7
--- /dev/null
+++ b/nixpkgs/nixos/tests/xautolock.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+with lib;
+
+{
+  name = "xautolock";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ ma27 ];
+
+  nodes.machine = {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+
+    test-support.displayManager.auto.user = "bob";
+    services.xserver.xautolock.enable = true;
+    services.xserver.xautolock.time = 1;
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_x()
+    machine.fail("pgrep xlock")
+    machine.sleep(120)
+    machine.succeed("pgrep xlock")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xfce.nix b/nixpkgs/nixos/tests/xfce.nix
new file mode 100644
index 000000000000..99065669661a
--- /dev/null
+++ b/nixpkgs/nixos/tests/xfce.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "xfce";
+
+  machine =
+    { pkgs, ... }:
+
+    {
+      imports = [
+        ./common/user-account.nix
+      ];
+
+      services.xserver.enable = true;
+
+      services.xserver.displayManager.lightdm = {
+        enable = true;
+        autoLogin = {
+          enable = true;
+          user = "alice";
+        };
+      };
+
+      services.xserver.desktopManager.xfce.enable = true;
+
+      hardware.pulseaudio.enable = true; # needed for the getfacl test; /dev/snd/* exists without it, but udev doesn't set ACLs then
+
+      virtualisation.memorySize = 1024;
+    };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+      machine.wait_for_x()
+      machine.wait_for_file("${user.home}/.Xauthority")
+      machine.succeed("xauth merge ${user.home}/.Xauthority")
+      machine.wait_for_window("xfce4-panel")
+      machine.sleep(10)
+
+      # Check that logging in has given the user ownership of devices.
+      machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+      machine.succeed("su - ${user.name} -c 'DISPLAY=:0.0 xfce4-terminal &'")
+      machine.wait_for_window("Terminal")
+      machine.sleep(10)
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/xmonad.nix b/nixpkgs/nixos/tests/xmonad.nix
new file mode 100644
index 000000000000..56baae8b9d3c
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmonad.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "xmonad";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  machine = { pkgs, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = "none+xmonad";
+    services.xserver.windowManager.xmonad = {
+      enable = true;
+      enableContribAndExtras = true;
+      extraPackages = haskellPackages: [ haskellPackages.xmobar ];
+      config = ''
+        import XMonad
+        import XMonad.Util.EZConfig
+        main = launch $ def `additionalKeysP` myKeys
+        myKeys = [ ("M-C-x", spawn "xterm") ]
+      '';
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    machine.wait_for_x()
+    machine.wait_for_file("${user.home}/.Xauthority")
+    machine.succeed("xauth merge ${user.home}/.Xauthority")
+    machine.send_key("alt-ctrl-x")
+    machine.wait_for_window("${user.name}.*machine")
+    machine.sleep(1)
+    machine.screenshot("terminal")
+    machine.wait_until_succeeds("xmonad --restart")
+    machine.sleep(3)
+    machine.send_key("alt-shift-ret")
+    machine.wait_for_window("${user.name}.*machine")
+    machine.sleep(1)
+    machine.screenshot("terminal")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xmpp/ejabberd.nix b/nixpkgs/nixos/tests/xmpp/ejabberd.nix
new file mode 100644
index 000000000000..1518aaacc8ab
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmpp/ejabberd.nix
@@ -0,0 +1,268 @@
+import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "ejabberd";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ajs124 ];
+  };
+  nodes = {
+    client = { nodes, pkgs, ... }: {
+      environment.systemPackages = [
+        (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
+      ];
+    };
+    server = { config, pkgs, ... }: {
+      networking.extraHosts = ''
+        ${config.networking.primaryIPAddress} example.com
+      '';
+
+      services.ejabberd = {
+        enable = true;
+        configFile = "/etc/ejabberd.yml";
+      };
+
+      environment.etc."ejabberd.yml" = {
+        user = "ejabberd";
+        mode = "0600";
+        text = ''
+          loglevel: 3
+
+          hosts:
+            - "example.com"
+
+          listen:
+            -
+              port: 5222
+              module: ejabberd_c2s
+              zlib: false
+              max_stanza_size: 65536
+              shaper: c2s_shaper
+              access: c2s
+            -
+              port: 5269
+              ip: "::"
+              module: ejabberd_s2s_in
+            -
+              port: 5347
+              ip: "127.0.0.1"
+              module: ejabberd_service
+              access: local
+              shaper: fast
+
+          ## Disabling digest-md5 SASL authentication. digest-md5 requires plain-text
+          ## password storage (see auth_password_format option).
+          disable_sasl_mechanisms: "digest-md5"
+
+          ## Outgoing S2S options
+          ## Preferred address families (which to try first) and connect timeout
+          ## in seconds.
+          outgoing_s2s_families:
+             - ipv4
+             - ipv6
+
+          ## auth_method: Method used to authenticate the users.
+          ## The default method is the internal.
+          ## If you want to use a different method,
+          ## comment this line and enable the correct ones.
+          auth_method: internal
+
+          ## Store the plain passwords or hashed for SCRAM:
+          ## auth_password_format: plain
+          auth_password_format: scram
+
+          ###'  TRAFFIC SHAPERS
+          shaper:
+            # in B/s
+            normal: 1000000
+            fast: 50000000
+
+          ## This option specifies the maximum number of elements in the queue
+          ## of the FSM. Refer to the documentation for details.
+          max_fsm_queue: 1000
+
+          ###'   ACCESS CONTROL LISTS
+          acl:
+            ## The 'admin' ACL grants administrative privileges to XMPP accounts.
+            ## You can put here as many accounts as you want.
+            admin:
+               user:
+                 - "root": "example.com"
+
+            ## Local users: don't modify this.
+            local:
+              user_regexp: ""
+
+            ## Loopback network
+            loopback:
+              ip:
+                - "127.0.0.0/8"
+                - "::1/128"
+                - "::FFFF:127.0.0.1/128"
+
+          ###'  SHAPER RULES
+          shaper_rules:
+            ## Maximum number of simultaneous sessions allowed for a single user:
+            max_user_sessions: 10
+            ## Maximum number of offline messages that users can have:
+            max_user_offline_messages:
+              - 5000: admin
+              - 1024
+            ## For C2S connections, all users except admins use the "normal" shaper
+            c2s_shaper:
+              - none: admin
+              - normal
+            ## All S2S connections use the "fast" shaper
+            s2s_shaper: fast
+
+          ###'  ACCESS RULES
+          access_rules:
+            ## This rule allows access only for local users:
+            local:
+              - allow: local
+            ## Only non-blocked users can use c2s connections:
+            c2s:
+              - deny: blocked
+              - allow
+            ## Only admins can send announcement messages:
+            announce:
+              - allow: admin
+            ## Only admins can use the configuration interface:
+            configure:
+              - allow: admin
+            ## Only accounts of the local ejabberd server can create rooms:
+            muc_create:
+              - allow: local
+            ## Only accounts on the local ejabberd server can create Pubsub nodes:
+            pubsub_createnode:
+              - allow: local
+            ## In-band registration allows registration of any possible username.
+            ## To disable in-band registration, replace 'allow' with 'deny'.
+            register:
+              - allow
+            ## Only allow to register from localhost
+            trusted_network:
+              - allow: loopback
+
+          ## ===============
+          ## API PERMISSIONS
+          ## ===============
+          ##
+          ## This section allows you to define who and using what method
+          ## can execute commands offered by ejabberd.
+          ##
+          ## By default "console commands" section allow executing all commands
+          ## issued using ejabberdctl command, and "admin access" section allows
+          ## users in admin acl that connect from 127.0.0.1 to execute all
+          ## commands except start and stop with any available access method
+          ## (ejabberdctl, http-api, xmlrpc depending what is enabled on server).
+          ##
+          ## If you remove "console commands" there will be one added by
+          ## default allowing executing all commands, but if you just change
+          ## permissions in it, version from config file will be used instead
+          ## of default one.
+          ##
+          api_permissions:
+            "console commands":
+              from:
+                - ejabberd_ctl
+              who: all
+              what: "*"
+
+          language: "en"
+
+          ###'  MODULES
+          ## Modules enabled in all ejabberd virtual hosts.
+          modules:
+            mod_adhoc: {}
+            mod_announce: # recommends mod_adhoc
+              access: announce
+            mod_blocking: {} # requires mod_privacy
+            mod_caps: {}
+            mod_carboncopy: {}
+            mod_client_state: {}
+            mod_configure: {} # requires mod_adhoc
+            ## mod_delegation: {} # for xep0356
+            #mod_irc:
+            #  host: "irc.@HOST@"
+            #  default_encoding: "utf-8"
+            ## mod_bosh: {}
+            ## mod_http_fileserver:
+            ##   docroot: "/var/www"
+            ##   accesslog: "/var/log/ejabberd/access.log"
+            #mod_http_upload:
+            #  thumbnail: false # otherwise needs the identify command from ImageMagick installed
+            #  put_url: "https://@HOST@:5444"
+            ##   # docroot: "@HOME@/upload"
+            #mod_http_upload_quota:
+            #  max_days: 14
+            mod_last: {}
+            ## XEP-0313: Message Archive Management
+            ## You might want to setup a SQL backend for MAM because the mnesia database is
+            ## limited to 2GB which might be exceeded on large servers
+            mod_mam: {}
+            mod_muc:
+              host: "muc.@HOST@"
+              access:
+                - allow
+              access_admin:
+                - allow: admin
+              access_create: muc_create
+              access_persistent: muc_create
+            mod_muc_admin: {}
+            mod_muc_log: {}
+            mod_offline:
+              access_max_user_messages: max_user_offline_messages
+            mod_ping: {}
+            ## mod_pres_counter:
+            ##   count: 5
+            ##   interval: 60
+            mod_privacy: {}
+            mod_private: {}
+            mod_roster:
+                versioning: true
+            mod_shared_roster: {}
+            mod_stats: {}
+            mod_time: {}
+            mod_vcard:
+              search: false
+            mod_vcard_xupdate: {}
+            ## Convert all avatars posted by Android clients from WebP to JPEG
+            mod_avatar: {}
+            #  convert:
+            #    webp: jpeg
+            mod_version: {}
+            mod_stream_mgmt: {}
+            ##   The module for S2S dialback (XEP-0220). Please note that you cannot
+            ##   rely solely on dialback if you want to federate with other servers,
+            ##   because a lot of servers have dialback disabled and instead rely on
+            ##   PKIX authentication. Make sure you have proper certificates installed
+            ##   and check your accessibility at https://check.messaging.one/
+            mod_s2s_dialback: {}
+            mod_pubsub:
+              plugins:
+                - "pep"
+            mod_push: {}
+        '';
+      };
+
+      networking.firewall.enable = false;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    ejabberd_prefix = "su ejabberd -s $(which ejabberdctl) "
+
+    server.wait_for_unit("ejabberd.service")
+
+    assert "status: started" in server.succeed(ejabberd_prefix + "status")
+
+    server.succeed(
+        ejabberd_prefix + "register azurediamond example.com hunter2",
+        ejabberd_prefix + "register cthon98 example.com nothunter2",
+    )
+    server.fail(ejabberd_prefix + "register asdf wrong.domain")
+    client.succeed("send-message")
+    server.succeed(
+        ejabberd_prefix + "unregister cthon98 example.com",
+        ejabberd_prefix + "unregister azurediamond example.com",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xmpp/prosody-mysql.nix b/nixpkgs/nixos/tests/xmpp/prosody-mysql.nix
new file mode 100644
index 000000000000..0507227021b2
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmpp/prosody-mysql.nix
@@ -0,0 +1,77 @@
+import ../make-test-python.nix {
+  name = "prosody-mysql";
+
+  nodes = {
+    client = { nodes, pkgs, ... }: {
+      environment.systemPackages = [
+        (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
+      ];
+    };
+    server = { config, pkgs, ... }: {
+      nixpkgs.overlays = [
+        (self: super: {
+          prosody = super.prosody.override {
+            withDBI = true;
+            withExtraLibs = [ pkgs.luaPackages.luadbi-mysql ];
+          };
+        })
+      ];
+      networking.extraHosts = ''
+        ${config.networking.primaryIPAddress} example.com
+      '';
+      networking.firewall.enable = false;
+      services.prosody = {
+        enable = true;
+        # TODO: use a self-signed certificate
+        c2sRequireEncryption = false;
+        extraConfig = ''
+          storage = "sql"
+          sql = {
+            driver = "MySQL";
+            database = "prosody";
+            host = "mysql";
+            port = 3306;
+            username = "prosody";
+            password = "password123";
+          };
+        '';
+        virtualHosts.test = {
+          domain = "example.com";
+          enabled = true;
+        };
+      };
+    };
+    mysql = { config, pkgs, ... }: {
+      networking.firewall.enable = false;
+      services.mysql = {
+        enable = true;
+        initialScript = pkgs.writeText "mysql_init.sql" ''
+          CREATE DATABASE prosody;
+          CREATE USER 'prosody'@'server' IDENTIFIED BY 'password123';
+          GRANT ALL PRIVILEGES ON prosody.* TO 'prosody'@'server';
+          FLUSH PRIVILEGES;
+        '';
+        package = pkgs.mariadb;
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    mysql.wait_for_unit("mysql.service")
+    server.wait_for_unit("prosody.service")
+    server.succeed('prosodyctl status | grep "Prosody is running"')
+
+    # set password to 'nothunter2' (it's asked twice)
+    server.succeed("yes nothunter2 | prosodyctl adduser cthon98@example.com")
+    # set password to 'y'
+    server.succeed("yes | prosodyctl adduser azurediamond@example.com")
+    # correct the password to 'hunter2'
+    server.succeed("yes hunter2 | prosodyctl passwd azurediamond@example.com")
+
+    client.succeed("send-message")
+
+    server.succeed("prosodyctl deluser cthon98@example.com")
+    server.succeed("prosodyctl deluser azurediamond@example.com")
+  '';
+}
+
diff --git a/nixpkgs/nixos/tests/xmpp/prosody.nix b/nixpkgs/nixos/tests/xmpp/prosody.nix
new file mode 100644
index 000000000000..9d1374bff6bd
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmpp/prosody.nix
@@ -0,0 +1,46 @@
+import ../make-test-python.nix {
+  name = "prosody";
+
+  nodes = {
+    client = { nodes, pkgs, ... }: {
+      environment.systemPackages = [
+        (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
+      ];
+    };
+    server = { config, pkgs, ... }: {
+      networking.extraHosts = ''
+        ${config.networking.primaryIPAddress} example.com
+      '';
+      networking.firewall.enable = false;
+      services.prosody = {
+        enable = true;
+        # TODO: use a self-signed certificate
+        c2sRequireEncryption = false;
+        extraConfig = ''
+          storage = "sql"
+        '';
+        virtualHosts.test = {
+          domain = "example.com";
+          enabled = true;
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    server.wait_for_unit("prosody.service")
+    server.succeed('prosodyctl status | grep "Prosody is running"')
+
+    # set password to 'nothunter2' (it's asked twice)
+    server.succeed("yes nothunter2 | prosodyctl adduser cthon98@example.com")
+    # set password to 'y'
+    server.succeed("yes | prosodyctl adduser azurediamond@example.com")
+    # correct the password to 'hunter2'
+    server.succeed("yes hunter2 | prosodyctl passwd azurediamond@example.com")
+
+    client.succeed("send-message")
+
+    server.succeed("prosodyctl deluser cthon98@example.com")
+    server.succeed("prosodyctl deluser azurediamond@example.com")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/xmpp/xmpp-sendmessage.nix b/nixpkgs/nixos/tests/xmpp/xmpp-sendmessage.nix
new file mode 100644
index 000000000000..2a075a018134
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmpp/xmpp-sendmessage.nix
@@ -0,0 +1,46 @@
+{ writeScriptBin, python3, connectTo ? "localhost" }:
+writeScriptBin "send-message" ''
+  #!${(python3.withPackages (ps: [ ps.sleekxmpp ])).interpreter}
+  # Based on the sleekxmpp send_client example, look there for more details:
+  # https://github.com/fritzy/SleekXMPP/blob/develop/examples/send_client.py
+  import sys
+
+  import sleekxmpp
+
+  class SendMsgBot(sleekxmpp.ClientXMPP):
+      """
+      A basic SleekXMPP bot that will log in, send a message,
+      and then log out.
+      """
+      def __init__(self, jid, password, recipient, message):
+          sleekxmpp.ClientXMPP.__init__(self, jid, password)
+
+          self.recipient = recipient
+          self.msg = message
+
+          self.add_event_handler("session_start", self.start, threaded=True)
+
+      def start(self, event):
+          self.send_presence()
+          self.get_roster()
+
+          self.send_message(mto=self.recipient,
+                            mbody=self.msg,
+                            mtype='chat')
+
+          self.disconnect(wait=True)
+
+
+  if __name__ == '__main__':
+      xmpp = SendMsgBot("cthon98@example.com", "nothunter2", "azurediamond@example.com", "hey, if you type in your pw, it will show as stars")
+      xmpp.register_plugin('xep_0030') # Service Discovery
+      xmpp.register_plugin('xep_0199') # XMPP Ping
+
+      # TODO: verify certificate
+      # If you want to verify the SSL certificates offered by a server:
+      # xmpp.ca_certs = "path/to/ca/cert"
+
+      if xmpp.connect(('${connectTo}', 5222)):
+          xmpp.process(block=True)
+      else:
+          print("Unable to connect.")
+          sys.exit(1)
+''
diff --git a/nixpkgs/nixos/tests/xrdp.nix b/nixpkgs/nixos/tests/xrdp.nix
new file mode 100644
index 000000000000..6d7f2b9249ff
--- /dev/null
+++ b/nixpkgs/nixos/tests/xrdp.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "xrdp";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ volth ];
+  };
+
+  nodes = {
+    server = { pkgs, ... }: {
+      imports = [ ./common/user-account.nix ];
+      services.xrdp.enable = true;
+      services.xrdp.defaultWindowManager = "${pkgs.xterm}/bin/xterm";
+      networking.firewall.allowedTCPPorts = [ 3389 ];
+    };
+
+    client = { pkgs, ... }: {
+      imports = [ ./common/x11.nix ./common/user-account.nix ];
+      test-support.displayManager.auto.user = "alice";
+      environment.systemPackages = [ pkgs.freerdp ];
+      services.xrdp.enable = true;
+      services.xrdp.defaultWindowManager = "${pkgs.icewm}/bin/icewm";
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.client.config.users.users.alice;
+  in ''
+    start_all()
+
+    client.wait_for_x()
+    client.wait_for_file("${user.home}/.Xauthority")
+    client.succeed("xauth merge ${user.home}/.Xauthority")
+
+    client.sleep(5)
+
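+    # Connect to the local xrdp session first, then to the one on "server".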
+    client.execute("xterm &")
+    client.sleep(1)
+    client.send_chars("xfreerdp /cert-tofu /w:640 /h:480 /v:127.0.0.1 /u:${user.name} /p:${user.password}\n")
+    client.sleep(5)
+    client.screenshot("localrdp")
+
+    client.execute("xterm &")
+    client.sleep(1)
+    client.send_chars("xfreerdp /cert-tofu /w:640 /h:480 /v:server /u:${user.name} /p:${user.password}\n")
+    client.sleep(5)
+    client.screenshot("remoterdp")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xss-lock.nix b/nixpkgs/nixos/tests/xss-lock.nix
new file mode 100644
index 000000000000..b77bbbbb3c4e
--- /dev/null
+++ b/nixpkgs/nixos/tests/xss-lock.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+with lib;
+
+{
+  name = "xss-lock";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ ma27 ];
+
+  nodes = {
+    simple = {
+      imports = [ ./common/x11.nix ./common/user-account.nix ];
+      programs.xss-lock.enable = true;
+      test-support.displayManager.auto.user = "alice";
+    };
+
+    custom_lockcmd = { pkgs, ... }: {
+      imports = [ ./common/x11.nix ./common/user-account.nix ];
+      test-support.displayManager.auto.user = "alice";
+
+      programs.xss-lock = {
+        enable = true;
+        extraOptions = [ "-n" "${pkgs.libnotify}/bin/notify-send 'About to sleep!'"];
+        lockerCommand = "${pkgs.xlockmore}/bin/xlock -mode ant";
+      };
+    };
+  };
+
+  testScript = ''
+    def perform_xsslock_test(machine, lockCmd):
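+        # Blank the screen via DPMS and expect the locker process to appear.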
+        machine.start()
+        machine.wait_for_x()
+        machine.wait_for_unit("xss-lock.service", "alice")
+        machine.fail(f"pgrep {lockCmd}")
+        machine.succeed("su -l alice -c 'xset dpms force standby'")
+        machine.wait_until_succeeds(f"pgrep {lockCmd}")
+
+
+    with subtest("simple"):
+        perform_xsslock_test(simple, "i3lock")
+
+    with subtest("custom_cmd"):
+        perform_xsslock_test(custom_lockcmd, "xlock")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/yabar.nix b/nixpkgs/nixos/tests/yabar.nix
new file mode 100644
index 000000000000..b374ef296807
--- /dev/null
+++ b/nixpkgs/nixos/tests/yabar.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+with lib;
+
+{
+  name = "yabar";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ma27 ];
+  };
+
+  machine = {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+
+    test-support.displayManager.auto.user = "bob";
+
+    programs.yabar.enable = true;
+    programs.yabar.bars = {
+      top.indicators.date.exec = "YABAR_DATE";
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_x()
+
+    # Confirm that yabar starts properly and is still running ten seconds later.
+    machine.wait_for_unit("yabar.service", "bob")
+    machine.sleep(10)
+    machine.wait_for_unit("yabar.service", "bob")
+
+    machine.screenshot("top_bar")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/yggdrasil.nix b/nixpkgs/nixos/tests/yggdrasil.nix
new file mode 100644
index 000000000000..468fcf671274
--- /dev/null
+++ b/nixpkgs/nixos/tests/yggdrasil.nix
@@ -0,0 +1,125 @@
+let
+  aliceIp6 = "200:3b91:b2d8:e708:fbf3:f06:fdd5:90d0";
+  aliceKeys = {
+    EncryptionPublicKey = "13e23986fe76bc3966b42453f479bc563348b7ff76633b7efcb76e185ec7652f";
+    EncryptionPrivateKey = "9f86947b15e86f9badac095517a1982e39a2db37ca726357f95987b898d82208";
+    SigningPublicKey = "e2c43349083bc1e998e4ec4535b4c6a8f44ca9a5a8e07336561267253b2be5f4";
+    SigningPrivateKey = "fe3add8da35316c05f6d90d3ca79bd2801e6ccab6d37e5339fef4152589398abe2c43349083bc1e998e4ec4535b4c6a8f44ca9a5a8e07336561267253b2be5f4";
+  };
+  bobIp6 = "201:ebbd:bde9:f138:c302:4afa:1fb6:a19a";
+  bobConfig = {
+    InterfacePeers = {
+      eth1 = [ "tcp://192.168.1.200:12345" ];
+    };
+    MulticastInterfaces = [ "eth1" ];
+    LinkLocalTCPPort = 54321;
+    EncryptionPublicKey = "c99d6830111e12d1b004c52fe9e5a2eef0f6aefca167aca14589a370b7373279";
+    EncryptionPrivateKey = "2e698a53d3fdce5962d2ff37de0fe77742a5c8b56cd8259f5da6aa792f6e8ba3";
+    SigningPublicKey = "de111da0ec781e45bf6c63ecb45a78c24d7d4655abfaeea83b26c36eb5c0fd5b";
+    SigningPrivateKey = "2a6c21550f3fca0331df50668ffab66b6dce8237bcd5728e571e8033b363e247de111da0ec781e45bf6c63ecb45a78c24d7d4655abfaeea83b26c36eb5c0fd5b";
+  };
+
+in import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "yggdrasil";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ gazally ];
+  };
+
+  nodes = rec {
+    # Alice is listening for peerings on a specified port,
+    # but has multicast peering disabled.  Alice has part of her
+    # yggdrasil config in Nix and part of it in a file.
+    alice =
+      { ... }:
+      {
+        networking = {
+          interfaces.eth1.ipv4.addresses = [{
+            address = "192.168.1.200";
+            prefixLength = 24;
+          }];
+          firewall.allowedTCPPorts = [ 80 12345 ];
+        };
+        services.httpd.enable = true;
+        services.httpd.adminAddr = "foo@example.org";
+
+        services.yggdrasil = {
+          enable = true;
+          config = {
+            Listen = ["tcp://0.0.0.0:12345"];
+            MulticastInterfaces = [ ];
+          };
+          configFile = toString (pkgs.writeTextFile {
+                         name = "yggdrasil-alice-conf";
+                         text = builtins.toJSON aliceKeys;
+                       });
+        };
+      };
+
+    # Bob is set up to peer with Alice, and also to do local multicast
+    # peering.  Bob's yggdrasil config is in a file.
+    bob =
+      { ... }:
+      {
+        networking.firewall.allowedTCPPorts = [ 54321 ];
+        services.yggdrasil = {
+          enable = true;
+          openMulticastPort = true;
+          configFile = toString (pkgs.writeTextFile {
+                         name = "yggdrasil-bob-conf";
+                         text = builtins.toJSON bobConfig;
+                       });
+        };
+      };
+
+    # Carol only does local peering.  Carol's yggdrasil config is all Nix.
+    carol =
+      { ... }:
+      {
+        networking.firewall.allowedTCPPorts = [ 43210 ];
+        services.yggdrasil = {
+          enable = true;
+          denyDhcpcdInterfaces = [ "ygg0" ];
+          config = {
+            IfTAPMode = true;
+            IfName = "ygg0";
+            MulticastInterfaces = [ "eth1" ];
+            LinkLocalTCPPort = 43210;
+          };
+        };
+      };
+  };
+
+  testScript =
+    ''
+      import re
+
+      # Give Alice a head start so she is ready when Bob calls.
+      alice.start()
+      alice.wait_for_unit("yggdrasil.service")
+
+      bob.start()
+      carol.start()
+      bob.wait_for_unit("yggdrasil.service")
+      carol.wait_for_unit("yggdrasil.service")
+
+      ip_addr_show = "ip -o -6 addr show dev ygg0 scope global"
+      carol.wait_until_succeeds(f"[ `{ip_addr_show} | grep -v tentative | wc -l` -ge 1 ]")
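+      # Extract the first global IPv6 address from the "ip -o" output.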
+      carol_ip6 = re.split(" +|/", carol.succeed(ip_addr_show))[3]
+
+      # If Alice can talk to Carol, then Bob's outbound peering and Carol's
+      # local peering have succeeded and everybody is connected.
+      alice.wait_until_succeeds(f"ping -c 1 {carol_ip6}")
+      alice.succeed(f"ping -c 1 ${bobIp6}")
+
+      bob.succeed("ping -c 1 ${aliceIp6}")
+      bob.succeed(f"ping -c 1 {carol_ip6}")
+
+      carol.succeed("ping -c 1 ${aliceIp6}")
+      carol.succeed("ping -c 1 ${bobIp6}")
+
+      carol.fail("journalctl -u dhcpcd | grep ygg0")
+
+      alice.wait_for_unit("httpd.service")
+      carol.succeed("curl --fail -g http://[${aliceIp6}]")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/zfs.nix b/nixpkgs/nixos/tests/zfs.nix
new file mode 100644
index 000000000000..7ba60ee9806c
--- /dev/null
+++ b/nixpkgs/nixos/tests/zfs.nix
@@ -0,0 +1,75 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+
+  makeZfsTest = name:
+    { kernelPackage ? pkgs.linuxPackages_latest
+    , enableUnstable ? false
+    , extraTest ? ""
+    }:
+    makeTest {
+      name = "zfs-" + name;
+      meta = with pkgs.stdenv.lib.maintainers; {
+        maintainers = [ adisbladis ];
+      };
+
+      machine = { pkgs, ... }: {
+        virtualisation.emptyDiskImages = [ 4096 ];
+        networking.hostId = "deadbeef";
+        boot.kernelPackages = kernelPackage;
+        boot.supportedFilesystems = [ "zfs" ];
+        boot.zfs.enableUnstable = enableUnstable;
+
+        environment.systemPackages = [ pkgs.parted ];
+      };
+
+      testScript = ''
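+        # Partition the empty disk image, create a pool and a filesystem on
+        # it, mount and unmount it, then destroy the pool again.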
+        machine.succeed(
+            "modprobe zfs",
+            "zpool status",
+            "ls /dev",
+            "mkdir /tmp/mnt",
+            "udevadm settle",
+            "parted --script /dev/vdb mklabel msdos",
+            "parted --script /dev/vdb -- mkpart primary 1024M -1s",
+            "udevadm settle",
+            "zpool create rpool /dev/vdb1",
+            "zfs create -o mountpoint=legacy rpool/root",
+            "mount -t zfs rpool/root /tmp/mnt",
+            "udevadm settle",
+            "umount /tmp/mnt",
+            "zpool destroy rpool",
+            "udevadm settle",
+        )
+      '' + extraTest;
+
+    };
+
+
+in {
+
+  stable = makeZfsTest "stable" { };
+
+  unstable = makeZfsTest "unstable" {
+    enableUnstable = true;
+    extraTest = ''
+      machine.succeed(
+          'echo password | zpool create -o altroot="/tmp/mnt" '
+          + "-O encryption=aes-256-gcm -O keyformat=passphrase rpool /dev/vdb1",
+          "zfs create -o mountpoint=legacy rpool/root",
+          "mount -t zfs rpool/root /tmp/mnt",
+          "udevadm settle",
+          "umount /tmp/mnt",
+          "zpool destroy rpool",
+          "udevadm settle",
+      )
+    '';
+  };
+
+  installer = (import ./installer.nix { }).zfsroot;
+}
diff --git a/nixpkgs/nixos/tests/zookeeper.nix b/nixpkgs/nixos/tests/zookeeper.nix
new file mode 100644
index 000000000000..42cf20b39c52
--- /dev/null
+++ b/nixpkgs/nixos/tests/zookeeper.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "zookeeper";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  nodes = {
+    server = { ... }: {
+      services.zookeeper = {
+        enable = true;
+      };
+
+      networking.firewall.allowedTCPPorts = [ 2181 ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
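+    # Exercise basic znode create/set/get through zkCli against port 2181.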
+    server.wait_for_unit("zookeeper")
+    server.wait_for_unit("network.target")
+    server.wait_for_open_port(2181)
+
+    server.wait_until_succeeds(
+        "${pkgs.zookeeper}/bin/zkCli.sh -server localhost:2181 create /foo bar"
+    )
+    server.wait_until_succeeds(
+        "${pkgs.zookeeper}/bin/zkCli.sh -server localhost:2181 set /foo hello"
+    )
+    server.wait_until_succeeds(
+        "${pkgs.zookeeper}/bin/zkCli.sh -server localhost:2181 get /foo | grep hello"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/zsh-history.nix b/nixpkgs/nixos/tests/zsh-history.nix
new file mode 100644
index 000000000000..4380ec9adfd2
--- /dev/null
+++ b/nixpkgs/nixos/tests/zsh-history.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "zsh-history";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ kampka ];
+  };
+
+  nodes.default = { ... }: {
+    programs.zsh = {
+      enable = true;
+      interactiveShellInit = ''
+        source ${pkgs.zsh-history.out}/share/zsh/init.zsh
+      '';
+    };
+    environment.systemPackages = [ pkgs.zsh-history ];
+    users.users.root.shell = "${pkgs.zsh}/bin/zsh";
+  };
+
+  testScript = ''
+    start_all()
+    default.wait_for_unit("multi-user.target")
+    default.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+
+    # Login
+    default.wait_until_tty_matches(1, "login: ")
+    default.send_chars("root\n")
+    default.wait_until_tty_matches(1, "root@default>")
+
+    # Generate some history
+    default.send_chars("echo foobar\n")
+    default.wait_until_tty_matches(1, "foobar")
+
+    # Ensure that command was recorded in history
+    default.succeed("/run/current-system/sw/bin/history list | grep -q foobar")
+  '';
+})