Diffstat (limited to 'nixos/tests')
-rw-r--r--  nixos/tests/acme.nix | 5
-rw-r--r--  nixos/tests/all-tests.nix | 24
-rw-r--r--  nixos/tests/chromium.nix | 2
-rw-r--r--  nixos/tests/doas.nix | 83
-rw-r--r--  nixos/tests/docker-containers.nix | 27
-rw-r--r--  nixos/tests/dokuwiki.nix | 83
-rw-r--r--  nixos/tests/ec2.nix | 2
-rw-r--r--  nixos/tests/elk.nix | 1
-rw-r--r--  nixos/tests/enlightenment.nix | 101
-rw-r--r--  nixos/tests/gitdaemon.nix | 3
-rw-r--r--  nixos/tests/google-oslogin/default.nix | 18
-rw-r--r--  nixos/tests/google-oslogin/server.py | 83
-rw-r--r--  nixos/tests/installer.nix | 30
-rw-r--r--  nixos/tests/ipfs.nix | 58
-rw-r--r--  nixos/tests/ldap.nix | 405
-rw-r--r--  nixos/tests/mediawiki.nix | 7
-rw-r--r--  nixos/tests/minio.nix | 2
-rw-r--r--  nixos/tests/mysql/mariadb-galera-mariabackup.nix | 223
-rw-r--r--  nixos/tests/mysql/mariadb-galera-rsync.nix | 216
-rw-r--r--  nixos/tests/mysql/mysql-autobackup.nix (renamed from nixos/tests/automysqlbackup.nix) | 2
-rw-r--r--  nixos/tests/mysql/mysql-backup.nix (renamed from nixos/tests/mysql-backup.nix) | 2
-rw-r--r--  nixos/tests/mysql/mysql-replication.nix (renamed from nixos/tests/mysql-replication.nix) | 2
-rw-r--r--  nixos/tests/mysql/mysql.nix (renamed from nixos/tests/mysql.nix) | 2
-rw-r--r--  nixos/tests/mysql/testdb.sql (renamed from nixos/tests/testdb.sql) | 0
-rw-r--r--  nixos/tests/nginx-pubhtml.nix | 1
-rw-r--r--  nixos/tests/nginx-sandbox.nix | 66
-rw-r--r--  nixos/tests/oci-containers.nix | 43
-rw-r--r--  nixos/tests/partition.nix | 247
-rw-r--r--  nixos/tests/php/default.nix | 1
-rw-r--r--  nixos/tests/php/fpm.nix | 14
-rw-r--r--  nixos/tests/php/httpd.nix | 31
-rw-r--r--  nixos/tests/php/pcre.nix | 10
-rw-r--r--  nixos/tests/podman.nix | 60
-rw-r--r--  nixos/tests/privacyidea.nix | 36
-rw-r--r--  nixos/tests/prometheus.nix | 2
-rw-r--r--  nixos/tests/redmine.nix | 70
-rw-r--r--  nixos/tests/service-runner.nix | 2
-rw-r--r--  nixos/tests/systemd-boot.nix | 31
-rw-r--r--  nixos/tests/systemd-confinement.nix | 135
-rw-r--r--  nixos/tests/systemd-networkd-dhcpserver.nix | 58
-rw-r--r--  nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix | 295
-rw-r--r--  nixos/tests/udisks2.nix | 2
-rw-r--r--  nixos/tests/web-servers/unit-php.nix | 47
-rw-r--r--  nixos/tests/xmpp/prosody-mysql.nix | 15
-rw-r--r--  nixos/tests/xmpp/prosody.nix | 85
-rw-r--r--  nixos/tests/xmpp/xmpp-sendmessage.nix | 105
46 files changed, 1747 insertions(+), 990 deletions(-)
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index 693f02962f45..fc41dc1eb5ff 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -12,8 +12,9 @@ let
     fi
   '';
 
-in import ./make-test-python.nix {
+in import ./make-test-python.nix ({ lib, ... }: {
   name = "acme";
+  meta.maintainers = lib.teams.acme.members;
 
   nodes = rec {
     acme = { nodes, lib, ... }: {
@@ -207,4 +208,4 @@ in import ./make-test-python.nix {
               "curl --cacert /tmp/ca.crt https://c.example.test/ | grep -qF 'hello world'"
           )
     '';
-}
+})
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 30229a3a5b2f..f3e90f9bfa70 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -24,7 +24,6 @@ in
   _3proxy = handleTest ./3proxy.nix {};
   acme = handleTest ./acme.nix {};
   atd = handleTest ./atd.nix {};
-  automysqlbackup = handleTest ./automysqlbackup.nix {};
   avahi = handleTest ./avahi.nix {};
   babeld = handleTest ./babeld.nix {};
   bcachefs = handleTestOn ["x86_64-linux"] ./bcachefs.nix {}; # linux-4.18.2018.10.12 is unsupported on aarch64
@@ -69,8 +68,9 @@ in
   deluge = handleTest ./deluge.nix {};
   dhparams = handleTest ./dhparams.nix {};
   dnscrypt-proxy2 = handleTestOn ["x86_64-linux"] ./dnscrypt-proxy2.nix {};
+  doas = handleTest ./doas.nix {};
   docker = handleTestOn ["x86_64-linux"] ./docker.nix {};
-  docker-containers = handleTestOn ["x86_64-linux"] ./docker-containers.nix {};
+  oci-containers = handleTestOn ["x86_64-linux"] ./oci-containers.nix {};
   docker-edge = handleTestOn ["x86_64-linux"] ./docker-edge.nix {};
   docker-preloader = handleTestOn ["x86_64-linux"] ./docker-preloader.nix {};
   docker-registry = handleTest ./docker-registry.nix {};
@@ -85,6 +85,7 @@ in
   ecryptfs = handleTest ./ecryptfs.nix {};
   ejabberd = handleTest ./xmpp/ejabberd.nix {};
   elk = handleTestOn ["x86_64-linux"] ./elk.nix {};
+  enlightenment = handleTest ./enlightenment.nix {};
   env = handleTest ./env.nix {};
   etcd = handleTestOn ["x86_64-linux"] ./etcd.nix {};
   etcd-cluster = handleTestOn ["x86_64-linux"] ./etcd-cluster.nix {};
@@ -143,6 +144,7 @@ in
   initrdNetwork = handleTest ./initrd-network.nix {};
   installer = handleTest ./installer.nix {};
   iodine = handleTest ./iodine.nix {};
+  ipfs = handleTest ./ipfs.nix {};
   ipv6 = handleTest ./ipv6.nix {};
   jackett = handleTest ./jackett.nix {};
   jellyfin = handleTest ./jellyfin.nix {};
@@ -164,7 +166,6 @@ in
   kubernetes.rbac = handleTestOn ["x86_64-linux"] ./kubernetes/rbac.nix {};
   latestKernel.hardened = handleTest ./hardened.nix { latestKernel = true; };
   latestKernel.login = handleTest ./login.nix { latestKernel = true; };
-  ldap = handleTest ./ldap.nix {};
   leaps = handleTest ./leaps.nix {};
   lidarr = handleTest ./lidarr.nix {};
   lightdm = handleTest ./lightdm.nix {};
@@ -176,6 +177,8 @@ in
   magnetico = handleTest ./magnetico.nix {};
   magic-wormhole-mailbox-server = handleTest ./magic-wormhole-mailbox-server.nix {};
   mailcatcher = handleTest ./mailcatcher.nix {};
+  mariadb-galera-mariabackup = handleTest ./mysql/mariadb-galera-mariabackup.nix {};
+  mariadb-galera-rsync = handleTest ./mysql/mariadb-galera-rsync.nix {};
   mathics = handleTest ./mathics.nix {};
   matomo = handleTest ./matomo.nix {};
   matrix-synapse = handleTest ./matrix-synapse.nix {};
@@ -197,9 +200,10 @@ in
   munin = handleTest ./munin.nix {};
   mutableUsers = handleTest ./mutable-users.nix {};
   mxisd = handleTest ./mxisd.nix {};
-  mysql = handleTest ./mysql.nix {};
-  mysqlBackup = handleTest ./mysql-backup.nix {};
-  mysqlReplication = handleTest ./mysql-replication.nix {};
+  mysql = handleTest ./mysql/mysql.nix {};
+  mysql-autobackup = handleTest ./mysql/mysql-autobackup.nix {};
+  mysql-backup = handleTest ./mysql/mysql-backup.nix {};
+  mysql-replication = handleTest ./mysql/mysql-replication.nix {};
   nagios = handleTest ./nagios.nix {};
   nat.firewall = handleTest ./nat.nix { withFirewall = true; };
   nat.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; };
@@ -221,6 +225,7 @@ in
   nginx = handleTest ./nginx.nix {};
   nginx-etag = handleTest ./nginx-etag.nix {};
   nginx-pubhtml = handleTest ./nginx-pubhtml.nix {};
+  nginx-sandbox = handleTestOn ["x86_64-linux"] ./nginx-sandbox.nix {};
   nginx-sso = handleTest ./nginx-sso.nix {};
   nix-ssh-serve = handleTest ./nix-ssh-serve.nix {};
   nixos-generate-config = handleTest ./nixos-generate-config.nix {};
@@ -248,6 +253,7 @@ in
   php = handleTest ./php {};
   plasma5 = handleTest ./plasma5.nix {};
   plotinus = handleTest ./plotinus.nix {};
+  podman = handleTest ./podman.nix {};
   postgis = handleTest ./postgis.nix {};
   postgresql = handleTest ./postgresql.nix {};
   postgresql-wal-receiver = handleTest ./postgresql-wal-receiver.nix {};
@@ -255,6 +261,7 @@ in
   pppd = handleTest ./pppd.nix {};
   predictable-interface-names = handleTest ./predictable-interface-names.nix {};
   printing = handleTest ./printing.nix {};
+  privacyidea = handleTest ./privacyidea.nix {};
   prometheus = handleTest ./prometheus.nix {};
   prometheus-exporters = handleTest ./prometheus-exporters.nix {};
   prosody = handleTest ./xmpp/prosody.nix {};
@@ -286,6 +293,7 @@ in
   snapper = handleTest ./snapper.nix {};
   solr = handleTest ./solr.nix {};
   spacecookie = handleTest ./spacecookie.nix {};
+  spike = handleTest ./spike.nix {};
   sonarr = handleTest ./sonarr.nix {};
   strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
   sudo = handleTest ./sudo.nix {};
@@ -295,10 +303,13 @@ in
   syncthing-relay = handleTest ./syncthing-relay.nix {};
   systemd = handleTest ./systemd.nix {};
   systemd-analyze = handleTest ./systemd-analyze.nix {};
+  systemd-boot = handleTestOn ["x86_64-linux"] ./systemd-boot.nix {};
   systemd-confinement = handleTest ./systemd-confinement.nix {};
   systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
   systemd-networkd-vrf = handleTest ./systemd-networkd-vrf.nix {};
   systemd-networkd = handleTest ./systemd-networkd.nix {};
+  systemd-networkd-dhcpserver = handleTest ./systemd-networkd-dhcpserver.nix {};
+  systemd-networkd-ipv6-prefix-delegation = handleTest ./systemd-networkd-ipv6-prefix-delegation.nix {};
   systemd-nspawn = handleTest ./systemd-nspawn.nix {};
   pdns-recursor = handleTest ./pdns-recursor.nix {};
   taskserver = handleTest ./taskserver.nix {};
@@ -316,6 +327,7 @@ in
   trickster = handleTest ./trickster.nix {};
   tuptime = handleTest ./tuptime.nix {};
   udisks2 = handleTest ./udisks2.nix {};
+  unit-php = handleTest ./web-servers/unit-php.nix {};
   upnp = handleTest ./upnp.nix {};
   uwsgi = handleTest ./uwsgi.nix {};
   vault = handleTest ./vault.nix {};
diff --git a/nixos/tests/chromium.nix b/nixos/tests/chromium.nix
index fc5d3a5c52fe..795b93f6f54e 100644
--- a/nixos/tests/chromium.nix
+++ b/nixos/tests/chromium.nix
@@ -37,7 +37,7 @@ mapAttrs (channel: chromiumPkg: makeTest rec {
     </head>
     <body onload="javascript:document.title='startup done'">
       <img src="file://${pkgs.fetchurl {
-        url = "http://nixos.org/logo/nixos-hex.svg";
+        url = "https://nixos.org/logo/nixos-hex.svg";
         sha256 = "07ymq6nw8kc22m7kzxjxldhiq8gzmc7f45kq2bvhbdm0w5s112s4";
       }}" />
     </body>
diff --git a/nixos/tests/doas.nix b/nixos/tests/doas.nix
new file mode 100644
index 000000000000..9c0a4bdc7563
--- /dev/null
+++ b/nixos/tests/doas.nix
@@ -0,0 +1,83 @@
+# Some tests to ensure doas is working properly.
+import ./make-test-python.nix (
+  { lib, ... }: {
+    name = "doas";
+    meta = with lib.maintainers; {
+      maintainers = [ cole-h ];
+    };
+
+    machine =
+      { ... }:
+        {
+          users.groups = { foobar = {}; barfoo = {}; baz = { gid = 1337; }; };
+          users.users = {
+            test0 = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+            test1 = { isNormalUser = true; };
+            test2 = { isNormalUser = true; extraGroups = [ "foobar" ]; };
+            test3 = { isNormalUser = true; extraGroups = [ "barfoo" ]; };
+            test4 = { isNormalUser = true; extraGroups = [ "baz" ]; };
+            test5 = { isNormalUser = true; };
+            test6 = { isNormalUser = true; };
+            test7 = { isNormalUser = true; };
+          };
+
+          security.doas = {
+            enable = true;
+            wheelNeedsPassword = false;
+
+            extraRules = [
+              { users = [ "test1" ]; groups = [ "foobar" ]; }
+              { users = [ "test2" ]; noPass = true; setEnv = [ "CORRECT" "HORSE=BATTERY" ]; }
+              { groups = [ "barfoo" 1337 ]; noPass = true; }
+              { users = [ "test5" ]; noPass = true; keepEnv = true; runAs = "test1"; }
+              { users = [ "test6" ]; noPass = true; keepEnv = true; setEnv = [ "-STAPLE" ]; }
+              { users = [ "test7" ]; noPass = true; setEnv = [ "-SSH_AUTH_SOCK" ]; }
+            ];
+          };
+        };
+
+    testScript = ''
+      with subtest("users in wheel group should have passwordless doas"):
+          machine.succeed('su - test0 -c "doas -u root true"')
+
+      with subtest("test1 user should not be able to use doas without password"):
+          machine.fail('su - test1 -c "doas -n -u root true"')
+
+      with subtest("test2 user should be able to keep some env"):
+          if "CORRECT=1" not in machine.succeed('su - test2 -c "CORRECT=1 doas env"'):
+              raise Exception("failed to keep CORRECT")
+
+          if "HORSE=BATTERY" not in machine.succeed('su - test2 -c "doas env"'):
+              raise Exception("failed to setenv HORSE=BATTERY")
+
+      with subtest("users in group 'barfoo' shouldn't require password"):
+          machine.succeed("doas -u test3 doas -n -u root true")
+
+      with subtest("users in group 'baz' (GID 1337) shouldn't require password"):
+          machine.succeed("doas -u test4 doas -n -u root echo true")
+
+      with subtest("test5 user should be able to run commands under test1"):
+          machine.succeed("doas -u test5 doas -n -u test1 true")
+
+      with subtest("test5 user should not be able to run commands under root"):
+          machine.fail("doas -u test5 doas -n -u root true")
+
+      with subtest("test6 user should be able to keepenv"):
+          envs = ["BATTERY=HORSE", "CORRECT=false"]
+          out = machine.succeed(
+              'su - test6 -c "BATTERY=HORSE CORRECT=false STAPLE=Tr0ub4dor doas env"'
+          )
+
+          if not all(env in out for env in envs):
+              raise Exception("failed to keep BATTERY or CORRECT")
+          if "STAPLE=Tr0ub4dor" in out:
+              raise Exception("failed to exclude STAPLE")
+
+      with subtest("test7 should not have access to SSH_AUTH_SOCK"):
+          if "SSH_AUTH_SOCK=HOLEY" in machine.succeed(
+              'su - test7 -c "SSH_AUTH_SOCK=HOLEY doas env"'
+          ):
+              raise Exception("failed to exclude SSH_AUTH_SOCK")
+    '';
+  }
+)
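Each entry in security.doas.extraRules above is rendered into one rule line of doas.conf. A rough sketch of the mapping for two of the rules exercised by this test (the rendered lines are approximations in doas.conf syntax, not the module's literal output):

  {
    security.doas.extraRules = [
      # renders to roughly: permit nopass setenv { CORRECT HORSE=BATTERY } test2
      { users = [ "test2" ]; noPass = true; setEnv = [ "CORRECT" "HORSE=BATTERY" ]; }
      # renders to roughly: permit nopass keepenv test5 as test1
      { users = [ "test5" ]; noPass = true; keepEnv = true; runAs = "test1"; }
    ];
  }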
diff --git a/nixos/tests/docker-containers.nix b/nixos/tests/docker-containers.nix
deleted file mode 100644
index 0e318a52d9f1..000000000000
--- a/nixos/tests/docker-containers.nix
+++ /dev/null
@@ -1,27 +0,0 @@
-# Test Docker containers as systemd units
-
-import ./make-test-python.nix ({ pkgs, lib, ... }: {
-  name = "docker-containers";
-  meta = {
-    maintainers = with lib.maintainers; [ benley mkaito ];
-  };
-
-  nodes = {
-    docker = { pkgs, ... }: {
-      virtualisation.docker.enable = true;
-
-      docker-containers.nginx = {
-        image = "nginx-container";
-        imageFile = pkgs.dockerTools.examples.nginx;
-        ports = ["8181:80"];
-      };
-    };
-  };
-
-  testScript = ''
-    start_all()
-    docker.wait_for_unit("docker-nginx.service")
-    docker.wait_for_open_port(8181)
-    docker.wait_until_succeeds("curl http://localhost:8181 | grep Hello")
-  '';
-})
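The removed docker-containers test is superseded by oci-containers.nix (see the diffstat and the all-tests.nix hunk above). A minimal sketch of the same nginx container declared through the renamed module, assuming it keeps the image/imageFile/ports options and a selectable backend:

  { pkgs, ... }: {
    virtualisation.oci-containers = {
      backend = "docker";  # the new module can also drive "podman"
      containers.nginx = {
        image = "nginx-container";
        imageFile = pkgs.dockerTools.examples.nginx;
        ports = [ "8181:80" ];
      };
    };
    # the generated unit is named <backend>-<name>, e.g. docker-nginx.service
  }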
diff --git a/nixos/tests/dokuwiki.nix b/nixos/tests/dokuwiki.nix
index 38bde10f47ed..05271919effe 100644
--- a/nixos/tests/dokuwiki.nix
+++ b/nixos/tests/dokuwiki.nix
@@ -1,29 +1,74 @@
-import ./make-test-python.nix ({ lib, ... }:
+import ./make-test-python.nix ({ pkgs, ... }:
 
-with lib;
+let
+  template-bootstrap3 = pkgs.stdenv.mkDerivation {
+    name = "bootstrap3";
+    # Download the theme from the dokuwiki site
+    src = pkgs.fetchurl {
+      url = "https://github.com/giterlizzi/dokuwiki-template-bootstrap3/archive/v2019-05-22.zip";
+      sha256 = "4de5ff31d54dd61bbccaf092c9e74c1af3a4c53e07aa59f60457a8f00cfb23a6";
+    };
+    # We need unzip to build this package
+    buildInputs = [ pkgs.unzip ];
+    # Installing simply means copying all files to the output directory
+    installPhase = "mkdir -p $out; cp -R * $out/";
+  };
+
+
+  # Let's package the icalevents plugin
+  plugin-icalevents = pkgs.stdenv.mkDerivation {
+    name = "icalevents";
+    # Download the plugin from the dokuwiki site
+    src = pkgs.fetchurl {
+      url = "https://github.com/real-or-random/dokuwiki-plugin-icalevents/releases/download/2017-06-16/dokuwiki-plugin-icalevents-2017-06-16.zip";
+      sha256 = "e40ed7dd6bbe7fe3363bbbecb4de481d5e42385b5a0f62f6a6ce6bf3a1f9dfa8";
+    };
+    # We need unzip to build this package
+    buildInputs = [ pkgs.unzip ];
+    sourceRoot = ".";
+    # Installing simply means copying all files to the output directory
+    installPhase = "mkdir -p $out; cp -R * $out/";
+  };
 
-{
+in {
   name = "dokuwiki";
-  meta.maintainers = with maintainers; [ maintainers."1000101" ];
-
-  nodes.machine =
-    { pkgs, ... }:
-    { services.dokuwiki = {
-        enable = true;
-        acl = " ";
-        superUser = null;
-        nginx = {
-          forceSSL = false;
-          enableACME = false;
-        };
-      }; 
+  meta.maintainers = with pkgs.lib.maintainers; [ "1000101" ];
+
+  machine = { ... }: {
+    services.dokuwiki."site1.local" = {
+      aclUse = false;
+      superUser = "admin";
+      nginx = {
+        forceSSL = false;
+        enableACME = false;
+      };
+    };
+    services.dokuwiki."site2.local" = {
+      aclUse = true;
+      superUser = "admin";
+      nginx = {
+        forceSSL = false;
+        enableACME = false;
+      };
+      templates = [ template-bootstrap3 ];
+      plugins = [ plugin-icalevents ];
     };
+    networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+  };
 
   testScript = ''
-    machine.start()
-    machine.wait_for_unit("phpfpm-dokuwiki.service")
+    site_names = ["site1.local", "site2.local"]
+
+    start_all()
+
+    machine.wait_for_unit("phpfpm-dokuwiki-site1.local.service")
+    machine.wait_for_unit("phpfpm-dokuwiki-site2.local.service")
+
     machine.wait_for_unit("nginx.service")
+
     machine.wait_for_open_port(80)
-    machine.succeed("curl -sSfL http://localhost/ | grep 'DokuWiki'")
+
+    machine.succeed("curl -sSfL http://site1.local/ | grep 'DokuWiki'")
+    machine.succeed("curl -sSfL http://site2.local/ | grep 'DokuWiki'")
   '';
 })
diff --git a/nixos/tests/ec2.nix b/nixos/tests/ec2.nix
index 6aeeb17ba31a..5a59d65e6026 100644
--- a/nixos/tests/ec2.nix
+++ b/nixos/tests/ec2.nix
@@ -108,7 +108,7 @@ in {
     inherit image;
     sshPublicKey = snakeOilPublicKey;
 
-    # ### http://nixos.org/channels/nixos-unstable nixos
+    # ### https://nixos.org/channels/nixos-unstable nixos
     userData = ''
       { pkgs, ... }:
 
diff --git a/nixos/tests/elk.nix b/nixos/tests/elk.nix
index d3dc6dde1359..7e87197ed9f3 100644
--- a/nixos/tests/elk.nix
+++ b/nixos/tests/elk.nix
@@ -101,6 +101,7 @@ let
                       prefixed indices. Ignore the error if the filter does not result in an
                       actionable list of indices (ignore_empty_list) and exit cleanly.
                     options:
+                      allow_ilm_indices: true
                       ignore_empty_list: True
                       disable_action: False
                     filters:
diff --git a/nixos/tests/enlightenment.nix b/nixos/tests/enlightenment.nix
new file mode 100644
index 000000000000..5fa8d765dd1f
--- /dev/null
+++ b/nixos/tests/enlightenment.nix
@@ -0,0 +1,101 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+{
+  name = "enlightenment";
+
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ romildo ];
+  };
+
+  machine = { ... }:
+  {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.desktopManager.enlightenment.enable = true;
+    services.xserver.displayManager.lightdm = {
+      enable = true;
+      autoLogin = {
+        enable = true;
+        user = "alice";
+      };
+    };
+    hardware.pulseaudio.enable = true; # needed for the getfacl test; /dev/snd/* exists without it, but udev does not apply ACLs to it then
+    virtualisation.memorySize = 1024;
+    environment.systemPackages = [ pkgs.xdotool ];
+    services.acpid.enable = true;
+    services.connman.enable = true;
+    services.connman.package = pkgs.connmanMinimal;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    with subtest("Ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+    with subtest("Check that logging in has given the user ownership of devices"):
+        machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+    with subtest("First time wizard"):
+        machine.wait_for_text("Default")  # Language
+        machine.succeed("xdotool mousemove 512 185 click 1")  # Default Language
+        machine.screenshot("wizard1")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("English")  # Keyboard (default)
+        machine.screenshot("wizard2")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Standard")  # Profile (default)
+        machine.screenshot("wizard3")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Title")  # Sizing (default)
+        machine.screenshot("wizard4")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("clicked")  # Windows Phocus
+        machine.succeed("xdotool mousemove 512 370 click 1")  # Click
+        machine.screenshot("wizard5")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("bindings")  # Mouse Modifiers (default)
+        machine.screenshot("wizard6")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Connman")  # Network Management (default)
+        machine.screenshot("wizard7")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("BlusZ")  # Bluetooh Management (default)
+        machine.screenshot("wizard8")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Compositing")  # Compositing (default)
+        machine.screenshot("wizard9")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("update")  # Updates
+        machine.succeed("xdotool mousemove 512 495 click 1")  # Disable
+        machine.screenshot("wizard10")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("taskbar")  # Taskbar
+        machine.succeed("xdotool mousemove 480 410 click 1")  # Enable
+        machine.screenshot("wizard11")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Home")  # The desktop
+        machine.screenshot("wizard12")
+
+    with subtest("Run Terminology"):
+        machine.succeed("terminology &")
+        machine.sleep(5)
+        machine.send_chars("ls --color -alF\n")
+        machine.sleep(2)
+        machine.screenshot("terminology")
+  '';
+})
diff --git a/nixos/tests/gitdaemon.nix b/nixos/tests/gitdaemon.nix
index b610caf06fb2..c4a707943ef1 100644
--- a/nixos/tests/gitdaemon.nix
+++ b/nixos/tests/gitdaemon.nix
@@ -55,6 +55,9 @@ in {
     with subtest("git daemon starts"):
         server.wait_for_unit("git-daemon.service")
 
+    server.wait_for_unit("network-online.target")
+    client.wait_for_unit("network-online.target")
+
     with subtest("client can clone project.git"):
         client.succeed(
             "git clone git://server/project.git /project",
diff --git a/nixos/tests/google-oslogin/default.nix b/nixos/tests/google-oslogin/default.nix
index 1977e92e9877..97783c81f397 100644
--- a/nixos/tests/google-oslogin/default.nix
+++ b/nixos/tests/google-oslogin/default.nix
@@ -22,6 +22,8 @@ in {
     client = { ... }: {};
   };
   testScript =  ''
+    MOCKUSER = "mockuser_nixos_org"
+    MOCKADMIN = "mockadmin_nixos_org"
     start_all()
 
     server.wait_for_unit("mock-google-metadata.service")
@@ -29,10 +31,10 @@ in {
 
     # mockserver should return a non-expired ssh key for both mockuser and mockadmin
     server.succeed(
-        '${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys mockuser | grep -q "${snakeOilPublicKey}"'
+        f'${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys {MOCKUSER} | grep -q "${snakeOilPublicKey}"'
     )
     server.succeed(
-        '${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys mockadmin | grep -q "${snakeOilPublicKey}"'
+        f'${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys {MOCKADMIN} | grep -q "${snakeOilPublicKey}"'
     )
 
     # install snakeoil ssh key on the client, and provision .ssh/config file
@@ -50,20 +52,22 @@ in {
     client.fail("ssh ghost@server 'true'")
 
     # we should be able to connect as mockuser
-    client.succeed("ssh mockuser@server 'true'")
+    client.succeed(f"ssh {MOCKUSER}@server 'true'")
     # but we shouldn't be able to sudo
     client.fail(
-        "ssh mockuser@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
+        f"ssh {MOCKUSER}@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
     )
 
     # we should also be able to log in as mockadmin
-    client.succeed("ssh mockadmin@server 'true'")
+    client.succeed(f"ssh {MOCKADMIN}@server 'true'")
     # pam_oslogin_admin.so should now have generated a sudoers file
-    server.succeed("find /run/google-sudoers.d | grep -q '/run/google-sudoers.d/mockadmin'")
+    server.succeed(
+        f"find /run/google-sudoers.d | grep -q '/run/google-sudoers.d/{MOCKADMIN}'"
+    )
 
     # and we should be able to sudo
     client.succeed(
-        "ssh mockadmin@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
+        f"ssh {MOCKADMIN}@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
     )
   '';
   })
diff --git a/nixos/tests/google-oslogin/server.py b/nixos/tests/google-oslogin/server.py
index bfc527cb97d3..5ea9bbd2c96b 100644
--- a/nixos/tests/google-oslogin/server.py
+++ b/nixos/tests/google-oslogin/server.py
@@ -7,24 +7,29 @@ import hashlib
 import base64
 
 from http.server import BaseHTTPRequestHandler, HTTPServer
+from urllib.parse import urlparse, parse_qs
 from typing import Dict
 
 SNAKEOIL_PUBLIC_KEY = os.environ['SNAKEOIL_PUBLIC_KEY']
+MOCKUSER="mockuser_nixos_org"
+MOCKADMIN="mockadmin_nixos_org"
 
 
-def w(msg):
+def w(msg: bytes):
     sys.stderr.write(f"{msg}\n")
     sys.stderr.flush()
 
 
-def gen_fingerprint(pubkey):
+def gen_fingerprint(pubkey: str):
     decoded_key = base64.b64decode(pubkey.encode("ascii").split()[1])
     return hashlib.sha256(decoded_key).hexdigest()
 
-def gen_email(username):
+
+def gen_email(username: str):
     """username seems to be a 21 characters long number string, so mimic that in a reproducible way"""
     return str(int(hashlib.sha256(username.encode()).hexdigest(), 16))[0:21]
 
+
 def gen_mockuser(username: str, uid: str, gid: str, home_directory: str, snakeoil_pubkey: str) -> Dict:
     snakeoil_pubkey_fingerprint = gen_fingerprint(snakeoil_pubkey)
     # seems to be a 21 characters long numberstring, so mimic that in a reproducible way
@@ -56,7 +61,8 @@ def gen_mockuser(username: str, uid: str, gid: str, home_directory: str, snakeoi
 
 
 class ReqHandler(BaseHTTPRequestHandler):
-    def _send_json_ok(self, data):
+
+    def _send_json_ok(self, data: dict):
         self.send_response(200)
         self.send_header('Content-type', 'application/json')
         self.end_headers()
@@ -64,29 +70,62 @@ class ReqHandler(BaseHTTPRequestHandler):
         w(out)
         self.wfile.write(out)
 
+    def _send_json_success(self, success=True):
+        self.send_response(200)
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+        out = json.dumps({"success": success}).encode()
+        w(out)
+        self.wfile.write(out)
+
+    def _send_404(self):
+        self.send_response(404)
+        self.end_headers()
+
     def do_GET(self):
         p = str(self.path)
-        # mockuser and mockadmin are allowed to login, both use the same snakeoil public key
-        if p == '/computeMetadata/v1/oslogin/users?username=mockuser' \
-            or p == '/computeMetadata/v1/oslogin/users?uid=1009719690':
-            self._send_json_ok(gen_mockuser(username='mockuser', uid='1009719690', gid='1009719690',
-                                            home_directory='/home/mockuser', snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY))
-        elif p == '/computeMetadata/v1/oslogin/users?username=mockadmin' \
-            or p == '/computeMetadata/v1/oslogin/users?uid=1009719691':
-            self._send_json_ok(gen_mockuser(username='mockadmin', uid='1009719691', gid='1009719691',
-                                            home_directory='/home/mockadmin', snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY))
-
-        # mockuser is allowed to login
-        elif p == f"/computeMetadata/v1/oslogin/authorize?email={gen_email('mockuser')}&policy=login":
-            self._send_json_ok({'success': True})
-
-        # mockadmin may also become root
-        elif p == f"/computeMetadata/v1/oslogin/authorize?email={gen_email('mockadmin')}&policy=login" or p == f"/computeMetadata/v1/oslogin/authorize?email={gen_email('mockadmin')}&policy=adminLogin":
-            self._send_json_ok({'success': True})
+        pu = urlparse(p)
+        params = parse_qs(pu.query)
+
+        # users endpoint
+        if pu.path == "/computeMetadata/v1/oslogin/users":
+            # mockuser and mockadmin are allowed to login, both use the same snakeoil public key
+            if params.get('username') == [MOCKUSER] or params.get('uid') == ["1009719690"]:
+                username = MOCKUSER
+                uid = "1009719690"
+            elif params.get('username') == [MOCKADMIN] or params.get('uid') == ["1009719691"]:
+                username = MOCKADMIN
+                uid = "1009719691"
+            else:
+                self._send_404()
+                return
+
+            self._send_json_ok(gen_mockuser(username=username, uid=uid, gid=uid, home_directory=f"/home/{username}", snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY))
+            return
+
+        # authorize endpoint
+        elif pu.path == "/computeMetadata/v1/oslogin/authorize":
+            # is user allowed to login?
+            if params.get("policy") == ["login"]:
+                # mockuser and mockadmin are allowed to login
+                if params.get('email') == [gen_email(MOCKUSER)] or params.get('email') == [gen_email(MOCKADMIN)]:
+                    self._send_json_success()
+                    return
+                self._send_json_success(False)
+                return
+            # is user allowed to become root?
+            elif params.get("policy") == ["adminLogin"]:
+                # only mockadmin is allowed to become admin
+                self._send_json_success((params['email'] == [gen_email(MOCKADMIN)]))
+                return
+            # send 404 for other policies
+            else:
+                self._send_404()
+                return
         else:
             sys.stderr.write(f"Unhandled path: {p}\n")
             sys.stderr.flush()
-            self.send_response(501)
+            self.send_response(404)
             self.end_headers()
             self.wfile.write(b'')
 
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index c5abd458ec9a..eef9abebf9f2 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -29,7 +29,7 @@ let
             boot.loader.grub.splashImage = null;
           ''}
 
-          boot.loader.grub.extraConfig = "serial; terminal_output.serial";
+          boot.loader.grub.extraConfig = "serial; terminal_output serial";
           ${if grubUseEfi then ''
             boot.loader.grub.device = "nodev";
             boot.loader.grub.efiSupport = true;
@@ -97,7 +97,7 @@ let
 
 
       def create_machine_named(name):
-          return create_machine({**default_flags, "name": "boot-after-install"})
+          return create_machine({**default_flags, "name": name})
 
 
       machine.start()
@@ -650,6 +650,32 @@ in {
     '';
   };
 
+  bcache = makeInstallerTest "bcache" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda --"
+          + " mklabel msdos"
+          + " mkpart primary ext2 1M 50MB"  # /boot
+          + " mkpart primary 50MB 512MB  "  # swap
+          + " mkpart primary 512MB 1024MB"  # Cache (typically SSD)
+          + " mkpart primary 1024MB -1s ",  # Backing device (typically HDD)
+          "modprobe bcache",
+          "udevadm settle",
+          "make-bcache -B /dev/vda4 -C /dev/vda3",
+          "echo /dev/vda3 > /sys/fs/bcache/register",
+          "echo /dev/vda4 > /sys/fs/bcache/register",
+          "udevadm settle",
+          "mkfs.ext3 -L nixos /dev/bcache0",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+          "mkswap -f /dev/vda2 -L swap",
+          "swapon -L swap",
+      )
+    '';
+  };
+
   # Test a basic install using GRUB 1.
   grub1 = makeInstallerTest "grub1" {
     createPartitions = ''
diff --git a/nixos/tests/ipfs.nix b/nixos/tests/ipfs.nix
index 3cff7e99ff88..4d721aec0c73 100644
--- a/nixos/tests/ipfs.nix
+++ b/nixos/tests/ipfs.nix
@@ -1,55 +1,25 @@
-
-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ...} : {
   name = "ipfs";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ mguentner ];
   };
 
-  nodes = {
-    adder =
-      { ... }:
-      {
-        services.ipfs = {
-          enable = true;
-          defaultMode = "norouting";
-          gatewayAddress = "/ip4/127.0.0.1/tcp/2323";
-          apiAddress = "/ip4/127.0.0.1/tcp/2324";
-        };
-        networking.firewall.allowedTCPPorts = [ 4001 ];
-      };
-    getter =
-      { ... }:
-      {
-        services.ipfs = {
-          enable = true;
-          defaultMode = "norouting";
-          autoMount = true;
-        };
-        networking.firewall.allowedTCPPorts = [ 4001 ];
-      };
+  nodes.machine = { ... }: {
+    services.ipfs = {
+      enable = true;
+      apiAddress = "/ip4/127.0.0.1/tcp/2324";
+    };
   };
 
   testScript = ''
-    startAll;
-    $adder->waitForUnit("ipfs-norouting");
-    $getter->waitForUnit("ipfs-norouting");
-
-    # wait until api is available
-    $adder->waitUntilSucceeds("ipfs --api /ip4/127.0.0.1/tcp/2324 id");
-    my $addrId = $adder->succeed("ipfs --api /ip4/127.0.0.1/tcp/2324 id -f=\"<id>\"");
-    my $addrIp = (split /[ \/]+/, $adder->succeed("ip -o -4 addr show dev eth1"))[3];
-
-    $adder->mustSucceed("[ -n \"\$(ipfs --api /ip4/127.0.0.1/tcp/2324 config Addresses.Gateway | grep /ip4/127.0.0.1/tcp/2323)\" ]");
-
-    # wait until api is available
-    $getter->waitUntilSucceeds("ipfs --api /ip4/127.0.0.1/tcp/5001 id");
-    my $ipfsHash = $adder->mustSucceed("echo fnord | ipfs --api /ip4/127.0.0.1/tcp/2324 add | cut -d' ' -f2");
-    chomp($ipfsHash);
+    start_all()
+    machine.wait_for_unit("ipfs")
 
-    $adder->mustSucceed("[ -n \"\$(echo fnord | ipfs --api /ip4/127.0.0.1/tcp/2324 add | grep added)\" ]");
+    machine.wait_until_succeeds("ipfs --api /ip4/127.0.0.1/tcp/2324 id")
+    ipfs_hash = machine.succeed(
+        "echo fnord | ipfs --api /ip4/127.0.0.1/tcp/2324 add | awk '{ print $2 }'"
+    )
 
-    $getter->mustSucceed("ipfs --api /ip4/127.0.0.1/tcp/5001 swarm connect /ip4/$addrIp/tcp/4001/ipfs/$addrId");
-    $getter->mustSucceed("[ -n \"\$(ipfs --api /ip4/127.0.0.1/tcp/5001 cat /ipfs/$ipfsHash | grep fnord)\" ]");
-    $getter->mustSucceed("[ -n \"$(cat /ipfs/$ipfsHash | grep fnord)\" ]");
-    '';
+    machine.succeed(f"ipfs cat /ipfs/{ipfs_hash.strip()} | grep fnord")
+  '';
 })
diff --git a/nixos/tests/ldap.nix b/nixos/tests/ldap.nix
deleted file mode 100644
index 74b002fc00ee..000000000000
--- a/nixos/tests/ldap.nix
+++ /dev/null
@@ -1,405 +0,0 @@
-import ./make-test-python.nix ({ pkgs, lib, ...} :
-
-let
-  unlines = lib.concatStringsSep "\n";
-  unlinesAttrs = f: as: unlines (lib.mapAttrsToList f as);
-
-  dbDomain = "example.com";
-  dbSuffix = "dc=example,dc=com";
-  dbAdminDn = "cn=admin,${dbSuffix}";
-  dbAdminPwd = "admin-password";
-  # NOTE: slappasswd -h "{SSHA}" -s '${dbAdminPwd}'
-  dbAdminPwdHash = "{SSHA}i7FopSzkFQMrHzDMB1vrtkI0rBnwouP8";
-  ldapUser = "test-ldap-user";
-  ldapUserId = 10000;
-  ldapUserPwd = "user-password";
-  # NOTE: slappasswd -h "{SSHA}" -s '${ldapUserPwd}'
-  ldapUserPwdHash = "{SSHA}v12XICMZNGT6r2KJ26rIkN8Vvvp4QX6i";
-  ldapGroup = "test-ldap-group";
-  ldapGroupId = 10000;
-
-  mkClient = useDaemon:
-    { lib, ... }:
-    {
-      virtualisation.memorySize = 256;
-      virtualisation.vlans = [ 1 ];
-      security.pam.services.su.rootOK = lib.mkForce false;
-      users.ldap.enable = true;
-      users.ldap.daemon = {
-        enable = useDaemon;
-        rootpwmoddn = "cn=admin,${dbSuffix}";
-        rootpwmodpwFile = "/etc/nslcd.rootpwmodpw";
-      };
-      users.ldap.loginPam = true;
-      users.ldap.nsswitch = true;
-      users.ldap.server = "ldap://server";
-      users.ldap.base = "ou=posix,${dbSuffix}";
-      users.ldap.bind = {
-        distinguishedName = "cn=admin,${dbSuffix}";
-        passwordFile = "/etc/ldap/bind.password";
-      };
-      # NOTE: passwords stored in clear in Nix's store, but this is a test.
-      environment.etc."ldap/bind.password".source = pkgs.writeText "password" dbAdminPwd;
-      environment.etc."nslcd.rootpwmodpw".source = pkgs.writeText "rootpwmodpw" dbAdminPwd;
-    };
-in
-
-{
-  name = "ldap";
-  meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ montag451 ];
-  };
-
-  nodes = {
-
-    server =
-      { pkgs, config, ... }:
-      let
-        inherit (config.services) openldap;
-
-        slapdConfig = pkgs.writeText "cn=config.ldif" (''
-          dn: cn=config
-          objectClass: olcGlobal
-          #olcPidFile: /run/slapd/slapd.pid
-          # List of arguments that were passed to the server
-          #olcArgsFile: /run/slapd/slapd.args
-          # Read slapd-config(5) for possible values
-          olcLogLevel: none
-          # The tool-threads parameter sets the actual amount of CPU's
-          # that is used for indexing.
-          olcToolThreads: 1
-
-          dn: olcDatabase={-1}frontend,cn=config
-          objectClass: olcDatabaseConfig
-          objectClass: olcFrontendConfig
-          # The maximum number of entries that is returned for a search operation
-          olcSizeLimit: 500
-          # Allow unlimited access to local connection from the local root user
-          olcAccess: to *
-            by dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth manage
-            by * break
-          # Allow unauthenticated read access for schema and base DN autodiscovery
-          olcAccess: to dn.exact=""
-            by * read
-          olcAccess: to dn.base="cn=Subschema"
-            by * read
-
-          dn: olcDatabase=config,cn=config
-          objectClass: olcDatabaseConfig
-          olcRootDN: cn=admin,cn=config
-          #olcRootPW:
-          # NOTE: access to cn=config, system root can be manager
-          # with SASL mechanism (-Y EXTERNAL) over unix socket (-H ldapi://)
-          olcAccess: to *
-            by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" manage
-            by * break
-
-          dn: cn=schema,cn=config
-          objectClass: olcSchemaConfig
-
-          include: file://${pkgs.openldap}/etc/schema/core.ldif
-          include: file://${pkgs.openldap}/etc/schema/cosine.ldif
-          include: file://${pkgs.openldap}/etc/schema/nis.ldif
-          include: file://${pkgs.openldap}/etc/schema/inetorgperson.ldif
-
-          dn: cn=module{0},cn=config
-          objectClass: olcModuleList
-          # Where the dynamically loaded modules are stored
-          #olcModulePath: /usr/lib/ldap
-          olcModuleLoad: back_mdb
-
-          ''
-          + unlinesAttrs (olcSuffix: {conf, ...}:
-              "include: file://" + pkgs.writeText "config.ldif" conf
-            ) slapdDatabases
-          );
-
-        slapdDatabases = {
-          ${dbSuffix} = {
-            conf = ''
-              dn: olcBackend={1}mdb,cn=config
-              objectClass: olcBackendConfig
-
-              dn: olcDatabase={1}mdb,cn=config
-              olcSuffix: ${dbSuffix}
-              olcDbDirectory: ${openldap.dataDir}/${dbSuffix}
-              objectClass: olcDatabaseConfig
-              objectClass: olcMdbConfig
-              # NOTE: checkpoint the database periodically in case of system failure
-              # and to speed up slapd shutdown.
-              olcDbCheckpoint: 512 30
-              # Database max size is 1G
-              olcDbMaxSize: 1073741824
-              olcLastMod: TRUE
-              # NOTE: database superuser. Needed for syncrepl,
-              # and used to auth as admin through a TCP connection.
-              olcRootDN: cn=admin,${dbSuffix}
-              olcRootPW: ${dbAdminPwdHash}
-              #
-              olcDbIndex: objectClass eq
-              olcDbIndex: cn,uid eq
-              olcDbIndex: uidNumber,gidNumber eq
-              olcDbIndex: member,memberUid eq
-              #
-              olcAccess: to attrs=userPassword
-                by self write
-                by anonymous auth
-                by dn="cn=admin,${dbSuffix}" write
-                by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
-                by * none
-              olcAccess: to attrs=shadowLastChange
-                by self write
-                by dn="cn=admin,${dbSuffix}" write
-                by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
-                by * none
-              olcAccess: to dn.sub="ou=posix,${dbSuffix}"
-                by self read
-                by dn="cn=admin,${dbSuffix}" read
-                by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" read
-              olcAccess: to *
-                by self read
-                by * none
-            '';
-            data = ''
-              dn: ${dbSuffix}
-              objectClass: top
-              objectClass: dcObject
-              objectClass: organization
-              o: ${dbDomain}
-
-              dn: cn=admin,${dbSuffix}
-              objectClass: simpleSecurityObject
-              objectClass: organizationalRole
-              description: ${dbDomain} LDAP administrator
-              roleOccupant: ${dbSuffix}
-              userPassword: ${ldapUserPwdHash}
-
-              dn: ou=posix,${dbSuffix}
-              objectClass: top
-              objectClass: organizationalUnit
-
-              dn: ou=accounts,ou=posix,${dbSuffix}
-              objectClass: top
-              objectClass: organizationalUnit
-
-              dn: ou=groups,ou=posix,${dbSuffix}
-              objectClass: top
-              objectClass: organizationalUnit
-            ''
-            + lib.concatMapStrings posixAccount [
-              { uid=ldapUser; uidNumber=ldapUserId; gidNumber=ldapGroupId; userPassword=ldapUserPwdHash; }
-            ]
-            + lib.concatMapStrings posixGroup [
-              { gid=ldapGroup; gidNumber=ldapGroupId; members=[]; }
-            ];
-          };
-        };
-
-        # NOTE: create a user account using the posixAccount objectClass.
-        posixAccount =
-          { uid
-          , uidNumber ? null
-          , gidNumber ? null
-          , cn ? ""
-          , sn ? ""
-          , userPassword ? ""
-          , loginShell ? "/bin/sh"
-          }: ''
-
-            dn: uid=${uid},ou=accounts,ou=posix,${dbSuffix}
-            objectClass: person
-            objectClass: posixAccount
-            objectClass: shadowAccount
-            cn: ${cn}
-            gecos:
-            ${if gidNumber == null then "#" else "gidNumber: ${toString gidNumber}"}
-            homeDirectory: /home/${uid}
-            loginShell: ${loginShell}
-            sn: ${sn}
-            ${if uidNumber == null then "#" else "uidNumber: ${toString uidNumber}"}
-            ${if userPassword == "" then "#" else "userPassword: ${userPassword}"}
-          '';
-
-        # NOTE: create a group using the posixGroup objectClass.
-        posixGroup =
-          { gid
-          , gidNumber
-          , members
-          }: ''
-
-            dn: cn=${gid},ou=groups,ou=posix,${dbSuffix}
-            objectClass: top
-            objectClass: posixGroup
-            gidNumber: ${toString gidNumber}
-            ${lib.concatMapStrings (member: "memberUid: ${member}\n") members}
-          '';
-      in
-      {
-        virtualisation.memorySize = 256;
-        virtualisation.vlans = [ 1 ];
-        networking.firewall.allowedTCPPorts = [ 389 ];
-        services.openldap.enable = true;
-        services.openldap.dataDir = "/var/db/openldap";
-        services.openldap.configDir = "/var/db/slapd";
-        services.openldap.urlList = [
-          "ldap:///"
-          "ldapi:///"
-        ];
-        systemd.services.openldap = {
-          preStart = ''
-              set -e
-              # NOTE: slapd's config is always re-initialized.
-              rm -rf "${openldap.configDir}"/cn=config \
-                     "${openldap.configDir}"/cn=config.ldif
-              install -D -d -m 0700 -o "${openldap.user}" -g "${openldap.group}" "${openldap.configDir}"
-              # NOTE: olcDbDirectory must be created before adding the config.
-              '' +
-              unlinesAttrs (olcSuffix: {data, ...}: ''
-                # NOTE: database is always re-initialized.
-                rm -rf "${openldap.dataDir}/${olcSuffix}"
-                install -D -d -m 0700 -o "${openldap.user}" -g "${openldap.group}" \
-                 "${openldap.dataDir}/${olcSuffix}"
-                '') slapdDatabases
-              + ''
-              # NOTE: slapd is supposed to be stopped while in preStart,
-              #       hence slap* commands can safely be used.
-              umask 0077
-              ${pkgs.openldap}/bin/slapadd -n 0 \
-               -F "${openldap.configDir}" \
-               -l ${slapdConfig}
-              chown -R "${openldap.user}:${openldap.group}" "${openldap.configDir}"
-              # NOTE: slapadd(8): To populate the config database slapd-config(5),
-              #                   use -n 0 as it is always the first database.
-              #                   It must physically exist on the filesystem prior to this, however.
-            '' +
-            unlinesAttrs (olcSuffix: {data, ...}: ''
-              # NOTE: load database ${olcSuffix}
-              # (as root to avoid depending on sudo or chpst)
-              ${pkgs.openldap}/bin/slapadd \
-               -F "${openldap.configDir}" \
-               -l ${pkgs.writeText "data.ldif" data}
-              '' + ''
-              # NOTE: redundant with default openldap's preStart, but do not harm.
-              chown -R "${openldap.user}:${openldap.group}" \
-               "${openldap.dataDir}/${olcSuffix}"
-            '') slapdDatabases;
-        };
-      };
-
-    client1 = mkClient true; # use nss_pam_ldapd
-    client2 = mkClient false; # use nss_ldap and pam_ldap
-  };
-
-  testScript = ''
-    def expect_script(*commands):
-        script = ";".join(commands)
-        return f"${pkgs.expect}/bin/expect -c '{script}'"
-
-
-    server.start()
-    server.wait_for_unit("default.target")
-
-    with subtest("slapd: auth as database admin with SASL and check a POSIX account"):
-        server.succeed(
-            'test "$(ldapsearch -LLL -H ldapi:// -Y EXTERNAL '
-            + "-b 'uid=${ldapUser},ou=accounts,ou=posix,${dbSuffix}' "
-            + "-s base uidNumber | "
-            + "sed -ne 's/^uidNumber: \\(.*\\)/\\1/p')\" -eq ${toString ldapUserId}"
-        )
-
-    with subtest("slapd: auth as database admin with password and check a POSIX account"):
-        server.succeed(
-            "test \"$(ldapsearch -LLL -H ldap://server -D 'cn=admin,${dbSuffix}' "
-            + "-w '${dbAdminPwd}' -b 'uid=${ldapUser},ou=accounts,ou=posix,${dbSuffix}' "
-            + "-s base uidNumber | "
-            + "sed -ne 's/^uidNumber: \\(.*\\)/\\1/p')\" -eq ${toString ldapUserId}"
-        )
-
-    client1.start()
-    client1.wait_for_unit("default.target")
-
-    with subtest("password: su with password to a POSIX account"):
-        client1.succeed(
-            expect_script(
-                'spawn su "${ldapUser}"',
-                'expect "Password:"',
-                'send "${ldapUserPwd}\n"',
-                'expect "*"',
-                'send "whoami\n"',
-                'expect -ex "${ldapUser}" {exit}',
-                "exit 1",
-            )
-        )
-
-    with subtest("password: change password of a POSIX account as root"):
-        client1.succeed(
-            "chpasswd <<<'${ldapUser}:new-password'",
-            expect_script(
-                'spawn su "${ldapUser}"',
-                'expect "Password:"',
-                'send "new-password\n"',
-                'expect "*"',
-                'send "whoami\n"',
-                'expect -ex "${ldapUser}" {exit}',
-                "exit 1",
-            ),
-            "chpasswd <<<'${ldapUser}:${ldapUserPwd}'",
-        )
-
-    with subtest("password: change password of a POSIX account from itself"):
-        client1.succeed(
-            "chpasswd <<<'${ldapUser}:${ldapUserPwd}' ",
-            expect_script(
-                "spawn su --login ${ldapUser} -c passwd",
-                'expect "Password: "',
-                'send "${ldapUserPwd}\n"',
-                'expect "(current) UNIX password: "',
-                'send "${ldapUserPwd}\n"',
-                'expect "New password: "',
-                'send "new-password\n"',
-                'expect "Retype new password: "',
-                'send "new-password\n"',
-                'expect "passwd: password updated successfully" {exit}',
-                "exit 1",
-            ),
-            expect_script(
-                'spawn su "${ldapUser}"',
-                'expect "Password:"',
-                'send "${ldapUserPwd}\n"',
-                'expect "su: Authentication failure" {exit}',
-                "exit 1",
-            ),
-            expect_script(
-                'spawn su "${ldapUser}"',
-                'expect "Password:"',
-                'send "new-password\n"',
-                'expect "*"',
-                'send "whoami\n"',
-                'expect -ex "${ldapUser}" {exit}',
-                "exit 1",
-            ),
-            "chpasswd <<<'${ldapUser}:${ldapUserPwd}'",
-        )
-
-    client2.start()
-    client2.wait_for_unit("default.target")
-
-    with subtest("NSS"):
-        client1.succeed(
-            "test \"$(id -u    '${ldapUser}')\" -eq ${toString ldapUserId}",
-            "test \"$(id -u -n '${ldapUser}')\" =  '${ldapUser}'",
-            "test \"$(id -g    '${ldapUser}')\" -eq ${toString ldapGroupId}",
-            "test \"$(id -g -n '${ldapUser}')\" =  '${ldapGroup}'",
-            "test \"$(id -u    '${ldapUser}')\" -eq ${toString ldapUserId}",
-            "test \"$(id -u -n '${ldapUser}')\" =  '${ldapUser}'",
-            "test \"$(id -g    '${ldapUser}')\" -eq ${toString ldapGroupId}",
-            "test \"$(id -g -n '${ldapUser}')\" =  '${ldapGroup}'",
-        )
-
-    with subtest("PAM"):
-        client1.succeed(
-            "echo ${ldapUserPwd} | su -l '${ldapUser}' -c true",
-            "echo ${ldapUserPwd} | su -l '${ldapUser}' -c true",
-        )
-  '';
-})
diff --git a/nixos/tests/mediawiki.nix b/nixos/tests/mediawiki.nix
index 9468c1de8ccb..008682310cf6 100644
--- a/nixos/tests/mediawiki.nix
+++ b/nixos/tests/mediawiki.nix
@@ -8,6 +8,13 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       services.mediawiki.virtualHost.hostName = "localhost";
       services.mediawiki.virtualHost.adminAddr = "root@example.com";
       services.mediawiki.passwordFile = pkgs.writeText "password" "correcthorsebatterystaple";
+      services.mediawiki.extensions = {
+        Matomo = pkgs.fetchzip {
+          url = "https://github.com/DaSchTour/matomo-mediawiki-extension/archive/v4.0.1.tar.gz";
+          sha256 = "0g5rd3zp0avwlmqagc59cg9bbkn3r7wx7p6yr80s644mj6dlvs1b";
+        };
+        ParserFunctions = null;
+      };
     };
 
   testScript = ''
diff --git a/nixos/tests/minio.nix b/nixos/tests/minio.nix
index 3b0619742671..02d1f7aa6c20 100644
--- a/nixos/tests/minio.nix
+++ b/nixos/tests/minio.nix
@@ -44,7 +44,7 @@ in {
 
     # Create a test bucket on the server
     machine.succeed(
-        "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} S3v4"
+        "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4"
     )
     machine.succeed("mc mb minio/test-bucket")
     machine.succeed("${minioPythonScript}")
diff --git a/nixos/tests/mysql/mariadb-galera-mariabackup.nix b/nixos/tests/mysql/mariadb-galera-mariabackup.nix
new file mode 100644
index 000000000000..73abf6c555f9
--- /dev/null
+++ b/nixos/tests/mysql/mariadb-galera-mariabackup.nix
@@ -0,0 +1,223 @@
+import ./../make-test-python.nix ({ pkgs, ...} :
+
+let
+  mysqlenv-common      = pkgs.buildEnv { name = "mysql-path-env-common";      pathsToLink = [ "/bin" ]; paths = with pkgs; [ bash gawk gnutar inetutils which ]; };
+  mysqlenv-mariabackup = pkgs.buildEnv { name = "mysql-path-env-mariabackup"; pathsToLink = [ "/bin" ]; paths = with pkgs; [ gzip iproute netcat procps pv socat ]; };
+
+in {
+  name = "mariadb-galera-mariabackup";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ izorkin ];
+  };
+
+  # The test creates a Galera cluster with 3 nodes and checks that mariabackup-based SST works. The cluster is exercised by creating a database and a table on one node,
+  # then checking that the table is present on the other nodes.
+
+  nodes = {
+    galera_01 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.1.1"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.1.1 galera_01
+          192.168.1.2 galera_02
+          192.168.1.3 galera_03
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-mariabackup ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        ensureDatabases = [ "testdb" ];
+        ensureUsers = [{
+          name = "testuser";
+          ensurePermissions = {
+            "testdb.*" = "ALL PRIVILEGES";
+          };
+        }];
+        initialScript = pkgs.writeText "mariadb-init.sql" ''
+          GRANT ALL PRIVILEGES ON *.* TO 'check_repl'@'localhost' IDENTIFIED BY 'check_pass' WITH GRANT OPTION;
+          FLUSH PRIVILEGES;
+        '';
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://";
+            wsrep_cluster_name = "galera";
+            wsrep_node_address = "192.168.1.1";
+            wsrep_node_name = "galera_01";
+            wsrep_sst_method = "mariabackup";
+            wsrep_sst_auth = "check_repl:check_pass";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+
+    galera_02 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.1.2"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.1.1 galera_01
+          192.168.1.2 galera_02
+          192.168.1.3 galera_03
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-mariabackup ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://galera_01,galera_02,galera_03";
+            wsrep_cluster_name = "galera";
+            wsrep_node_address = "192.168.1.2";
+            wsrep_node_name = "galera_02";
+            wsrep_sst_method = "mariabackup";
+            wsrep_sst_auth = "check_repl:check_pass";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+
+    galera_03 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.1.3"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.1.1 galera_01
+          192.168.1.2 galera_02
+          192.168.1.3 galera_03
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-mariabackup ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://galera_01,galera_02,galera_03";
+            wsrep_cluster_name = "galera";
+            wsrep_node_address = "192.168.1.3";
+            wsrep_node_name = "galera_03";
+            wsrep_sst_method = "mariabackup";
+            wsrep_sst_auth = "check_repl:check_pass";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    galera_01.start()
+    galera_01.wait_for_unit("mysql")
+    galera_01.wait_for_open_port(3306)
+    galera_01.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_01.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (37);'"
+    )
+    galera_02.start()
+    galera_02.wait_for_unit("mysql")
+    galera_02.wait_for_open_port(3306)
+    galera_03.start()
+    galera_03.wait_for_unit("mysql")
+    galera_03.wait_for_open_port(3306)
+    galera_02.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 37"
+    )
+    galera_02.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_02.succeed("systemctl stop mysql")
+    galera_01.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (38);'"
+    )
+    galera_03.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_01.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (39);'"
+    )
+    galera_02.succeed("systemctl start mysql")
+    galera_02.wait_for_open_port(3306)
+    galera_02.succeed(
+        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+    )
+    galera_03.succeed(
+        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+    )
+    galera_01.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db3;' -N | grep 39"
+    )
+    galera_02.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db2;' -N | grep 38"
+    )
+    galera_03.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 37"
+    )
+    galera_01.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
+    galera_02.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db2;'")
+    galera_03.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db1;'")
+  '';
+})
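A reviewer may also want the joiner to confirm that the transfer really goes through mariabackup rather than another SST method. A minimal sketch, assuming the `galera_02` node and `testuser` account defined in the test above, that could be appended to its testScript:

    # Hedged sketch: check the configured SST method and that the joiner reports Synced.
    galera_02.succeed(
        "sudo -u testuser mysql -u root -e \"show variables like 'wsrep_sst_method'\" -N | grep mariabackup"
    )
    galera_02.succeed(
        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
    )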
diff --git a/nixos/tests/mysql/mariadb-galera-rsync.nix b/nixos/tests/mysql/mariadb-galera-rsync.nix
new file mode 100644
index 000000000000..cacae4569b57
--- /dev/null
+++ b/nixos/tests/mysql/mariadb-galera-rsync.nix
@@ -0,0 +1,216 @@
+import ./../make-test-python.nix ({ pkgs, ...} :
+
+let
+  mysqlenv-common      = pkgs.buildEnv { name = "mysql-path-env-common";      pathsToLink = [ "/bin" ]; paths = with pkgs; [ bash gawk gnutar inetutils which ]; };
+  mysqlenv-rsync       = pkgs.buildEnv { name = "mysql-path-env-rsync";       pathsToLink = [ "/bin" ]; paths = with pkgs; [ lsof procps rsync stunnel ]; };
+
+in {
+  name = "mariadb-galera-rsync";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ izorkin ];
+  };
+
+  # The test creates a Galera cluster with 3 nodes and checks whether rsync-based SST works. The cluster is exercised by creating
+  # a database and a table on one node and checking the table's presence on the other nodes.
+
+  nodes = {
+    galera_04 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.1"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.2.1 galera_04
+          192.168.2.2 galera_05
+          192.168.2.3 galera_06
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-rsync ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        ensureDatabases = [ "testdb" ];
+        ensureUsers = [{
+          name = "testuser";
+          ensurePermissions = {
+            "testdb.*" = "ALL PRIVILEGES";
+          };
+        }];
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://";
+            wsrep_cluster_name = "galera-rsync";
+            wsrep_node_address = "192.168.2.1";
+            wsrep_node_name = "galera_04";
+            wsrep_sst_method = "rsync";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+
+    galera_05 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.2"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.2.1 galera_04
+          192.168.2.2 galera_05
+          192.168.2.3 galera_06
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-rsync ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://galera_04,galera_05,galera_06";
+            wsrep_cluster_name = "galera-rsync";
+            wsrep_node_address = "192.168.2.2";
+            wsrep_node_name = "galera_05";
+            wsrep_sst_method = "rsync";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+
+    galera_06 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.3"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.2.1 galera_04
+          192.168.2.2 galera_05
+          192.168.2.3 galera_06
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-rsync ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://galera_04,galera_05,galera_06";
+            wsrep_cluster_name = "galera-rsync";
+            wsrep_node_address = "192.168.2.3";
+            wsrep_node_name = "galera_06";
+            wsrep_sst_method = "rsync";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    galera_04.start()
+    galera_04.wait_for_unit("mysql")
+    galera_04.wait_for_open_port(3306)
+    galera_04.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_04.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (41);'"
+    )
+    galera_05.start()
+    galera_05.wait_for_unit("mysql")
+    galera_05.wait_for_open_port(3306)
+    galera_06.start()
+    galera_06.wait_for_unit("mysql")
+    galera_06.wait_for_open_port(3306)
+    galera_05.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 41"
+    )
+    galera_05.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_05.succeed("systemctl stop mysql")
+    galera_04.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (42);'"
+    )
+    galera_06.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_04.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (43);'"
+    )
+    galera_05.succeed("systemctl start mysql")
+    galera_05.wait_for_open_port(3306)
+    galera_05.succeed(
+        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+    )
+    galera_06.succeed(
+        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+    )
+    galera_04.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db3;' -N | grep 43"
+    )
+    galera_05.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db2;' -N | grep 42"
+    )
+    galera_06.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 41"
+    )
+    galera_04.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
+    galera_05.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db2;'")
+    galera_06.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db1;'")
+  '';
+})
diff --git a/nixos/tests/automysqlbackup.nix b/nixos/tests/mysql/mysql-autobackup.nix
index 224b93862fbd..65576e52a537 100644
--- a/nixos/tests/automysqlbackup.nix
+++ b/nixos/tests/mysql/mysql-autobackup.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }:
+import ./../make-test-python.nix ({ pkgs, lib, ... }:
 
 {
   name = "automysqlbackup";
diff --git a/nixos/tests/mysql-backup.nix b/nixos/tests/mysql/mysql-backup.nix
index a0595e4d5539..c4c1079a8a64 100644
--- a/nixos/tests/mysql-backup.nix
+++ b/nixos/tests/mysql/mysql-backup.nix
@@ -1,5 +1,5 @@
 # Test whether mysqlBackup option works
-import ./make-test-python.nix ({ pkgs, ... } : {
+import ./../make-test-python.nix ({ pkgs, ... } : {
   name = "mysql-backup";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ rvl ];
diff --git a/nixos/tests/mysql-replication.nix b/nixos/tests/mysql/mysql-replication.nix
index a2654f041add..81038dccd947 100644
--- a/nixos/tests/mysql-replication.nix
+++ b/nixos/tests/mysql/mysql-replication.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ...} :
+import ./../make-test-python.nix ({ pkgs, ...} :
 
 let
   replicateUser = "replicate";
diff --git a/nixos/tests/mysql.nix b/nixos/tests/mysql/mysql.nix
index 11c1dabf9360..d236ce946328 100644
--- a/nixos/tests/mysql.nix
+++ b/nixos/tests/mysql/mysql.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ./../make-test-python.nix ({ pkgs, ...} : {
   name = "mysql";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ eelco shlevy ];
diff --git a/nixos/tests/testdb.sql b/nixos/tests/mysql/testdb.sql
index 3c68c49ae82c..3c68c49ae82c 100644
--- a/nixos/tests/testdb.sql
+++ b/nixos/tests/mysql/testdb.sql
diff --git a/nixos/tests/nginx-pubhtml.nix b/nixos/tests/nginx-pubhtml.nix
index 432913cb42d2..6e1e605628e9 100644
--- a/nixos/tests/nginx-pubhtml.nix
+++ b/nixos/tests/nginx-pubhtml.nix
@@ -2,6 +2,7 @@ import ./make-test-python.nix {
   name = "nginx-pubhtml";
 
   machine = { pkgs, ... }: {
+    systemd.services.nginx.serviceConfig.ProtectHome = "read-only";
     services.nginx.enable = true;
     services.nginx.virtualHosts.localhost = {
       locations."~ ^/\\~([a-z0-9_]+)(/.*)?$".alias = "/home/$1/public_html$2";
diff --git a/nixos/tests/nginx-sandbox.nix b/nixos/tests/nginx-sandbox.nix
new file mode 100644
index 000000000000..bc9d3ba8add7
--- /dev/null
+++ b/nixos/tests/nginx-sandbox.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx-sandbox";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ izorkin ];
+  };
+
+  # This test checks creating and reading a file while nginx runs in sandbox mode, using a simple Lua script.
+
+  machine = { pkgs, ... }: {
+    nixpkgs.overlays = [
+      (self: super: {
+        nginx-lua = super.nginx.override {
+          modules = [
+            pkgs.nginxModules.lua
+          ];
+        };
+      })
+    ];
+    services.nginx.enable = true;
+    services.nginx.package = pkgs.nginx-lua;
+    services.nginx.enableSandbox = true;
+    services.nginx.virtualHosts.localhost = {
+      extraConfig = ''
+        location /test1-write {
+          content_by_lua_block {
+            local create = os.execute('${pkgs.coreutils}/bin/mkdir /tmp/test1-read')
+            local create = os.execute('${pkgs.coreutils}/bin/touch /tmp/test1-read/foo.txt')
+            local echo = os.execute('${pkgs.coreutils}/bin/echo worked > /tmp/test1-read/foo.txt')
+          }
+        }
+        location /test1-read {
+          root /tmp;
+        }
+        location /test2-write {
+          content_by_lua_block {
+            local create = os.execute('${pkgs.coreutils}/bin/mkdir /var/web/test2-read')
+            local create = os.execute('${pkgs.coreutils}/bin/touch /var/web/test2-read/bar.txt')
+            local echo = os.execute('${pkgs.coreutils}/bin/echo error-worked > /var/web/test2-read/bar.txt')
+          }
+        }
+        location /test2-read {
+          root /var/web;
+        }
+      '';
+    };
+    users.users.foo.isNormalUser = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nginx")
+    machine.wait_for_open_port(80)
+
+    # Check that writing to the temporary folder works
+    machine.succeed("$(curl -vvv http://localhost/test1-write)")
+    machine.succeed('test "$(curl -fvvv http://localhost/test1-read/foo.txt)" = worked')
+
+    # Check that writing to the protected folder fails. In sandbox mode the folder /var/web is mounted
+    # read-only inside the nginx service's namespace.
+    machine.succeed("mkdir -p /var/web")
+    machine.succeed("chown nginx:nginx /var/web")
+    machine.succeed("$(curl -vvv http://localhost/test2-write)")
+    assert "404 Not Found" in machine.succeed(
+        "curl -vvv -s http://localhost/test2-read/bar.txt"
+    )
+  '';
+})
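Because the sandbox mounts /var read-only for the nginx unit, the blocked write should also leave no trace on the host. A minimal sketch, assuming the `machine` node and paths from the test above, that could follow the 404 assertion:

    # Hedged sketch: the file nginx tried to write under /var/web must not exist on the host.
    machine.fail("test -e /var/web/test2-read/bar.txt")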
diff --git a/nixos/tests/oci-containers.nix b/nixos/tests/oci-containers.nix
new file mode 100644
index 000000000000..bb6c019f07c9
--- /dev/null
+++ b/nixos/tests/oci-containers.nix
@@ -0,0 +1,43 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, lib ? pkgs.lib
+}:
+
+let
+
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+
+  mkOCITest = backend: makeTest {
+    name = "oci-containers-${backend}";
+
+    meta = {
+      maintainers = with lib.maintainers; [ adisbladis benley mkaito ];
+    };
+
+    nodes = {
+      ${backend} = { pkgs, ... }: {
+        virtualisation.oci-containers = {
+          inherit backend;
+          containers.nginx = {
+            image = "nginx-container";
+            imageFile = pkgs.dockerTools.examples.nginx;
+            ports = ["8181:80"];
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+      ${backend}.wait_for_unit("${backend}-nginx.service")
+      ${backend}.wait_for_open_port(8181)
+      ${backend}.wait_until_succeeds("curl http://localhost:8181 | grep Hello")
+    '';
+  };
+
+in
+lib.foldl' (attrs: backend: attrs // { ${backend} = mkOCITest backend; }) {} [
+  "docker"
+  "podman"
+]
diff --git a/nixos/tests/partition.nix b/nixos/tests/partition.nix
deleted file mode 100644
index 01a08995950f..000000000000
--- a/nixos/tests/partition.nix
+++ /dev/null
@@ -1,247 +0,0 @@
-import ./make-test.nix ({ pkgs, ... }:
-
-with pkgs.lib;
-
-let
-  ksExt = pkgs.writeText "ks-ext4" ''
-    clearpart --all --initlabel --drives=vdb
-
-    part /boot --recommended --label=boot --fstype=ext2 --ondisk=vdb
-    part swap --recommended --label=swap --fstype=swap --ondisk=vdb
-    part /nix --size=500 --label=nix --fstype=ext3 --ondisk=vdb
-    part / --recommended --label=root --fstype=ext4 --ondisk=vdb
-  '';
-
-  ksBtrfs = pkgs.writeText "ks-btrfs" ''
-    clearpart --all --initlabel --drives=vdb,vdc
-
-    part swap1 --recommended --label=swap1 --fstype=swap --ondisk=vdb
-    part swap2 --recommended --label=swap2 --fstype=swap --ondisk=vdc
-
-    part btrfs.1 --grow --ondisk=vdb
-    part btrfs.2 --grow --ondisk=vdc
-
-    btrfs / --data=0 --metadata=1 --label=root btrfs.1 btrfs.2
-  '';
-
-  ksF2fs = pkgs.writeText "ks-f2fs" ''
-    clearpart --all --initlabel --drives=vdb
-
-    part swap  --recommended --label=swap --fstype=swap --ondisk=vdb
-    part /boot --recommended --label=boot --fstype=f2fs --ondisk=vdb
-    part /     --recommended --label=root --fstype=f2fs --ondisk=vdb
-  '';
-
-  ksRaid = pkgs.writeText "ks-raid" ''
-    clearpart --all --initlabel --drives=vdb,vdc
-
-    part raid.01 --size=200 --ondisk=vdb
-    part raid.02 --size=200 --ondisk=vdc
-
-    part swap1 --size=500 --label=swap1 --fstype=swap --ondisk=vdb
-    part swap2 --size=500 --label=swap2 --fstype=swap --ondisk=vdc
-
-    part raid.11 --grow --ondisk=vdb
-    part raid.12 --grow --ondisk=vdc
-
-    raid /boot --level=1 --fstype=ext3 --device=md0 raid.01 raid.02
-    raid / --level=1 --fstype=xfs --device=md1 raid.11 raid.12
-  '';
-
-  ksRaidLvmCrypt = pkgs.writeText "ks-lvm-crypt" ''
-    clearpart --all --initlabel --drives=vdb,vdc
-
-    part raid.1 --grow --ondisk=vdb
-    part raid.2 --grow --ondisk=vdc
-
-    raid pv.0 --level=1 --encrypted --passphrase=x --device=md0 raid.1 raid.2
-
-    volgroup nixos pv.0
-
-    logvol /boot --size=200 --fstype=ext3 --name=boot --vgname=nixos
-    logvol swap --size=500 --fstype=swap --name=swap --vgname=nixos
-    logvol / --size=1000 --grow --fstype=ext4 --name=root --vgname=nixos
-  '';
-in {
-  name = "partitiion";
-
-  machine = { pkgs, ... }: {
-    environment.systemPackages = [
-      pkgs.pythonPackages.nixpart0
-      pkgs.file pkgs.btrfs-progs pkgs.xfsprogs pkgs.lvm2
-    ];
-    virtualisation.emptyDiskImages = [ 4096 4096 ];
-  };
-
-  testScript = ''
-    my $diskStart;
-    my @mtab;
-
-    sub getMtab {
-      my $mounts = $machine->succeed("cat /proc/mounts");
-      chomp $mounts;
-      return map [split], split /\n/, $mounts;
-    }
-
-    sub parttest {
-      my ($desc, $code) = @_;
-      $machine->start;
-      $machine->waitForUnit("default.target");
-
-      # Gather mounts and superblock
-      @mtab = getMtab;
-      $diskStart = $machine->succeed("dd if=/dev/vda bs=512 count=1");
-
-      subtest($desc, $code);
-      $machine->shutdown;
-    }
-
-    sub ensureSanity {
-      # Check whether the filesystem in /dev/vda is still intact
-      my $newDiskStart = $machine->succeed("dd if=/dev/vda bs=512 count=1");
-      if ($diskStart ne $newDiskStart) {
-        $machine->log("Something went wrong, the partitioner wrote " .
-                      "something into the first 512 bytes of /dev/vda!");
-        die;
-      }
-
-      # Check whether nixpart has unmounted anything
-      my @currentMtab = getMtab;
-      for my $mount (@mtab) {
-        my $path = $mount->[1];
-        unless (grep { $_->[1] eq $path } @currentMtab) {
-          $machine->log("The partitioner seems to have unmounted $path.");
-          die;
-        }
-      }
-    }
-
-    sub checkMount {
-      my $mounts = $machine->succeed("cat /proc/mounts");
-
-    }
-
-    sub kickstart {
-      $machine->copyFileFromHost($_[0], "/kickstart");
-      $machine->succeed("nixpart -v /kickstart");
-      ensureSanity;
-    }
-
-    sub ensurePartition {
-      my ($name, $match) = @_;
-      my $path = $name =~ /^\// ? $name : "/dev/disk/by-label/$name";
-      my $out = $machine->succeed("file -Ls $path");
-      my @matches = grep(/^$path: .*$match/i, $out);
-      if (!@matches) {
-        $machine->log("Partition on $path was expected to have a " .
-                      "file system that matches $match, but instead has: $out");
-        die;
-      }
-    }
-
-    sub ensureNoPartition {
-      $machine->succeed("test ! -e /dev/$_[0]");
-    }
-
-    sub ensureMountPoint {
-      $machine->succeed("mountpoint $_[0]");
-    }
-
-    sub remountAndCheck {
-      $machine->nest("Remounting partitions:", sub {
-        # XXX: "findmnt -ARunl -oTARGET /mnt" seems to NOT print all mounts!
-        my $getmounts_cmd = "cat /proc/mounts | cut -d' ' -f2 | grep '^/mnt'";
-        # Insert canaries first
-        my $canaries = $machine->succeed($getmounts_cmd . " | while read p;" .
-                                         " do touch \"\$p/canary\";" .
-                                         " echo \"\$p/canary\"; done");
-        # Now unmount manually
-        $machine->succeed($getmounts_cmd . " | tac | xargs -r umount");
-        # /mnt should be empty or non-existing
-        my $found = $machine->succeed("find /mnt -mindepth 1");
-        chomp $found;
-        if ($found) {
-          $machine->log("Cruft found in /mnt:\n$found");
-          die;
-        }
-        # Try to remount with nixpart
-        $machine->succeed("nixpart -vm /kickstart");
-        ensureMountPoint("/mnt");
-        # Check if our beloved canaries are dead
-        chomp $canaries;
-        $machine->nest("Checking canaries:", sub {
-          for my $canary (split /\n/, $canaries) {
-            $machine->succeed("test -e '$canary'");
-          }
-        });
-      });
-    }
-
-    parttest "ext2, ext3 and ext4 filesystems", sub {
-      kickstart("${ksExt}");
-      ensurePartition("boot", "ext2");
-      ensurePartition("swap", "swap");
-      ensurePartition("nix", "ext3");
-      ensurePartition("root", "ext4");
-      ensurePartition("/dev/vdb4", "boot sector");
-      ensureNoPartition("vdb6");
-      ensureNoPartition("vdc1");
-      remountAndCheck;
-      ensureMountPoint("/mnt/boot");
-      ensureMountPoint("/mnt/nix");
-    };
-
-    parttest "btrfs filesystem", sub {
-      $machine->succeed("modprobe btrfs");
-      kickstart("${ksBtrfs}");
-      ensurePartition("swap1", "swap");
-      ensurePartition("swap2", "swap");
-      ensurePartition("/dev/vdb2", "btrfs");
-      ensurePartition("/dev/vdc2", "btrfs");
-      ensureNoPartition("vdb3");
-      ensureNoPartition("vdc3");
-      remountAndCheck;
-    };
-
-    parttest "f2fs filesystem", sub {
-      $machine->succeed("modprobe f2fs");
-      kickstart("${ksF2fs}");
-      ensurePartition("swap", "swap");
-      ensurePartition("boot", "f2fs");
-      ensurePartition("root", "f2fs");
-      remountAndCheck;
-      ensureMountPoint("/mnt/boot", "f2fs");
-    };
-
-    parttest "RAID1 with XFS", sub {
-      kickstart("${ksRaid}");
-      ensurePartition("swap1", "swap");
-      ensurePartition("swap2", "swap");
-      ensurePartition("/dev/md0", "ext3");
-      ensurePartition("/dev/md1", "xfs");
-      ensureNoPartition("vdb4");
-      ensureNoPartition("vdc4");
-      ensureNoPartition("md2");
-      remountAndCheck;
-      ensureMountPoint("/mnt/boot");
-    };
-
-    parttest "RAID1 with LUKS and LVM", sub {
-      kickstart("${ksRaidLvmCrypt}");
-      ensurePartition("/dev/vdb1", "data");
-      ensureNoPartition("vdb2");
-      ensurePartition("/dev/vdc1", "data");
-      ensureNoPartition("vdc2");
-
-      ensurePartition("/dev/md0", "luks");
-      ensureNoPartition("md1");
-
-      ensurePartition("/dev/nixos/boot", "ext3");
-      ensurePartition("/dev/nixos/swap", "swap");
-      ensurePartition("/dev/nixos/root", "ext4");
-
-      remountAndCheck;
-      ensureMountPoint("/mnt/boot");
-    };
-  '';
-})
diff --git a/nixos/tests/php/default.nix b/nixos/tests/php/default.nix
index 9ab14f722d08..ee7a3b56a3ef 100644
--- a/nixos/tests/php/default.nix
+++ b/nixos/tests/php/default.nix
@@ -3,5 +3,6 @@
   pkgs ? import ../../.. { inherit system config; }
 }: {
   fpm = import ./fpm.nix { inherit system pkgs; };
+  httpd = import ./httpd.nix { inherit system pkgs; };
   pcre = import ./pcre.nix { inherit system pkgs; };
 }
diff --git a/nixos/tests/php/fpm.nix b/nixos/tests/php/fpm.nix
index e93a31834185..513abd943737 100644
--- a/nixos/tests/php/fpm.nix
+++ b/nixos/tests/php/fpm.nix
@@ -1,6 +1,6 @@
-import ../make-test-python.nix ({pkgs, ...}: {
+import ../make-test-python.nix ({pkgs, lib, ...}: {
   name = "php-fpm-nginx-test";
-  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ etu ];
+  meta.maintainers = lib.teams.php.members;
 
   machine = { config, lib, pkgs, ... }: {
     services.nginx = {
@@ -43,13 +43,11 @@ import ../make-test-python.nix ({pkgs, ...}: {
     machine.wait_for_unit("phpfpm-foobar.service")
 
     # Check so we get an evaluated PHP back
-    assert "PHP Version ${pkgs.php.version}" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    response = machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    assert "PHP Version ${pkgs.php.version}" in response, "PHP version not detected"
 
     # Check so we have database and some other extensions loaded
-    assert "json" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
-    assert "opcache" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
-    assert "pdo_mysql" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
-    assert "pdo_pgsql" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
-    assert "pdo_sqlite" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    for ext in ["json", "opcache", "pdo_mysql", "pdo_pgsql", "pdo_sqlite"]:
+        assert ext in response, f"Missing {ext} extension"
   '';
 })
diff --git a/nixos/tests/php/httpd.nix b/nixos/tests/php/httpd.nix
new file mode 100644
index 000000000000..1092e0ecadd3
--- /dev/null
+++ b/nixos/tests/php/httpd.nix
@@ -0,0 +1,31 @@
+import ../make-test-python.nix ({pkgs, lib, ...}: {
+  name = "php-httpd-test";
+  meta.maintainers = lib.teams.php.members;
+
+  machine = { config, lib, pkgs, ... }: {
+    services.httpd = {
+      enable = true;
+      adminAddr = "admin@phpfpm";
+      virtualHosts."phpfpm" = let
+        testdir = pkgs.writeTextDir "web/index.php" "<?php phpinfo();";
+      in {
+        documentRoot = "${testdir}/web";
+        locations."/" = {
+          index = "index.php index.html";
+        };
+      };
+      enablePHP = true;
+    };
+  };
+  testScript = { ... }: ''
+    machine.wait_for_unit("httpd.service")
+
+    # Check so we get an evaluated PHP back
+    response = machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    assert "PHP Version ${pkgs.php.version}" in response, "PHP version not detected"
+
+    # Check so we have database and some other extensions loaded
+    for ext in ["json", "opcache", "pdo_mysql", "pdo_pgsql", "pdo_sqlite"]:
+        assert ext in response, f"Missing {ext} extension"
+  '';
+})
diff --git a/nixos/tests/php/pcre.nix b/nixos/tests/php/pcre.nix
index 56a87778579f..3dd0964e60fb 100644
--- a/nixos/tests/php/pcre.nix
+++ b/nixos/tests/php/pcre.nix
@@ -1,7 +1,9 @@
 let
   testString = "can-use-subgroups";
-in import ../make-test-python.nix ({ ...}: {
+in import ../make-test-python.nix ({lib, ...}: {
   name = "php-httpd-pcre-jit-test";
+  meta.maintainers = lib.teams.php.members;
+
   machine = { lib, pkgs, ... }: {
     time.timeZone = "UTC";
     services.httpd = {
@@ -30,8 +32,8 @@ in import ../make-test-python.nix ({ ...}: {
     ''
       machine.wait_for_unit("httpd.service")
       # Ensure php evaluation by matching on the var_dump syntax
-      assert 'string(${toString (builtins.stringLength testString)}) "${testString}"' in machine.succeed(
-          "curl -vvv -s http://127.0.0.1:80/index.php"
-      )
+      response = machine.succeed("curl -vvv -s http://127.0.0.1:80/index.php")
+      expected = 'string(${toString (builtins.stringLength testString)}) "${testString}"'
+      assert expected in response, "Does not appear to be able to use subgroups."
     '';
 })
diff --git a/nixos/tests/podman.nix b/nixos/tests/podman.nix
new file mode 100644
index 000000000000..283db71d9a49
--- /dev/null
+++ b/nixos/tests/podman.nix
@@ -0,0 +1,60 @@
+# This test runs podman and checks whether a simple container starts.
+
+import ./make-test-python.nix (
+  { pkgs, lib, ... }: {
+    name = "podman";
+    meta = {
+      maintainers = lib.teams.podman.members;
+    };
+
+    nodes = {
+      podman =
+        { pkgs, ... }:
+        {
+          virtualisation.podman.enable = true;
+          virtualisation.containers.users = [
+            "alice"
+          ];
+
+          users.users.alice = {
+            isNormalUser = true;
+            home = "/home/alice";
+            description = "Alice Foobar";
+          };
+
+        };
+    };
+
+    testScript = ''
+      import shlex
+
+
+      def su_cmd(cmd):
+          cmd = shlex.quote(cmd)
+          return f"su alice -l -c {cmd}"
+
+
+      podman.wait_for_unit("sockets.target")
+      start_all()
+
+
+      with subtest("Run container as root"):
+          podman.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
+          podman.succeed(
+              "podman run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+          )
+          podman.succeed("podman ps | grep sleeping")
+          podman.succeed("podman stop sleeping")
+
+      with subtest("Run container rootless"):
+          podman.succeed(su_cmd("tar cv --files-from /dev/null | podman import - scratchimg"))
+          podman.succeed(
+              su_cmd(
+                  "podman run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+              )
+          )
+          podman.succeed(su_cmd("podman ps | grep sleeping"))
+          podman.succeed(su_cmd("podman stop sleeping"))
+    '';
+  }
+)
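As a tidy-up, the stopped containers could be removed at the end of each subtest. A minimal sketch reusing the `podman` node and the `su_cmd` helper defined in the test above:

    # Hedged sketch: remove the stopped containers from both subtests.
    podman.succeed("podman rm sleeping")
    podman.succeed(su_cmd("podman rm sleeping"))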
diff --git a/nixos/tests/privacyidea.nix b/nixos/tests/privacyidea.nix
new file mode 100644
index 000000000000..45c7cd37c241
--- /dev/null
+++ b/nixos/tests/privacyidea.nix
@@ -0,0 +1,36 @@
+# Tests the privacyIDEA service, served to clients through nginx/uwsgi.
+
+import ./make-test-python.nix ({ pkgs, ...} : rec {
+  name = "privacyidea";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ fpletz ];
+  };
+
+  machine = { ... }: {
+    virtualisation.cores = 2;
+    virtualisation.memorySize = 512;
+
+    services.privacyidea = {
+      enable = true;
+      secretKey = "testing";
+      pepper = "testing";
+      adminPasswordFile = pkgs.writeText "admin-password" "testing";
+      adminEmail = "root@localhost";
+    };
+    services.nginx = {
+      enable = true;
+      virtualHosts."_".locations."/".extraConfig = ''
+        uwsgi_pass unix:/run/privacyidea/socket;
+      '';
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("curl --fail http://localhost | grep privacyIDEA")
+    machine.succeed(
+        "curl --fail http://localhost/auth -F username=admin -F password=testing | grep token"
+    )
+  '';
+})
diff --git a/nixos/tests/prometheus.nix b/nixos/tests/prometheus.nix
index 8bfd0c131e61..bce489168f9f 100644
--- a/nixos/tests/prometheus.nix
+++ b/nixos/tests/prometheus.nix
@@ -179,7 +179,7 @@ in import ./make-test-python.nix {
     s3.succeed(
         "mc config host add minio "
         + "http://localhost:${toString minioPort} "
-        + "${s3.accessKey} ${s3.secretKey} S3v4",
+        + "${s3.accessKey} ${s3.secretKey} --api s3v4",
         "mc mb minio/thanos-bucket",
     )
 
diff --git a/nixos/tests/redmine.nix b/nixos/tests/redmine.nix
index 73eb684f33a9..3866a1f528c0 100644
--- a/nixos/tests/redmine.nix
+++ b/nixos/tests/redmine.nix
@@ -3,74 +3,42 @@
   pkgs ? import ../.. { inherit system config; }
 }:
 
-with import ../lib/testing.nix { inherit system pkgs; };
+with import ../lib/testing-python.nix { inherit system pkgs; };
 with pkgs.lib;
 
 let
-  mysqlTest = package: makeTest {
-    machine =
-      { config, pkgs, ... }:
-      { services.redmine.enable = true;
-        services.redmine.package = package;
-        services.redmine.database.type = "mysql2";
-        services.redmine.plugins = {
+  redmineTest = { name, type }: makeTest {
+    name = "redmine-${name}";
+    machine = { config, pkgs, ... }: {
+      services.redmine = {
+        enable = true;
+        package = pkgs.redmine;
+        database.type = type;
+        plugins = {
           redmine_env_auth = pkgs.fetchurl {
             url = "https://github.com/Intera/redmine_env_auth/archive/0.7.zip";
             sha256 = "1xb8lyarc7mpi86yflnlgyllh9hfwb9z304f19dx409gqpia99sc";
           };
         };
-        services.redmine.themes = {
+        themes = {
           dkuk-redmine_alex_skin = pkgs.fetchurl {
             url = "https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip";
             sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
           };
         };
       };
+    };
 
     testScript = ''
-      startAll;
-      $machine->waitForUnit('redmine.service');
-      $machine->waitForOpenPort('3000');
-      $machine->succeed("curl --fail http://localhost:3000/");
+      start_all()
+      machine.wait_for_unit("redmine.service")
+      machine.wait_for_open_port(3000)
+      machine.succeed("curl --fail http://localhost:3000/")
     '';
-  };
-
-  pgsqlTest = package: makeTest {
-    machine =
-      { config, pkgs, ... }:
-      { services.redmine.enable = true;
-        services.redmine.package = package;
-        services.redmine.database.type = "postgresql";
-        services.redmine.plugins = {
-          redmine_env_auth = pkgs.fetchurl {
-            url = "https://github.com/Intera/redmine_env_auth/archive/0.7.zip";
-            sha256 = "1xb8lyarc7mpi86yflnlgyllh9hfwb9z304f19dx409gqpia99sc";
-          };
-        };
-        services.redmine.themes = {
-          dkuk-redmine_alex_skin = pkgs.fetchurl {
-            url = "https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip";
-            sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
-          };
-        };
-      };
-
-    testScript = ''
-      startAll;
-      $machine->waitForUnit('redmine.service');
-      $machine->waitForOpenPort('3000');
-      $machine->succeed("curl --fail http://localhost:3000/");
-    '';
-  };
-in
-{
-  mysql = mysqlTest pkgs.redmine // {
-    name = "mysql";
-    meta.maintainers = [ maintainers.aanderse ];
-  };
-
-  pgsql = pgsqlTest pkgs.redmine // {
-    name = "pgsql";
+  } // {
     meta.maintainers = [ maintainers.aanderse ];
   };
+in {
+  mysql = redmineTest { name = "mysql"; type = "mysql2"; };
+  pgsql = redmineTest { name = "pgsql"; type = "postgresql"; };
 }
diff --git a/nixos/tests/service-runner.nix b/nixos/tests/service-runner.nix
index adb3fcd36d7a..39ae66fe1116 100644
--- a/nixos/tests/service-runner.nix
+++ b/nixos/tests/service-runner.nix
@@ -23,7 +23,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         machine.fail(f"curl {url}")
         machine.succeed(
             """
-            mkdir -p /run/nginx /var/spool/nginx/logs
+            mkdir -p /run/nginx /var/log/nginx /var/cache/nginx
             ${nodes.machine.config.systemd.services.nginx.runner} &
             echo $!>my-nginx.pid
             """
diff --git a/nixos/tests/systemd-boot.nix b/nixos/tests/systemd-boot.nix
new file mode 100644
index 000000000000..e911c3933616
--- /dev/null
+++ b/nixos/tests/systemd-boot.nix
@@ -0,0 +1,31 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+makeTest {
+  name = "systemd-boot";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ danielfullmer ];
+
+  machine = { pkgs, lib, ... }: {
+    virtualisation.useBootLoader = true;
+    virtualisation.useEFIBoot = true;
+    boot.loader.systemd-boot.enable = true;
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("multi-user.target")
+
+    machine.succeed("test -e /boot/loader/entries/nixos-generation-1.conf")
+
+    # Ensure we actually booted using systemd-boot.
+    # Magic number is the vendor UUID used by systemd-boot.
+    machine.succeed(
+        "test -e /sys/firmware/efi/efivars/LoaderEntrySelected-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f"
+    )
+  '';
+}
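When the EFI-variable check fails, it helps to see what the boot loader itself reports. A minimal sketch, assuming `bootctl` from systemd is on the test VM's PATH:

    # Hedged sketch: dump systemd-boot's own view of the boot for debugging.
    print(machine.succeed("bootctl status"))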
diff --git a/nixos/tests/systemd-confinement.nix b/nixos/tests/systemd-confinement.nix
index f22836e227b0..ebf6d218fd68 100644
--- a/nixos/tests/systemd-confinement.nix
+++ b/nixos/tests/systemd-confinement.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix {
+import ./make-test-python.nix {
   name = "systemd-confinement";
 
   machine = { pkgs, lib, ... }: let
@@ -17,7 +17,7 @@ import ./make-test.nix {
       exit "''${ret:-1}"
     '';
 
-    mkTestStep = num: { description, config ? {}, testScript }: {
+    mkTestStep = num: { config ? {}, testScript }: {
       systemd.sockets."test${toString num}" = {
         description = "Socket for Test Service ${toString num}";
         wantedBy = [ "sockets.target" ];
@@ -34,52 +34,48 @@ import ./make-test.nix {
         };
       } // removeAttrs config [ "confinement" "serviceConfig" ];
 
-      __testSteps = lib.mkOrder num ''
-        subtest '${lib.escape ["\\" "'"] description}', sub {
-          $machine->succeed('echo ${toString num} > /teststep');
-          ${testScript}
-        };
-      '';
+      __testSteps = lib.mkOrder num (''
+        machine.succeed("echo ${toString num} > /teststep")
+      '' + testScript);
     };
 
   in {
     imports = lib.imap1 mkTestStep [
-      { description = "chroot-only confinement";
-        config.confinement.mode = "chroot-only";
+      { config.confinement.mode = "chroot-only";
         testScript = ''
-          $machine->succeed(
-            'test "$(chroot-exec ls -1 / | paste -sd,)" = bin,nix',
-            'test "$(chroot-exec id -u)" = 0',
-            'chroot-exec chown 65534 /bin',
-          );
+          with subtest("chroot-only confinement"):
+              machine.succeed(
+                  'test "$(chroot-exec ls -1 / | paste -sd,)" = bin,nix',
+                  'test "$(chroot-exec id -u)" = 0',
+                  "chroot-exec chown 65534 /bin",
+              )
         '';
       }
-      { description = "full confinement with APIVFS";
-        testScript = ''
-          $machine->fail(
-            'chroot-exec ls -l /etc',
-            'chroot-exec ls -l /run',
-            'chroot-exec chown 65534 /bin',
-          );
-          $machine->succeed(
-            'test "$(chroot-exec id -u)" = 0',
-            'chroot-exec chown 0 /bin',
-          );
+      { testScript = ''
+          with subtest("full confinement with APIVFS"):
+              machine.fail(
+                  "chroot-exec ls -l /etc",
+                  "chroot-exec ls -l /run",
+                  "chroot-exec chown 65534 /bin",
+              )
+              machine.succeed(
+                  'test "$(chroot-exec id -u)" = 0', "chroot-exec chown 0 /bin",
+              )
         '';
       }
-      { description = "check existence of bind-mounted /etc";
-        config.serviceConfig.BindReadOnlyPaths = [ "/etc" ];
+      { config.serviceConfig.BindReadOnlyPaths = [ "/etc" ];
         testScript = ''
-          $machine->succeed('test -n "$(chroot-exec cat /etc/passwd)"');
+          with subtest("check existence of bind-mounted /etc"):
+              machine.succeed('test -n "$(chroot-exec cat /etc/passwd)"')
         '';
       }
-      { description = "check if User/Group really runs as non-root";
-        config.serviceConfig.User = "chroot-testuser";
+      { config.serviceConfig.User = "chroot-testuser";
         config.serviceConfig.Group = "chroot-testgroup";
         testScript = ''
-          $machine->succeed('chroot-exec ls -l /dev');
-          $machine->succeed('test "$(chroot-exec id -u)" != 0');
-          $machine->fail('chroot-exec touch /bin/test');
+          with subtest("check if User/Group really runs as non-root"):
+              machine.succeed("chroot-exec ls -l /dev")
+              machine.succeed('test "$(chroot-exec id -u)" != 0')
+              machine.fail("chroot-exec touch /bin/test")
         '';
       }
       (let
@@ -87,62 +83,60 @@ import ./make-test.nix {
           target = pkgs.writeText "symlink-target" "got me\n";
         } "ln -s \"$target\" \"$out\"";
       in {
-        description = "check if symlinks are properly bind-mounted";
         config.confinement.packages = lib.singleton symlink;
         testScript = ''
-          $machine->fail('chroot-exec test -e /etc');
-          $machine->succeed('chroot-exec cat ${symlink} >&2');
-          $machine->succeed('test "$(chroot-exec cat ${symlink})" = "got me"');
+          with subtest("check if symlinks are properly bind-mounted"):
+              machine.fail("chroot-exec test -e /etc")
+              machine.succeed(
+                  "chroot-exec cat ${symlink} >&2",
+                  'test "$(chroot-exec cat ${symlink})" = "got me"',
+              )
         '';
       })
-      { description = "check if StateDirectory works";
-        config.serviceConfig.User = "chroot-testuser";
+      { config.serviceConfig.User = "chroot-testuser";
         config.serviceConfig.Group = "chroot-testgroup";
         config.serviceConfig.StateDirectory = "testme";
         testScript = ''
-          $machine->succeed('chroot-exec touch /tmp/canary');
-          $machine->succeed('chroot-exec "echo works > /var/lib/testme/foo"');
-          $machine->succeed('test "$(< /var/lib/testme/foo)" = works');
-          $machine->succeed('test ! -e /tmp/canary');
+          with subtest("check if StateDirectory works"):
+              machine.succeed("chroot-exec touch /tmp/canary")
+              machine.succeed('chroot-exec "echo works > /var/lib/testme/foo"')
+              machine.succeed('test "$(< /var/lib/testme/foo)" = works')
+              machine.succeed("test ! -e /tmp/canary")
         '';
       }
-      { description = "check if /bin/sh works";
-        testScript = ''
-          $machine->succeed(
-            'chroot-exec test -e /bin/sh',
-            'test "$(chroot-exec \'/bin/sh -c "echo bar"\')" = bar',
-          );
+      { testScript = ''
+          with subtest("check if /bin/sh works"):
+              machine.succeed(
+                  "chroot-exec test -e /bin/sh",
+                  'test "$(chroot-exec \'/bin/sh -c "echo bar"\')" = bar',
+              )
         '';
       }
-      { description = "check if suppressing /bin/sh works";
-        config.confinement.binSh = null;
+      { config.confinement.binSh = null;
         testScript = ''
-          $machine->succeed(
-            'chroot-exec test ! -e /bin/sh',
-            'test "$(chroot-exec \'/bin/sh -c "echo foo"\')" != foo',
-          );
+          with subtest("check if suppressing /bin/sh works"):
+              machine.succeed("chroot-exec test ! -e /bin/sh")
+              machine.succeed('test "$(chroot-exec \'/bin/sh -c "echo foo"\')" != foo')
         '';
       }
-      { description = "check if we can set /bin/sh to something different";
-        config.confinement.binSh = "${pkgs.hello}/bin/hello";
+      { config.confinement.binSh = "${pkgs.hello}/bin/hello";
         testScript = ''
-          $machine->succeed(
-            'chroot-exec test -e /bin/sh',
-            'test "$(chroot-exec /bin/sh -g foo)" = foo',
-          );
+          with subtest("check if we can set /bin/sh to something different"):
+              machine.succeed("chroot-exec test -e /bin/sh")
+              machine.succeed('test "$(chroot-exec /bin/sh -g foo)" = foo')
         '';
       }
-      { description = "check if only Exec* dependencies are included";
-        config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
+      { config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
         testScript = ''
-          $machine->succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" != eek');
+          with subtest("check if only Exec* dependencies are included"):
+              machine.succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" != eek')
         '';
       }
-      { description = "check if all unit dependencies are included";
-        config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
+      { config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
         config.confinement.fullUnit = true;
         testScript = ''
-          $machine->succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" = eek');
+          with subtest("check if all unit dependencies are included"):
+              machine.succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" = eek')
         '';
       }
     ];
@@ -162,7 +156,6 @@ import ./make-test.nix {
   };
 
   testScript = { nodes, ... }: ''
-    $machine->waitForUnit('multi-user.target');
-    ${nodes.machine.config.__testSteps}
-  '';
+    machine.wait_for_unit("multi-user.target")
+  '' + nodes.machine.config.__testSteps;
 }
diff --git a/nixos/tests/systemd-networkd-dhcpserver.nix b/nixos/tests/systemd-networkd-dhcpserver.nix
new file mode 100644
index 000000000000..f1a2662f8cb4
--- /dev/null
+++ b/nixos/tests/systemd-networkd-dhcpserver.nix
@@ -0,0 +1,58 @@
+# This test exercises the systemd-networkd DHCP server by setting up a
+# DHCP server and a client and ensuring they are mutually reachable via
+# the DHCP-allocated address.
+import ./make-test-python.nix ({pkgs, ...}: {
+  name = "systemd-networkd-dhcpserver";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ tomfitzhenry ];
+  };
+  nodes = {
+    router = { config, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+      systemd.network = {
+        networks = {
+          # systemd-networkd will load the first network unit file
+          # that matches, ordered lexicographically by filename.
+          # /etc/systemd/network/{40-eth1,99-main}.network already
+          # exist. This network unit must be loaded for the test,
+          # hence the name, which sorts before those files.
+          "01-eth1" = {
+            name = "eth1";
+            networkConfig = {
+              DHCPServer = true;
+              Address = "10.0.0.1/24";
+            };
+            dhcpServerConfig = {
+              PoolOffset = 100;
+              PoolSize = 1;
+            };
+          };
+        };
+      };
+    };
+
+    client = { config, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.useDHCP = true;
+      };
+    };
+  };
+  testScript = { ... }: ''
+    start_all()
+    router.wait_for_unit("systemd-networkd-wait-online.service")
+    client.wait_for_unit("systemd-networkd-wait-online.service")
+    client.wait_until_succeeds("ping -c 5 10.0.0.1")
+    router.wait_until_succeeds("ping -c 5 10.0.0.100")
+  '';
+})
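With PoolOffset = 100 and PoolSize = 1 on 10.0.0.0/24 the server can only ever lease 10.0.0.100, which is why the router pings exactly that address. A minimal sketch making the expectation explicit on the client side, assuming the `client` node above:

    # Hedged sketch: the DHCP-assigned address on eth1 should be the single pool address.
    client.succeed("ip -4 addr show dev eth1 | grep -qF 10.0.0.100")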
diff --git a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
new file mode 100644
index 000000000000..99cd341eec15
--- /dev/null
+++ b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
@@ -0,0 +1,295 @@
+# This test verifies that we can request and assign IPv6 prefixes from upstream
+# (e.g. ISP) routers.
+# The setup consists of three VMs: one acting as the ISP, one as your residential
+# router and one as a client machine in the residential network.
+#
+# There are two VLANs in this test:
+# - VLAN 1 is the connection between the ISP and the router
+# - VLAN 2 is the connection between the router and the client
+
+import ./make-test-python.nix ({pkgs, ...}: {
+  name = "systemd-networkd-ipv6-prefix-delegation";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ andir ];
+  };
+  nodes = {
+
+    # The ISP router's job is to delegate IPv6 prefixes via DHCPv6. As with
+    # regular IPv6 auto-configuration it will also emit IPv6 router
+    # advertisements (RAs). Those RAs will not carry a prefix; instead they
+    # just set the "Managed" flag (see the radvd configuration below) to
+    # indicate to the receiving nodes that they should attempt DHCPv6.
+    #
+    # Note: On the ISP's device we don't really care whether we are using
+    # networkd in this example. That being said, we can't use it (yet) as
+    # networkd doesn't implement the serving side of DHCPv6. We will use
+    # ISC's well-aged dhcpd6 for that task.
+    isp = { lib, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      networking = {
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.ipv4.addresses = lib.mkForce []; # no need for legacy IP
+        interfaces.eth1.ipv6.addresses = lib.mkForce [
+          { address = "2001:DB8::"; prefixLength = 64; }
+        ];
+      };
+
+      # Since we want to program the routes that we delegate to the "customer"
+      # into our routing table we must have a way to gain the required privs.
+      # This security wrapper will do in our test setup.
+      #
+      # DO NOT COPY THIS TO PRODUCTION AS IS. Think about it at least twice.
+      # Everyone on the "isp" machine will be able to add routes to the kernel.
+      security.wrappers.add-dhcpd-lease = {
+        source = pkgs.writeShellScript "add-dhcpd-lease" ''
+          exec ${pkgs.iproute}/bin/ip -6 route replace "$1" via "$2"
+        '';
+        capabilities = "cap_net_admin+ep";
+      };
+      services = {
+        # Configure the DHCPv6 server
+        #
+        # We will hand out /48 prefixes from the subnet 2001:DB8:F000::/36.
+        # That gives us 2^(48-36) = 4096 prefixes, which should be plenty for this test.
+        #
+        # Since (usually) you will not receive a prefix with the router
+        # advertisements we also hand out /128 leases from the range
+        # 2001:DB8:0000:0000:FFFF::/112.
+        dhcpd6 = {
+          enable = true;
+          interfaces = [ "eth1" ];
+          extraConfig = ''
+            subnet6 2001:DB8::/36 {
+              range6 2001:DB8:0000:0000:FFFF:: 2001:DB8:0000:0000:FFFF::FFFF;
+              prefix6 2001:DB8:F000:: 2001:DB8:FFFF:: /48;
+            }
+
+            # This is the secret sauce. We have to extract the prefix and the
+            # next hop when committing the lease to the database. dhcpd6
+            # (rightfully) has no concept of adding routes to the system's
+            # routing table; that really depends on the setup.
+            #
+            # In a production environment your DHCPv6 server is likely not the
+            # router. You might want to consider BGP, custom NetConf calls, …
+            # in those cases.
+            on commit {
+              set IP = pick-first-value(binary-to-ascii(16, 16, ":", substring(option dhcp6.ia-na, 16, 16)), "n/a");
+              set Prefix = pick-first-value(binary-to-ascii(16, 16, ":", suffix(option dhcp6.ia-pd, 16)), "n/a");
+              set PrefixLength = pick-first-value(binary-to-ascii(10, 8, ":", substring(suffix(option dhcp6.ia-pd, 17), 0, 1)), "n/a");
+              log(concat(IP, " ", Prefix, " ", PrefixLength));
+              execute("/run/wrappers/bin/add-dhcpd-lease", concat(Prefix,"/",PrefixLength), IP);
+            }
+          '';
+        };
+
+        # Finally we have to set up the router advertisements. While we could
+        # use networkd or bird for this task, `radvd` is probably the most
+        # venerable of them all. It was made explicitly for this purpose and
+        # the configuration is much more straightforward than what networkd
+        # requires.
+        # As outlined above we have to set the `Managed` flag as otherwise the
+        # clients will not know whether they should do DHCPv6. (Some do
+        # anyway/always.)
+        radvd = {
+          enable = true;
+          config = ''
+            interface eth1 {
+              AdvSendAdvert on;
+              AdvManagedFlag on;
+              AdvOtherConfigFlag off; # we don't really have DNS or NTP or anything like that to distribute
+              prefix ::/64 {
+                AdvOnLink on;
+                AdvAutonomous on;
+              };
+            };
+          '';
+        };
+
+      };
+    };
+
+    # This will be our (residential) router that receives the IPv6 prefix (IA_PD)
+    # and /128 (IA_NA) allocation.
+    #
+    # Here we will actually start using networkd.
+    router = {
+      virtualisation.vlans = [ 1 2 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+
+      boot.kernel.sysctl = {
+        # we want to forward packets from the ISP to the client and back.
+        "net.ipv6.conf.all.forwarding" = 1;
+      };
+
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        # Consider enabling this in production and generating firewall rules
+        # for forwarding/input from the configured interfaces so you do not
+        # have to manage them in multiple places.
+        firewall.enable = false;
+      };
+
+      systemd.network = {
+        networks = {
+          # systemd-networkd will load the first network unit file
+          # that matches, ordered lexicographically by filename.
+          # /etc/systemd/network/{40-eth1,99-main}.network already
+          # exist. This network unit must be loaded for the test,
+          # hence the name, which sorts before those files.
+
+          # Configuration of the interface to the ISP.
+          # We must accept RAs and request the PD prefix.
+          "01-eth1" = {
+            name = "eth1";
+            networkConfig = {
+              Description = "ISP interface";
+              IPv6AcceptRA = true;
+              #DHCP = false; # no need for legacy IP
+            };
+            linkConfig = {
+              # We care about this interface when talking about being "online".
+              # If this interface is in the `routable` state we can reach
+              # others and they should be able to reach us.
+              RequiredForOnline = "routable";
+            };
+            # This configures the DHCPv6 client part towards the ISP's DHCPv6 server.
+            dhcpV6Config = {
+              # We have to include a request for a prefix in our DHCPv6 client
+              # request packets.
+              # Otherwise the upstream DHCPv6 server wouldn't know if we want a
+              # prefix or not. Note: on some installations it makes sense to
+              # always force that option on the DHCPv6 server since there are
+              # certain CPEs that just do not set this field but still happily
+              # accept the delegated prefix.
+              PrefixDelegationHint = "::/48";
+            };
+            ipv6PrefixDelegationConfig = {
+              # Let networkd know that we would very much like to use DHCPv6
+              # to obtain the "managed" information. Not sure why they can't
+              # just take that from the upstream RAs.
+              Managed = true;
+            };
+          };
+
+          # Interface to the client. Here we should redistribute a /64 from
+          # the prefix we received from the ISP.
+          "01-eth2" = {
+            name = "eth2";
+            networkConfig = {
+              Description = "Client interface";
+              # The client shouldn't be allowed to send us RAs; that would be weird.
+              IPv6AcceptRA = false;
+
+              # Just delegate prefixes from the DHCPv6 PD pool.
+              # If you also want to distribute a local ULA prefix, set this to
+              # `yes`, as that includes both static prefixes and PD prefixes.
+              IPv6PrefixDelegation = "dhcpv6";
+            };
+            # finally "act as router" (according to systemd.network(5))
+            ipv6PrefixDelegationConfig = {
+              RouterLifetimeSec = 300; # required, as otherwise no RAs are emitted
+
+              # In a production environment you should consider setting these as well:
+              #EmitDNS = true;
+              #EmitDomains = true;
+              #DNS = "fe80::1"; # or whatever "well-known" IP your router will have on the inside.
+            };
+
+            # This adds a "random" ULA prefix to the interface that is being
+            # advertised to the clients.
+            # Not used in this test.
+            # ipv6Prefixes = [
+            #   {
+            #     ipv6PrefixConfig = {
+            #       AddressAutoconfiguration = true;
+            #       PreferredLifetimeSec = 1800;
+            #       ValidLifetimeSec = 1800;
+            #     };
+            #   }
+            # ];
+          };
+
+          # Finally we are going to add a static IPv6 unique local address to
+          # the "lo" interface. This will serve as an ICMPv6 echo target to
+          # verify connectivity from the client to the router.
+          "01-lo" = {
+            name = "lo";
+            addresses = [
+              { addressConfig.Address = "FD42::1/128"; }
+            ];
+          };
+        };
+      };
+
+      # Make the network-online target a requirement; we wait for it in our test script.
+      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
+    };
+
+    # This is the client behind the router. We should be receiving router
+    # advertisements for both the ULA and the delegated prefix.
+    # All we have to do is boot with the default (networkd) configuration.
+    client = {
+      virtualisation.vlans = [ 2 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+      };
+
+      # Make the network-online target a requirement; we wait for it in our test script.
+      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
+    };
+  };
+
+  testScript = ''
+    # First start the router and wait for it to reach a state where we are
+    # certain networkd is up and it is able to send out RAs
+    router.start()
+    router.wait_for_unit("systemd-networkd.service")
+
+    # After that we can boot the client and wait for the network online target.
+    # Since we only care about IPv6 that should not involve waiting for legacy
+    # IP leases.
+    client.start()
+    client.wait_for_unit("network-online.target")
+
+    # The static ULA address on the router should become reachable from the client
+    client.wait_until_succeeds("ping -6 -c 1 FD42::1")
+
+    # The global IP of the ISP router should still not be reachable (the ISP is not up yet)
+    router.fail("ping -6 -c 1 2001:DB8::")
+
+    # Once we have internal connectivity, boot up the ISP
+    isp.start()
+
+    # Since "being online" has no real meaning for the ISP, we just wait for
+    # the target where all of its units have been started.
+    # It probably still takes a few more seconds for all the RA timers to
+    # fire, etc.
+    isp.wait_for_unit("multi-user.target")
+
+    # Wait until the uplink interface is routable and the ISP router answers our pings
+    router.wait_for_unit("network-online.target")
+    router.wait_until_succeeds("ping -6 -c1 2001:DB8::")
+
+    # Shortly after that the client should have received its global IPv6
+    # address and thus be able to ping the ISP
+    client.wait_until_succeeds("ping -6 -c1 2001:DB8::")
+
+    # Verify that the client got a globally scoped address on eth1 from the
+    # documentation prefix
+    ip_output = client.succeed("ip --json -6 address show dev eth1")
+
+    import json
+
+    ip_json = json.loads(ip_output)[0]
+    assert any(
+        addr["local"].upper().startswith("2001:DB8:")
+        for addr in ip_json["addr_info"]
+        if addr["scope"] == "global"
+    )
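+
+    # Possible follow-up check, sketched only and not part of this test:
+    # depending on networkd's defaults the router may also assign itself an
+    # address from the delegated prefix on eth2, which could be asserted with
+    # something like:
+    #
+    #   router.succeed("ip -6 address show dev eth2 | grep -qi ' 2001:db8:'")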
+  '';
+})
diff --git a/nixos/tests/udisks2.nix b/nixos/tests/udisks2.nix
index 64f5b6c40d20..50a023968918 100644
--- a/nixos/tests/udisks2.nix
+++ b/nixos/tests/udisks2.nix
@@ -3,7 +3,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
 let
 
   stick = pkgs.fetchurl {
-    url = "http://nixos.org/~eelco/nix/udisks-test.img.xz";
+    url = "https://nixos.org/~eelco/nix/udisks-test.img.xz";
     sha256 = "0was1xgjkjad91nipzclaz5biv3m4b2nk029ga6nk7iklwi19l8b";
   };
 
diff --git a/nixos/tests/web-servers/unit-php.nix b/nixos/tests/web-servers/unit-php.nix
new file mode 100644
index 000000000000..c6327a1f825d
--- /dev/null
+++ b/nixos/tests/web-servers/unit-php.nix
@@ -0,0 +1,47 @@
+import ../make-test-python.nix ({pkgs, ...}:
+let
+  testdir = pkgs.writeTextDir "www/info.php" "<?php phpinfo();";
+in {
+  name = "unit-php-test";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ izorkin ];
+
+  machine = { config, lib, pkgs, ... }: {
+    services.unit = {
+      enable = true;
+      config = ''
+        {
+          "listeners": {
+            "*:9074": {
+              "application": "php_74"
+            }
+          },
+          "applications": {
+            "php_74": {
+              "type": "php 7.4",
+              "processes": 1,
+              "user": "testuser",
+              "group": "testgroup",
+              "root": "${testdir}/www",
+              "index": "info.php"
+            }
+          }
+        }
+      '';
+    };
+    users = {
+      users.testuser = {
+        isNormalUser = false;
+        uid = 1074;
+        group = "testgroup";
+      };
+      groups.testgroup = {
+        gid = 1074;
+      };
+    };
+  };
+  testScript = ''
+    machine.wait_for_unit("unit.service")
+    assert "PHP Version ${pkgs.php74.version}" in machine.succeed("curl -vvv -s http://127.0.0.1:9074/")
+  '';
+})
diff --git a/nixos/tests/xmpp/prosody-mysql.nix b/nixos/tests/xmpp/prosody-mysql.nix
index 0507227021b2..9a00bcabf389 100644
--- a/nixos/tests/xmpp/prosody-mysql.nix
+++ b/nixos/tests/xmpp/prosody-mysql.nix
@@ -6,6 +6,11 @@ import ../make-test-python.nix {
       environment.systemPackages = [
         (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
       ];
+      networking.extraHosts = ''
+        ${nodes.server.config.networking.primaryIPAddress} example.com
+        ${nodes.server.config.networking.primaryIPAddress} conference.example.com
+        ${nodes.server.config.networking.primaryIPAddress} uploads.example.com
+      '';
     };
     server = { config, pkgs, ... }: {
       nixpkgs.overlays = [
@@ -18,6 +23,8 @@ import ../make-test-python.nix {
       ];
       networking.extraHosts = ''
         ${config.networking.primaryIPAddress} example.com
+        ${config.networking.primaryIPAddress} conference.example.com
+        ${config.networking.primaryIPAddress} uploads.example.com
       '';
       networking.firewall.enable = false;
       services.prosody = {
@@ -39,6 +46,14 @@ import ../make-test-python.nix {
           domain = "example.com";
           enabled = true;
         };
+        muc = [
+          {
+            domain = "conference.example.com";
+          }
+        ];
+        uploadHttp = {
+          domain = "uploads.example.com";
+        };
       };
     };
     mysql = { config, pkgs, ... }: {
diff --git a/nixos/tests/xmpp/prosody.nix b/nixos/tests/xmpp/prosody.nix
index 9d1374bff6bd..e7755e24bab4 100644
--- a/nixos/tests/xmpp/prosody.nix
+++ b/nixos/tests/xmpp/prosody.nix
@@ -1,27 +1,80 @@
-import ../make-test-python.nix {
-  name = "prosody";
+let
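+  # A single self-signed certificate covering the three domains used in this
+  # test (example.com, conference.example.com and uploads.example.com); it is
+  # trusted on both nodes via security.pki.certificateFiles below.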
+  cert = pkgs: pkgs.runCommandNoCC "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=example.com/CN=uploads.example.com/CN=conference.example.com'
+    mkdir -p $out
+    cp key.pem cert.pem $out
+  '';
+  createUsers = pkgs: pkgs.writeScriptBin "create-prosody-users" ''
+    #!${pkgs.bash}/bin/bash
+    set -e
+
+    # Creates and sets the passwords for the two XMPP test users.
+    #
+    # Doing this in a bash script instead of in the test script allows us
+    # to easily provision the users when running the test interactively.
+
+    prosodyctl register cthon98 example.com nothunter2
+    prosodyctl register azurediamond example.com hunter2
+  '';
+  delUsers = pkgs: pkgs.writeScriptBin "delete-prosody-users" ''
+    #!${pkgs.bash}/bin/bash
+    set -e
+
+    # Deletes the test users.
+    #
+    # Doing this in a bash script instead of in the test script allows us
+    # to easily clean up the users when running the test interactively.
 
+    prosodyctl deluser cthon98@example.com
+    prosodyctl deluser azurediamond@example.com
+  '';
+in import ../make-test-python.nix {
+  name = "prosody";
   nodes = {
-    client = { nodes, pkgs, ... }: {
+    client = { nodes, pkgs, config, ... }: {
+      security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      console.keyMap = "fr-bepo";
+      networking.extraHosts = ''
+        ${nodes.server.config.networking.primaryIPAddress} example.com
+        ${nodes.server.config.networking.primaryIPAddress} conference.example.com
+        ${nodes.server.config.networking.primaryIPAddress} uploads.example.com
+      '';
       environment.systemPackages = [
         (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
       ];
     };
     server = { config, pkgs, ... }: {
+      security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      console.keyMap = "fr-bepo";
       networking.extraHosts = ''
         ${config.networking.primaryIPAddress} example.com
+        ${config.networking.primaryIPAddress} conference.example.com
+        ${config.networking.primaryIPAddress} uploads.example.com
       '';
       networking.firewall.enable = false;
+      environment.systemPackages = [
+        (createUsers pkgs)
+        (delUsers pkgs)
+      ];
       services.prosody = {
         enable = true;
-        # TODO: use a self-signed certificate
-        c2sRequireEncryption = false;
-        extraConfig = ''
-          storage = "sql"
-        '';
-        virtualHosts.test = {
+        ssl.cert = "${cert pkgs}/cert.pem";
+        ssl.key = "${cert pkgs}/key.pem";
+        virtualHosts.example = {
           domain = "example.com";
           enabled = true;
+          ssl.cert = "${cert pkgs}/cert.pem";
+          ssl.key = "${cert pkgs}/key.pem";
+        };
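+        # The client's send-message script exercises group chat (XEP-0045)
+        # and HTTP upload (XEP-0363), so the MUC and upload components are
+        # enabled on dedicated subdomains here.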
+        muc = [
+          {
+            domain = "conference.example.com";
+          }
+        ];
+        uploadHttp = {
+          domain = "uploads.example.com";
         };
       };
     };
@@ -31,16 +84,8 @@ import ../make-test-python.nix {
     server.wait_for_unit("prosody.service")
     server.succeed('prosodyctl status | grep "Prosody is running"')
 
-    # set password to 'nothunter2' (it's asked twice)
-    server.succeed("yes nothunter2 | prosodyctl adduser cthon98@example.com")
-    # set password to 'y'
-    server.succeed("yes | prosodyctl adduser azurediamond@example.com")
-    # correct password to "hunter2"
-    server.succeed("yes hunter2 | prosodyctl passwd azurediamond@example.com")
-
-    client.succeed("send-message")
-
-    server.succeed("prosodyctl deluser cthon98@example.com")
-    server.succeed("prosodyctl deluser azurediamond@example.com")
+    server.succeed("create-prosody-users")
+    client.succeed('send-message 2>&1 | grep "XMPP SCRIPT TEST SUCCESS"')
+    server.succeed("delete-prosody-users")
   '';
 }
diff --git a/nixos/tests/xmpp/xmpp-sendmessage.nix b/nixos/tests/xmpp/xmpp-sendmessage.nix
index 2a075a018134..349b9c6f38e4 100644
--- a/nixos/tests/xmpp/xmpp-sendmessage.nix
+++ b/nixos/tests/xmpp/xmpp-sendmessage.nix
@@ -1,46 +1,61 @@
-{ writeScriptBin, python3, connectTo ? "localhost" }:
-writeScriptBin "send-message" ''
-  #!${(python3.withPackages (ps: [ ps.sleekxmpp ])).interpreter}
-  # Based on the sleekxmpp send_client example, look there for more details:
-  # https://github.com/fritzy/SleekXMPP/blob/develop/examples/send_client.py
-  import sleekxmpp
-
-  class SendMsgBot(sleekxmpp.ClientXMPP):
-      """
-      A basic SleekXMPP bot that will log in, send a message,
-      and then log out.
-      """
-      def __init__(self, jid, password, recipient, message):
-          sleekxmpp.ClientXMPP.__init__(self, jid, password)
-
-          self.recipient = recipient
-          self.msg = message
-
-          self.add_event_handler("session_start", self.start, threaded=True)
-
-      def start(self, event):
-          self.send_presence()
-          self.get_roster()
-
-          self.send_message(mto=self.recipient,
-                            mbody=self.msg,
-                            mtype='chat')
-
-          self.disconnect(wait=True)
-
-
-  if __name__ == '__main__':
-      xmpp = SendMsgBot("cthon98@example.com", "nothunter2", "azurediamond@example.com", "hey, if you type in your pw, it will show as stars")
-      xmpp.register_plugin('xep_0030') # Service Discovery
-      xmpp.register_plugin('xep_0199') # XMPP Ping
-
-      # TODO: verify certificate
-      # If you want to verify the SSL certificates offered by a server:
-      # xmpp.ca_certs = "path/to/ca/cert"
-
-      if xmpp.connect(('${connectTo}', 5222)):
-          xmpp.process(block=True)
-      else:
-          print("Unable to connect.")
-          sys.exit(1)
+{ writeScriptBin, writeText, python3, connectTo ? "localhost" }:
+let
+  dummyFile = writeText "dummy-file" ''
+    Dear dog,
+
+    Please find this *really* important attachment.
+
+    Yours truly,
+    John
+  '';
+in writeScriptBin "send-message" ''
+#!${(python3.withPackages (ps: [ ps.slixmpp ])).interpreter}
+import logging
+import sys
+from types import MethodType
+
+from slixmpp import ClientXMPP
+from slixmpp.exceptions import IqError, IqTimeout
+
+
+class CthonTest(ClientXMPP):
+
+    def __init__(self, jid, password):
+        ClientXMPP.__init__(self, jid, password)
+        self.add_event_handler("session_start", self.session_start)
+
+    async def session_start(self, event):
+        log = logging.getLogger(__name__)
+        self.send_presence()
+        self.get_roster()
+        # Sending a test message
+        self.send_message(mto="azurediamond@example.com", mbody="Hello, this is dog.", mtype="chat")
+        log.info('Message sent')
+
+        # Test http upload (XEP_0363)
+        def timeout_callback(arg):
+            log.error("ERROR: Cannot upload file. XEP_0363 seems broken")
+            sys.exit(1)
+        url = await self['xep_0363'].upload_file("${dummyFile}", timeout=10, timeout_callback=timeout_callback)
+        log.info('Upload success!')
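+        # (Hypothetical follow-up, not done in this test: the returned URL
+        # could be fetched, e.g. via urllib.request, to verify the upload.)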
+        # Test MUC
+        self.plugin['xep_0045'].join_muc('testMucRoom', 'cthon98', wait=True)
+        log.info('MUC join success!')
+        log.info('XMPP SCRIPT TEST SUCCESS')
+        self.disconnect(wait=True)
+
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.DEBUG,
+                        format='%(levelname)-8s %(message)s')
+
+    ct = CthonTest('cthon98@example.com', 'nothunter2')
+    ct.register_plugin('xep_0071')
+    ct.register_plugin('xep_0128')
+    # HTTP Upload
+    ct.register_plugin('xep_0363')
+    # MUC
+    ct.register_plugin('xep_0045')
+    ct.connect(("server", 5222))
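+    # With forever=False the event loop only runs until disconnect() is
+    # called at the end of session_start(), instead of running indefinitely.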
+    ct.process(forever=False)
 ''