author     Alyssa Ross <hi@alyssa.is>  2021-04-28 14:39:00 +0000
committer  Alyssa Ross <hi@alyssa.is>  2021-06-10 08:52:36 +0000
commit     693e64ef7421374338ddb1dc12b9573feec75972 (patch)
tree       2526ac075d248699c35d63e04499890ee4381f5f /nixpkgs/nixos/tests
parent     7014df2256694d97093d6f2bb1db340d346dea88 (diff)
parent     8e4fe32876ca15e3d5eb3ecd3ca0b224417f5f17 (diff)
Merge commit '8e4fe32876ca15e3d5eb3ecd3ca0b224417f5f17'
Diffstat (limited to 'nixpkgs/nixos/tests')
-rw-r--r--  nixpkgs/nixos/tests/all-tests.nix                        |   7
-rw-r--r--  nixpkgs/nixos/tests/babeld.nix                           |   6
-rw-r--r--  nixpkgs/nixos/tests/containers-nested.nix                |  30
-rw-r--r--  nixpkgs/nixos/tests/docker-tools.nix                     |  43
-rw-r--r--  nixpkgs/nixos/tests/doh-proxy-rust.nix                   |  43
-rw-r--r--  nixpkgs/nixos/tests/dovecot.nix                          |   2
-rw-r--r--  nixpkgs/nixos/tests/gitea.nix                            |   2
-rw-r--r--  nixpkgs/nixos/tests/gobgpd.nix                           |  71
-rw-r--r--  nixpkgs/nixos/tests/hibernate.nix                        | 124
-rw-r--r--  nixpkgs/nixos/tests/home-assistant.nix                   |  10
-rw-r--r--  nixpkgs/nixos/tests/iscsi-root.nix                       | 161
-rw-r--r--  nixpkgs/nixos/tests/kernel-generic.nix                   |   4
-rw-r--r--  nixpkgs/nixos/tests/mxisd.nix                            |  17
-rw-r--r--  nixpkgs/nixos/tests/mysql/mariadb-galera-mariabackup.nix |   6
-rw-r--r--  nixpkgs/nixos/tests/mysql/mariadb-galera-rsync.nix       |   6
-rw-r--r--  nixpkgs/nixos/tests/mysql/mysql.nix                      |  12
-rw-r--r--  nixpkgs/nixos/tests/nebula.nix                           | 223
-rw-r--r--  nixpkgs/nixos/tests/nextcloud/basic.nix                  |   2
-rw-r--r--  nixpkgs/nixos/tests/packagekit.nix                       |   1
-rw-r--r--  nixpkgs/nixos/tests/podgrab.nix                          |  34
-rw-r--r--  nixpkgs/nixos/tests/prometheus-exporters.nix             |   6
-rw-r--r--  nixpkgs/nixos/tests/quagga.nix                           |  96
-rw-r--r--  nixpkgs/nixos/tests/redis.nix                            |   5
-rw-r--r--  nixpkgs/nixos/tests/rspamd.nix                           |   5
-rw-r--r--  nixpkgs/nixos/tests/searx.nix                            |   2
-rw-r--r--  nixpkgs/nixos/tests/shadow.nix                           |   3
-rw-r--r--  nixpkgs/nixos/tests/spacecookie.nix                      |  33
-rw-r--r--  nixpkgs/nixos/tests/systemd-confinement.nix              |   1
-rw-r--r--  nixpkgs/nixos/tests/unbound.nix                          |  11
-rw-r--r--  nixpkgs/nixos/tests/wmderland.nix                        |  54
30 files changed, 833 insertions(+), 187 deletions(-)
diff --git a/nixpkgs/nixos/tests/all-tests.nix b/nixpkgs/nixos/tests/all-tests.nix
index 58b2ba7fa514..3aefa82301c0 100644
--- a/nixpkgs/nixos/tests/all-tests.nix
+++ b/nixpkgs/nixos/tests/all-tests.nix
@@ -75,6 +75,7 @@ in
   containers-ip = handleTest ./containers-ip.nix {};
   containers-macvlans = handleTest ./containers-macvlans.nix {};
   containers-names = handleTest ./containers-names.nix {};
+  containers-nested = handleTest ./containers-nested.nix {};
   containers-physical_interfaces = handleTest ./containers-physical_interfaces.nix {};
   containers-portforward = handleTest ./containers-portforward.nix {};
   containers-reloadable = handleTest ./containers-reloadable.nix {};
@@ -138,6 +139,7 @@ in
   gnome3 = handleTest ./gnome3.nix {};
   gnome3-xorg = handleTest ./gnome3-xorg.nix {};
   go-neb = handleTest ./go-neb.nix {};
+  gobgpd = handleTest ./gobgpd.nix {};
   gocd-agent = handleTest ./gocd-agent.nix {};
   gocd-server = handleTest ./gocd-server.nix {};
   google-oslogin = handleTest ./google-oslogin {};
@@ -184,6 +186,7 @@ in
   iodine = handleTest ./iodine.nix {};
   ipfs = handleTest ./ipfs.nix {};
   ipv6 = handleTest ./ipv6.nix {};
+  iscsi-root = handleTest ./iscsi-root.nix {};
   jackett = handleTest ./jackett.nix {};
   jellyfin = handleTest ./jellyfin.nix {};
   jenkins = handleTest ./jenkins.nix {};
@@ -261,6 +264,7 @@ in
   nat.standalone = handleTest ./nat.nix { withFirewall = false; };
   ncdns = handleTest ./ncdns.nix {};
   ndppd = handleTest ./ndppd.nix {};
+  nebula = handleTest ./nebula.nix {};
   neo4j = handleTest ./neo4j.nix {};
   netdata = handleTest ./netdata.nix {};
   networking.networkd = handleTest ./networking.nix { networkd = true; };
@@ -320,6 +324,7 @@ in
   pleroma = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./pleroma.nix {};
   plikd = handleTest ./plikd.nix {};
   plotinus = handleTest ./plotinus.nix {};
+  podgrab = handleTest ./podgrab.nix {};
   podman = handleTestOn ["x86_64-linux"] ./podman.nix {};
   pomerium = handleTestOn ["x86_64-linux"] ./pomerium.nix {};
   postfix = handleTest ./postfix.nix {};
@@ -340,7 +345,6 @@ in
   proxy = handleTest ./proxy.nix {};
   pt2-clone = handleTest ./pt2-clone.nix {};
   qboot = handleTestOn ["x86_64-linux" "i686-linux"] ./qboot.nix {};
-  quagga = handleTest ./quagga.nix {};
   quorum = handleTest ./quorum.nix {};
   rabbitmq = handleTest ./rabbitmq.nix {};
   radarr = handleTest ./radarr.nix {};
@@ -432,6 +436,7 @@ in
   wasabibackend = handleTest ./wasabibackend.nix {};
   wiki-js = handleTest ./wiki-js.nix {};
   wireguard = handleTest ./wireguard {};
+  wmderland = handleTest ./wmderland.nix {};
   wordpress = handleTest ./wordpress.nix {};
   xandikos = handleTest ./xandikos.nix {};
   xautolock = handleTest ./xautolock.nix {};
diff --git a/nixpkgs/nixos/tests/babeld.nix b/nixpkgs/nixos/tests/babeld.nix
index 5817ea4ce142..d4df6f86d089 100644
--- a/nixpkgs/nixos/tests/babeld.nix
+++ b/nixpkgs/nixos/tests/babeld.nix
@@ -25,9 +25,6 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
       {
         virtualisation.vlans = [ 10 20 ];
 
-        boot.kernel.sysctl."net.ipv4.conf.all.forwarding" = 1;
-        boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;
-
         networking = {
           useDHCP = false;
           firewall.enable = false;
@@ -74,9 +71,6 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
       {
         virtualisation.vlans = [ 20 30 ];
 
-        boot.kernel.sysctl."net.ipv4.conf.all.forwarding" = 1;
-        boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;
-
         networking = {
           useDHCP = false;
           firewall.enable = false;
diff --git a/nixpkgs/nixos/tests/containers-nested.nix b/nixpkgs/nixos/tests/containers-nested.nix
new file mode 100644
index 000000000000..a653361494f9
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-nested.nix
@@ -0,0 +1,30 @@
+# Test for NixOS' container nesting.
+
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nested";
+
+  meta = with pkgs.lib.maintainers; { maintainers = [ sorki ]; };
+
+  machine = { lib, ... }:
+    let
+      makeNested = subConf: {
+        containers.nested = {
+          autoStart = true;
+          privateNetwork = true;
+          config = subConf;
+        };
+      };
+    in makeNested (makeNested { });
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("container@nested.service")
+    machine.succeed("systemd-run --pty --machine=nested -- machinectl list | grep nested")
+    print(
+        machine.succeed(
+            "systemd-run --pty --machine=nested -- systemd-run --pty --machine=nested -- systemctl status"
+        )
+    )
+  '';
+})
+
diff --git a/nixpkgs/nixos/tests/docker-tools.nix b/nixpkgs/nixos/tests/docker-tools.nix
index 80d527b453fa..96662b4540cc 100644
--- a/nixpkgs/nixos/tests/docker-tools.nix
+++ b/nixpkgs/nixos/tests/docker-tools.nix
@@ -321,5 +321,48 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         docker.succeed(
             "docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/jane | grep -E ^1000$'"
         )
+
+    with subtest("Ensure docker load on merged images loads all of the constituent images"):
+        docker.succeed(
+            "docker load --input='${examples.mergedBashAndRedis}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.bash.imageName}-${examples.bash.imageTag}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.redis.imageName}-${examples.redis.imageTag}'"
+        )
+        docker.succeed("docker run --rm ${examples.bash.imageName} bash --version")
+        docker.succeed("docker run --rm ${examples.redis.imageName} redis-cli --version")
+        docker.succeed("docker rmi ${examples.bash.imageName}")
+        docker.succeed("docker rmi ${examples.redis.imageName}")
+
+    with subtest(
+        "Ensure docker load on merged images loads all of the constituent images (missing tags)"
+    ):
+        docker.succeed(
+            "docker load --input='${examples.mergedBashNoTagAndRedis}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.bashNoTag.imageName}-${examples.bashNoTag.imageTag}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.redis.imageName}-${examples.redis.imageTag}'"
+        )
+        # we need to explicitly specify the generated tag here
+        docker.succeed(
+            "docker run --rm ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag} bash --version"
+        )
+        docker.succeed("docker run --rm ${examples.redis.imageName} redis-cli --version")
+        docker.succeed("docker rmi ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag}")
+        docker.succeed("docker rmi ${examples.redis.imageName}")
+
+    with subtest("mergeImages preserves owners of the original images"):
+        docker.succeed(
+            "docker load --input='${examples.mergedBashFakeRoot}'"
+        )
+        docker.succeed(
+            "docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/jane | grep -E ^1000$'"
+        )
   '';
 })
diff --git a/nixpkgs/nixos/tests/doh-proxy-rust.nix b/nixpkgs/nixos/tests/doh-proxy-rust.nix
new file mode 100644
index 000000000000..ca150cafab50
--- /dev/null
+++ b/nixpkgs/nixos/tests/doh-proxy-rust.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "doh-proxy-rust";
+  meta = with lib.maintainers; {
+    maintainers = [ stephank ];
+  };
+
+  nodes = {
+    machine = { pkgs, lib, ... }: {
+      services.bind = {
+        enable = true;
+        extraOptions = "empty-zones-enable no;";
+        zones = lib.singleton {
+          name = ".";
+          master = true;
+          file = pkgs.writeText "root.zone" ''
+            $TTL 3600
+            . IN SOA ns.example.org. admin.example.org. ( 1 3h 1h 1w 1d )
+            . IN NS ns.example.org.
+            ns.example.org. IN A    192.168.0.1
+          '';
+        };
+      };
+      services.doh-proxy-rust = {
+        enable = true;
+        flags = [
+          "--server-address=127.0.0.1:53"
+        ];
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    url = "http://localhost:3000/dns-query"
+    query = "AAABAAABAAAAAAAAAm5zB2V4YW1wbGUDb3JnAAABAAE="  # IN A ns.example.org.
+    bin_ip = r"$'\xC0\xA8\x00\x01'"  # 192.168.0.1, as shell binary string
+
+    machine.wait_for_unit("bind.service")
+    machine.wait_for_unit("doh-proxy-rust.service")
+    machine.wait_for_open_port(53)
+    machine.wait_for_open_port(3000)
+    machine.succeed(f"curl --fail '{url}?dns={query}' | grep -qF {bin_ip}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dovecot.nix b/nixpkgs/nixos/tests/dovecot.nix
index 1129e3b45d9d..8913c2a6a7e8 100644
--- a/nixpkgs/nixos/tests/dovecot.nix
+++ b/nixpkgs/nixos/tests/dovecot.nix
@@ -8,6 +8,8 @@ import ./make-test-python.nix {
       enable = true;
       protocols = [ "imap" "pop3" ];
       modules = [ pkgs.dovecot_pigeonhole ];
+      mailUser = "vmail";
+      mailGroup = "vmail";
     };
     environment.systemPackages = let
       sendTestMail = pkgs.writeScriptBin "send-testmail" ''
diff --git a/nixpkgs/nixos/tests/gitea.nix b/nixpkgs/nixos/tests/gitea.nix
index 1fb27593f056..037fc7b31bfa 100644
--- a/nixpkgs/nixos/tests/gitea.nix
+++ b/nixpkgs/nixos/tests/gitea.nix
@@ -61,7 +61,7 @@ let
           + "Please contact your site administrator.'"
       )
       server.succeed(
-          "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin create-user "
+          "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
           + "--username test --password totallysafe --email test@localhost'"
       )
 
diff --git a/nixpkgs/nixos/tests/gobgpd.nix b/nixpkgs/nixos/tests/gobgpd.nix
new file mode 100644
index 000000000000..775f65d1199f
--- /dev/null
+++ b/nixpkgs/nixos/tests/gobgpd.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+    ifAddr = node: iface: (pkgs.lib.head node.config.networking.interfaces.${iface}.ipv4.addresses).address;
+  in {
+    name = "gobgpd";
+
+    meta = with pkgs.lib.maintainers; { maintainers = [ higebu ]; };
+
+    nodes = {
+      node1 = { nodes, ... }: {
+        environment.systemPackages = [ pkgs.gobgp ];
+        networking.firewall.allowedTCPPorts = [ 179 ];
+        services.gobgpd = {
+          enable = true;
+          settings = {
+            global = {
+              config = {
+                as = 64512;
+                router-id = "192.168.255.1";
+              };
+            };
+            neighbors = [{
+              config = {
+                neighbor-address = ifAddr nodes.node2 "eth1";
+                peer-as = 64513;
+              };
+            }];
+          };
+        };
+      };
+      node2 = { nodes, ... }: {
+        environment.systemPackages = [ pkgs.gobgp ];
+        networking.firewall.allowedTCPPorts = [ 179 ];
+        services.gobgpd = {
+          enable = true;
+          settings = {
+            global = {
+              config = {
+                as = 64513;
+                router-id = "192.168.255.2";
+              };
+            };
+            neighbors = [{
+              config = {
+                neighbor-address = ifAddr nodes.node1 "eth1";
+                peer-as = 64512;
+              };
+            }];
+          };
+        };
+      };
+    };
+
+    testScript = { nodes, ... }: let
+      addr1 = ifAddr nodes.node1 "eth1";
+      addr2 = ifAddr nodes.node2 "eth1";
+    in
+      ''
+      start_all()
+
+      for node in node1, node2:
+          with subtest("should start gobgpd node"):
+              node.wait_for_unit("gobgpd.service")
+          with subtest("should open port 179"):
+              node.wait_for_open_port(179)
+
+      with subtest("should show neighbors by gobgp cli and BGP state should be ESTABLISHED"):
+          node1.wait_until_succeeds("gobgp neighbor ${addr2} | grep -q ESTABLISHED")
+          node2.wait_until_succeeds("gobgp neighbor ${addr1} | grep -q ESTABLISHED")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/hibernate.nix b/nixpkgs/nixos/tests/hibernate.nix
index 8251c6e7ef85..ae506c8542fe 100644
--- a/nixpkgs/nixos/tests/hibernate.nix
+++ b/nixpkgs/nixos/tests/hibernate.nix
@@ -1,44 +1,120 @@
 # Test whether hibernation from partition works.
 
-import ./make-test-python.nix (pkgs: {
-  name = "hibernate";
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+}:
 
-  nodes = {
-    machine = { config, lib, pkgs, ... }: with lib; {
-      virtualisation.emptyDiskImages = [ config.virtualisation.memorySize ];
+with import ../lib/testing-python.nix { inherit system pkgs; };
 
-      systemd.services.backdoor.conflicts = [ "sleep.target" ];
+let
+  # System configuration of the installed system, which is used for the actual
+  # hibernate testing.
+  installedConfig = with pkgs.lib; {
+    imports = [
+      ../modules/testing/test-instrumentation.nix
+      ../modules/profiles/qemu-guest.nix
+      ../modules/profiles/minimal.nix
+    ];
 
-      swapDevices = mkOverride 0 [ { device = "/dev/vdb"; } ];
+    hardware.enableAllFirmware = mkForce false;
+    documentation.nixos.enable = false;
+    boot.loader.grub.device = "/dev/vda";
 
-      networking.firewall.allowedTCPPorts = [ 4444 ];
+    systemd.services.backdoor.conflicts = [ "sleep.target" ];
 
-      systemd.services.listener.serviceConfig.ExecStart = "${pkgs.netcat}/bin/nc -l 4444 -k";
+    powerManagement.resumeCommands = "systemctl --no-block restart backdoor.service";
+
+    fileSystems = {
+      "/".device = "/dev/vda2";
     };
+    swapDevices = mkOverride 0 [ { device = "/dev/vda1"; } ];
+  };
+  installedSystem = (import ../lib/eval-config.nix {
+    inherit system;
+    modules = [ installedConfig ];
+  }).config.system.build.toplevel;
+in makeTest {
+  name = "hibernate";
+
+  nodes = {
+    # System configuration used for installing the installedConfig from above.
+    machine = { config, lib, pkgs, ... }: with lib; {
+      imports = [
+        ../modules/profiles/installation-device.nix
+        ../modules/profiles/base.nix
+      ];
 
-    probe = { pkgs, ...}: {
-      environment.systemPackages = [ pkgs.netcat ];
+      nix.binaryCaches = mkForce [ ];
+      nix.extraOptions = ''
+        hashed-mirrors =
+        connect-timeout = 1
+      '';
+
+      virtualisation.diskSize = 8 * 1024;
+      virtualisation.emptyDiskImages = [
+        # Small root disk for installer
+        512
+      ];
+      virtualisation.bootDevice = "/dev/vdb";
     };
   };
 
   # 9P doesn't support reconnection to virtio transport after a hibernation.
   # Therefore, machine just hangs on any Nix store access.
-  # To work around it we run a daemon which listens to a TCP connection and
-  # try to connect to it as a test.
+  # To avoid this, we install NixOS onto a temporary disk with everything we need
+  # included into the store.
 
   testScript =
     ''
+      def create_named_machine(name):
+          return create_machine(
+              {
+                  "qemuFlags": "-cpu max ${
+                    if system == "x86_64-linux" then "-m 1024"
+                    else "-m 768 -enable-kvm -machine virt,gic-version=host"}",
+                  "hdaInterface": "virtio",
+                  "hda": "vm-state-machine/machine.qcow2",
+                  "name": name,
+              }
+          )
+
+
+      # Install NixOS
       machine.start()
-      machine.wait_for_unit("multi-user.target")
-      machine.succeed("mkswap /dev/vdb")
-      machine.succeed("swapon -a")
-      machine.start_job("listener")
-      machine.wait_for_open_port(4444)
-      machine.succeed("systemctl hibernate &")
-      machine.wait_for_shutdown()
-      probe.wait_for_unit("multi-user.target")
-      machine.start()
-      probe.wait_until_succeeds("echo test | nc machine 4444 -N")
+      machine.succeed(
+          # Partition /dev/vda
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary linux-swap 1M 1024M"
+          + " mkpart primary ext2 1024M -1s",
+          "udevadm settle",
+          "mkfs.ext3 -L nixos /dev/vda2",
+          "mount LABEL=nixos /mnt",
+          "mkswap /dev/vda1 -L swap",
+          # Install onto /mnt
+          "nix-store --load-db < ${pkgs.closureInfo {rootPaths = [installedSystem];}}/registration",
+          "nixos-install --root /mnt --system ${installedSystem} --no-root-passwd",
+      )
+      machine.shutdown()
+
+      # Start up
+      hibernate = create_named_machine("hibernate")
+
+      # Drop in file that checks if we un-hibernated properly (and not booted fresh)
+      hibernate.succeed(
+          "mkdir /run/test",
+          "mount -t ramfs -o size=1m ramfs /run/test",
+          "echo not persisted to disk > /run/test/suspended",
+      )
+
+      # Hibernate machine
+      hibernate.succeed("systemctl hibernate &")
+      hibernate.wait_for_shutdown()
+
+      # Restore machine from hibernation, validate our ramfs file is there.
+      resume = create_named_machine("resume")
+      resume.start()
+      resume.succeed("grep 'not persisted to disk' /run/test/suspended")
     '';
 
-})
+}
diff --git a/nixpkgs/nixos/tests/home-assistant.nix b/nixpkgs/nixos/tests/home-assistant.nix
index 726c7eb6acb6..3b7295324a18 100644
--- a/nixpkgs/nixos/tests/home-assistant.nix
+++ b/nixpkgs/nixos/tests/home-assistant.nix
@@ -14,9 +14,10 @@ in {
     environment.systemPackages = with pkgs; [ mosquitto ];
     services.mosquitto = {
       enable = true;
+      checkPasswords = true;
       users = {
         "${mqttUsername}" = {
-          acl = [ "pattern readwrite #" ];
+          acl = [ "topic readwrite #" ];
           password = mqttPassword;
         };
       };
@@ -77,12 +78,9 @@ in {
         hass.wait_for_open_port(8123)
         hass.succeed("curl --fail http://localhost:8123/lovelace")
     with subtest("Toggle a binary sensor using MQTT"):
-        # wait for broker to become available
-        hass.wait_until_succeeds(
-            "mosquitto_sub -V mqttv311 -t home-assistant/test -u ${mqttUsername} -P '${mqttPassword}' -W 1 -t '*'"
-        )
+        hass.wait_for_open_port(1883)
         hass.succeed(
-            "mosquitto_pub -V mqttv311 -t home-assistant/test -u ${mqttUsername} -P '${mqttPassword}' -m let_there_be_light"
+            "mosquitto_pub -V mqttv5 -t home-assistant/test -u ${mqttUsername} -P '${mqttPassword}' -m let_there_be_light"
         )
     with subtest("Print log to ease debugging"):
         output_log = hass.succeed("cat ${configDir}/home-assistant.log")
diff --git a/nixpkgs/nixos/tests/iscsi-root.nix b/nixpkgs/nixos/tests/iscsi-root.nix
new file mode 100644
index 000000000000..bda51d2c2e42
--- /dev/null
+++ b/nixpkgs/nixos/tests/iscsi-root.nix
@@ -0,0 +1,161 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+    let
+      initiatorName = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
+      targetName = "iqn.2003-01.org.linux-iscsi.target.x8664:sn.acf8fd9c23af";
+    in
+      {
+        name = "iscsi";
+        meta = {
+          maintainers = pkgs.lib.teams.deshaw.members
+          ++ (with pkgs.lib.maintainers; [ ajs124 ]);
+        };
+
+        nodes = {
+          target = { config, pkgs, lib, ... }: {
+            services.target = {
+              enable = true;
+              config = {
+                fabric_modules = [];
+                storage_objects = [
+                  {
+                    dev = "/dev/vdb";
+                    name = "test";
+                    plugin = "block";
+                    write_back = true;
+                    wwn = "92b17c3f-6b40-4168-b082-ceeb7b495522";
+                  }
+                ];
+                targets = [
+                  {
+                    fabric = "iscsi";
+                    tpgs = [
+                      {
+                        enable = true;
+                        attributes = {
+                          authentication = 0;
+                          generate_node_acls = 1;
+                        };
+                        luns = [
+                          {
+                            alias = "94dfe06967";
+                            alua_tg_pt_gp_name = "default_tg_pt_gp";
+                            index = 0;
+                            storage_object = "/backstores/block/test";
+                          }
+                        ];
+                        node_acls = [
+                          {
+                            mapped_luns = [
+                              {
+                                alias = "d42f5bdf8a";
+                                index = 0;
+                                tpg_lun = 0;
+                                write_protect = false;
+                              }
+                            ];
+                            node_wwn = initiatorName;
+                          }
+                        ];
+                        portals = [
+                          {
+                            ip_address = "0.0.0.0";
+                            iser = false;
+                            offload = false;
+                            port = 3260;
+                          }
+                        ];
+                        tag = 1;
+                      }
+                    ];
+                    wwn = targetName;
+                  }
+                ];
+              };
+            };
+
+            networking.firewall.allowedTCPPorts = [ 3260 ];
+            networking.firewall.allowedUDPPorts = [ 3260 ];
+
+            virtualisation.memorySize = 2048;
+            virtualisation.emptyDiskImages = [ 2048 ];
+          };
+
+          initiatorAuto = { nodes, config, pkgs, ... }: {
+            services.openiscsi = {
+              enable = true;
+              enableAutoLoginOut = true;
+              discoverPortal = "target";
+              name = initiatorName;
+            };
+
+            environment.systemPackages = with pkgs; [
+              xfsprogs
+            ];
+
+            system.extraDependencies = [ nodes.initiatorRootDisk.config.system.build.toplevel ];
+
+            nix.binaryCaches = lib.mkForce [];
+            nix.extraOptions = ''
+              hashed-mirrors =
+              connect-timeout = 1
+            '';
+          };
+
+          initiatorRootDisk = { config, pkgs, modulesPath, lib, ... }: {
+            boot.loader.grub.enable = false;
+            boot.kernelParams = lib.mkOverride 5 (
+              [
+                "boot.shell_on_fail"
+                "console=tty1"
+                "ip=${config.networking.primaryIPAddress}:::255.255.255.0::ens9:none"
+              ]
+            );
+
+            # defaults to true, puts some code in the initrd that tries to mount an overlayfs on /nix/store
+            virtualisation.writableStore = false;
+
+            fileSystems = lib.mkOverride 5 {
+              "/" = {
+                fsType = "xfs";
+                device = "/dev/sda";
+                options = [ "_netdev" ];
+              };
+            };
+
+            boot.iscsi-initiator = {
+              discoverPortal = "target";
+              name = initiatorName;
+              target = targetName;
+            };
+          };
+        };
+
+        testScript = { nodes, ... }: ''
+          target.start()
+          target.wait_for_unit("iscsi-target.service")
+
+          initiatorAuto.start()
+
+          initiatorAuto.wait_for_unit("iscsid.service")
+          initiatorAuto.wait_for_unit("iscsi.service")
+          initiatorAuto.get_unit_info("iscsi")
+
+          initiatorAuto.succeed("set -x; while ! test -e /dev/sda; do sleep 1; done")
+
+          initiatorAuto.succeed("mkfs.xfs /dev/sda")
+          initiatorAuto.succeed("mkdir /mnt && mount /dev/sda /mnt")
+          initiatorAuto.succeed(
+              "nixos-install --no-bootloader --no-root-passwd --system ${nodes.initiatorRootDisk.config.system.build.toplevel}"
+          )
+          initiatorAuto.succeed("umount /mnt && rmdir /mnt")
+          initiatorAuto.shutdown()
+
+          initiatorRootDisk.start()
+          initiatorRootDisk.wait_for_unit("multi-user.target")
+          initiatorRootDisk.wait_for_unit("iscsid")
+          initiatorRootDisk.succeed("touch test")
+          initiatorRootDisk.shutdown()
+        '';
+      }
+)
diff --git a/nixpkgs/nixos/tests/kernel-generic.nix b/nixpkgs/nixos/tests/kernel-generic.nix
index fbead1dc23b7..17089141e9e4 100644
--- a/nixpkgs/nixos/tests/kernel-generic.nix
+++ b/nixpkgs/nixos/tests/kernel-generic.nix
@@ -1,7 +1,7 @@
 { system ? builtins.currentSystem
 , config ? { }
 , pkgs ? import ../.. { inherit system config; }
-}:
+}@args:
 
 with pkgs.lib;
 
@@ -22,7 +22,7 @@ let
         assert "Linux" in machine.succeed("uname -s")
         assert "${linuxPackages.kernel.modDirVersion}" in machine.succeed("uname -a")
       '';
-  }));
+  }) args);
 in
 with pkgs; {
   linux_4_4 = makeKernelTest "4.4" linuxPackages_4_4;
diff --git a/nixpkgs/nixos/tests/mxisd.nix b/nixpkgs/nixos/tests/mxisd.nix
index 22755ea353b6..354612a8a53d 100644
--- a/nixpkgs/nixos/tests/mxisd.nix
+++ b/nixpkgs/nixos/tests/mxisd.nix
@@ -6,25 +6,16 @@ import ./make-test-python.nix ({ pkgs, ... } : {
   };
 
   nodes = {
-    server_mxisd = args : {
+    server = args : {
       services.mxisd.enable = true;
       services.mxisd.matrix.domain = "example.org";
     };
-
-    server_ma1sd = args : {
-      services.mxisd.enable = true;
-      services.mxisd.matrix.domain = "example.org";
-      services.mxisd.package = pkgs.ma1sd;
-    };
   };
 
   testScript = ''
     start_all()
-    server_mxisd.wait_for_unit("mxisd.service")
-    server_mxisd.wait_for_open_port(8090)
-    server_mxisd.succeed("curl -Ssf 'http://127.0.0.1:8090/_matrix/identity/api/v1'")
-    server_ma1sd.wait_for_unit("mxisd.service")
-    server_ma1sd.wait_for_open_port(8090)
-    server_ma1sd.succeed("curl -Ssf 'http://127.0.0.1:8090/_matrix/identity/api/v1'")
+    server.wait_for_unit("mxisd.service")
+    server.wait_for_open_port(8090)
+    server.succeed("curl -Ssf 'http://127.0.0.1:8090/_matrix/identity/api/v1'")
   '';
 })
diff --git a/nixpkgs/nixos/tests/mysql/mariadb-galera-mariabackup.nix b/nixpkgs/nixos/tests/mysql/mariadb-galera-mariabackup.nix
index 0a40c010a471..1c73bc854a57 100644
--- a/nixpkgs/nixos/tests/mysql/mariadb-galera-mariabackup.nix
+++ b/nixpkgs/nixos/tests/mysql/mariadb-galera-mariabackup.nix
@@ -31,7 +31,7 @@ in {
         firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
         firewall.allowedUDPPorts = [ 4567 ];
       };
-      users.users.testuser = { };
+      users.users.testuser = { isSystemUser = true; };
       systemd.services.mysql = with pkgs; {
         path = [ mysqlenv-common mysqlenv-mariabackup ];
       };
@@ -89,7 +89,7 @@ in {
         firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
         firewall.allowedUDPPorts = [ 4567 ];
       };
-      users.users.testuser = { };
+      users.users.testuser = { isSystemUser = true; };
       systemd.services.mysql = with pkgs; {
         path = [ mysqlenv-common mysqlenv-mariabackup ];
       };
@@ -136,7 +136,7 @@ in {
         firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
         firewall.allowedUDPPorts = [ 4567 ];
       };
-      users.users.testuser = { };
+      users.users.testuser = { isSystemUser = true; };
       systemd.services.mysql = with pkgs; {
         path = [ mysqlenv-common mysqlenv-mariabackup ];
       };
diff --git a/nixpkgs/nixos/tests/mysql/mariadb-galera-rsync.nix b/nixpkgs/nixos/tests/mysql/mariadb-galera-rsync.nix
index 6fb3cfef8d73..709a8b5085cb 100644
--- a/nixpkgs/nixos/tests/mysql/mariadb-galera-rsync.nix
+++ b/nixpkgs/nixos/tests/mysql/mariadb-galera-rsync.nix
@@ -31,7 +31,7 @@ in {
         firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
         firewall.allowedUDPPorts = [ 4567 ];
       };
-      users.users.testuser = { };
+      users.users.testuser = { isSystemUser = true; };
       systemd.services.mysql = with pkgs; {
         path = [ mysqlenv-common mysqlenv-rsync ];
       };
@@ -84,7 +84,7 @@ in {
         firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
         firewall.allowedUDPPorts = [ 4567 ];
       };
-      users.users.testuser = { };
+      users.users.testuser = { isSystemUser = true; };
       systemd.services.mysql = with pkgs; {
         path = [ mysqlenv-common mysqlenv-rsync ];
       };
@@ -130,7 +130,7 @@ in {
         firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
         firewall.allowedUDPPorts = [ 4567 ];
       };
-      users.users.testuser = { };
+      users.users.testuser = { isSystemUser = true; };
       systemd.services.mysql = with pkgs; {
         path = [ mysqlenv-common mysqlenv-rsync ];
       };
diff --git a/nixpkgs/nixos/tests/mysql/mysql.nix b/nixpkgs/nixos/tests/mysql/mysql.nix
index 50ad5c68aef1..c21136416d47 100644
--- a/nixpkgs/nixos/tests/mysql/mysql.nix
+++ b/nixpkgs/nixos/tests/mysql/mysql.nix
@@ -9,8 +9,8 @@ import ./../make-test-python.nix ({ pkgs, ...} : {
       { pkgs, ... }:
 
       {
-        users.users.testuser = { };
-        users.users.testuser2 = { };
+        users.users.testuser = { isSystemUser = true; };
+        users.users.testuser2 = { isSystemUser = true; };
         services.mysql.enable = true;
         services.mysql.initialDatabases = [
           { name = "testdb3"; schema = ./testdb.sql; }
@@ -44,8 +44,8 @@ import ./../make-test-python.nix ({ pkgs, ...} : {
         # Kernel panic - not syncing: Out of memory: compulsory panic_on_oom is enabled
         virtualisation.memorySize = 1024;
 
-        users.users.testuser = { };
-        users.users.testuser2 = { };
+        users.users.testuser = { isSystemUser = true; };
+        users.users.testuser2 = { isSystemUser = true; };
         services.mysql.enable = true;
         services.mysql.initialDatabases = [
           { name = "testdb3"; schema = ./testdb.sql; }
@@ -75,8 +75,8 @@ import ./../make-test-python.nix ({ pkgs, ...} : {
       { pkgs, ... }:
 
       {
-        users.users.testuser = { };
-        users.users.testuser2 = { };
+        users.users.testuser = { isSystemUser = true; };
+        users.users.testuser2 = { isSystemUser = true; };
         services.mysql.enable = true;
         services.mysql.initialScript = pkgs.writeText "mariadb-init.sql" ''
           ALTER USER root@localhost IDENTIFIED WITH unix_socket;
diff --git a/nixpkgs/nixos/tests/nebula.nix b/nixpkgs/nixos/tests/nebula.nix
new file mode 100644
index 000000000000..372cfebdf801
--- /dev/null
+++ b/nixpkgs/nixos/tests/nebula.nix
@@ -0,0 +1,223 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
+
+  # We'll need to be able to trade cert files between nodes via scp.
+  inherit (import ./ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+
+  makeNebulaNode = { config, ... }: name: extraConfig: lib.mkMerge [
+    {
+      # Expose nebula for doing cert signing.
+      environment.systemPackages = [ pkgs.nebula ];
+      users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+      services.openssh.enable = true;
+
+      services.nebula.networks.smoke = {
+        # Note that these paths won't exist when the machine is first booted.
+        ca = "/etc/nebula/ca.crt";
+        cert = "/etc/nebula/${name}.crt";
+        key = "/etc/nebula/${name}.key";
+        listen = { host = "0.0.0.0"; port = 4242; };
+      };
+    }
+    extraConfig
+  ];
+
+in
+{
+  name = "nebula";
+
+  nodes = {
+
+    lighthouse = { ... } @ args:
+      makeNebulaNode args "lighthouse" {
+        networking.interfaces.eth1.ipv4.addresses = [{
+          address = "192.168.1.1";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          isLighthouse = true;
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+          };
+        };
+      };
+
+    node2 = { ... } @ args:
+      makeNebulaNode args "node2" {
+        networking.interfaces.eth1.ipv4.addresses = [{
+          address = "192.168.1.2";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
+          isLighthouse = false;
+          lighthouses = [ "10.0.100.1" ];
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+          };
+        };
+      };
+
+    node3 = { ... } @ args:
+      makeNebulaNode args "node3" {
+        networking.interfaces.eth1.ipv4.addresses = [{
+          address = "192.168.1.3";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
+          isLighthouse = false;
+          lighthouses = [ "10.0.100.1" ];
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
+          };
+        };
+      };
+
+    node4 = { ... } @ args:
+      makeNebulaNode args "node4" {
+        networking.interfaces.eth1.ipv4.addresses = [{
+          address = "192.168.1.4";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          enable = true;
+          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
+          isLighthouse = false;
+          lighthouses = [ "10.0.100.1" ];
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+          };
+        };
+      };
+
+    node5 = { ... } @ args:
+      makeNebulaNode args "node5" {
+        networking.interfaces.eth1.ipv4.addresses = [{
+          address = "192.168.1.5";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          enable = false;
+          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
+          isLighthouse = false;
+          lighthouses = [ "10.0.100.1" ];
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+          };
+        };
+      };
+
+  };
+
+  testScript = let
+
+    setUpPrivateKey = name: ''
+    ${name}.succeed(
+        "mkdir -p /root/.ssh",
+        "chown 700 /root/.ssh",
+        "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
+        "chown 600 /root/.ssh/id_snakeoil",
+    )
+    '';
+
+    # From what I can tell, StrictHostKeyChecking=no is necessary for ssh to work between machines.
+    sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil";
+
+    restartAndCheckNebula = name: ip: ''
+      ${name}.systemctl("restart nebula@smoke.service")
+      ${name}.succeed("ping -c5 ${ip}")
+    '';
+
+    # Create a keypair on the client node, then use the public key to sign a cert on the lighthouse.
+    signKeysFor = name: ip: ''
+      lighthouse.wait_for_unit("sshd.service")
+      ${name}.wait_for_unit("sshd.service")
+      ${name}.succeed(
+          "mkdir -p /etc/nebula",
+          "nebula-cert keygen -out-key /etc/nebula/${name}.key -out-pub /etc/nebula/${name}.pub",
+          "scp ${sshOpts} /etc/nebula/${name}.pub 192.168.1.1:/tmp/${name}.pub",
+      )
+      lighthouse.succeed(
+          'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /tmp/${name}.pub -out-crt /tmp/${name}.crt',
+      )
+      ${name}.succeed(
+          "scp ${sshOpts} 192.168.1.1:/tmp/${name}.crt /etc/nebula/${name}.crt",
+          "scp ${sshOpts} 192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt",
+      )
+    '';
+
+  in ''
+    start_all()
+
+    # Create the certificate and sign the lighthouse's keys.
+    ${setUpPrivateKey "lighthouse"}
+    lighthouse.succeed(
+        "mkdir -p /etc/nebula",
+        'nebula-cert ca -name "Smoke Test" -out-crt /etc/nebula/ca.crt -out-key /etc/nebula/ca.key',
+        'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "lighthouse" -groups "lighthouse" -ip "10.0.100.1/24" -out-crt /etc/nebula/lighthouse.crt -out-key /etc/nebula/lighthouse.key',
+    )
+
+    # Reboot the lighthouse and verify that the nebula service comes up on boot.
+    # Since rebooting takes a while, we'll just restart the service on the other nodes.
+    lighthouse.shutdown()
+    lighthouse.start()
+    lighthouse.wait_for_unit("nebula@smoke.service")
+    lighthouse.succeed("ping -c5 10.0.100.1")
+
+    # Create keys for node2's nebula service and test that it comes up.
+    ${setUpPrivateKey "node2"}
+    ${signKeysFor "node2" "10.0.100.2/24"}
+    ${restartAndCheckNebula "node2" "10.0.100.2"}
+
+    # Create keys for node3's nebula service and test that it comes up.
+    ${setUpPrivateKey "node3"}
+    ${signKeysFor "node3" "10.0.100.3/24"}
+    ${restartAndCheckNebula "node3" "10.0.100.3"}
+
+    # Create keys for node4's nebula service and test that it comes up.
+    ${setUpPrivateKey "node4"}
+    ${signKeysFor "node4" "10.0.100.4/24"}
+    ${restartAndCheckNebula "node4" "10.0.100.4"}
+
+    # Create keys for node5's nebula service and test that it does not come up.
+    ${setUpPrivateKey "node5"}
+    ${signKeysFor "node5" "10.0.100.5/24"}
+    node5.fail("systemctl status nebula@smoke.service")
+    node5.fail("ping -c5 10.0.100.5")
+
+    # The lighthouse can ping node2 and node3 but not node5
+    lighthouse.succeed("ping -c3 10.0.100.2")
+    lighthouse.succeed("ping -c3 10.0.100.3")
+    lighthouse.fail("ping -c3 10.0.100.5")
+
+    # node2 can ping the lighthouse, but not node3 because of its inbound firewall
+    node2.succeed("ping -c3 10.0.100.1")
+    node2.fail("ping -c3 10.0.100.3")
+
+    # node3 can ping the lighthouse and node2
+    node3.succeed("ping -c3 10.0.100.1")
+    node3.succeed("ping -c3 10.0.100.2")
+
+    # node4 can ping the lighthouse but not node2 or node3
+    node4.succeed("ping -c3 10.0.100.1")
+    node4.fail("ping -c3 10.0.100.2")
+    node4.fail("ping -c3 10.0.100.3")
+
+    # node2 can ping node3 now that node3 pinged it first
+    node2.succeed("ping -c3 10.0.100.3")
+    # node4 can ping node2 if node2 pings it first
+    node2.succeed("ping -c3 10.0.100.4")
+    node4.succeed("ping -c3 10.0.100.2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nextcloud/basic.nix b/nixpkgs/nixos/tests/nextcloud/basic.nix
index 5074b6cdafef..76f7f68dc960 100644
--- a/nixpkgs/nixos/tests/nextcloud/basic.nix
+++ b/nixpkgs/nixos/tests/nextcloud/basic.nix
@@ -51,7 +51,7 @@ in {
     nextcloudWithoutMagick = args@{ config, pkgs, lib, ... }:
       lib.mkMerge
       [ (nextcloud args)
-        { services.nextcloud.disableImagemagick = true; } ];
+        { services.nextcloud.enableImagemagick = false; } ];
   };
 
   testScript = { nodes, ... }: let
diff --git a/nixpkgs/nixos/tests/packagekit.nix b/nixpkgs/nixos/tests/packagekit.nix
index 28d1374bf92c..020a4e65e6d8 100644
--- a/nixpkgs/nixos/tests/packagekit.nix
+++ b/nixpkgs/nixos/tests/packagekit.nix
@@ -8,7 +8,6 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     environment.systemPackages = with pkgs; [ dbus ];
     services.packagekit = {
       enable = true;
-      backend = "test_nop";
     };
   };
 
diff --git a/nixpkgs/nixos/tests/podgrab.nix b/nixpkgs/nixos/tests/podgrab.nix
new file mode 100644
index 000000000000..e927e25fea56
--- /dev/null
+++ b/nixpkgs/nixos/tests/podgrab.nix
@@ -0,0 +1,34 @@
+let
+  defaultPort = 8080;
+  customPort = 4242;
+in
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "podgrab";
+
+  nodes = {
+    default = { ... }: {
+      services.podgrab.enable = true;
+    };
+
+    customized = { ... }: {
+      services.podgrab = {
+        enable = true;
+        port = customPort;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    default.wait_for_unit("podgrab")
+    default.wait_for_open_port("${toString defaultPort}")
+    default.succeed("curl --fail http://localhost:${toString defaultPort}")
+
+    customized.wait_for_unit("podgrab")
+    customized.wait_for_open_port("${toString customPort}")
+    customized.succeed("curl --fail http://localhost:${toString customPort}")
+  '';
+
+  meta.maintainers = with pkgs.lib.maintainers; [ ambroisie ];
+})
diff --git a/nixpkgs/nixos/tests/prometheus-exporters.nix b/nixpkgs/nixos/tests/prometheus-exporters.nix
index 62c0080dd516..9aa430c25a4f 100644
--- a/nixpkgs/nixos/tests/prometheus-exporters.nix
+++ b/nixpkgs/nixos/tests/prometheus-exporters.nix
@@ -118,6 +118,8 @@ let
       metricProvider = {
         services.bird2.enable = true;
         services.bird2.config = ''
+          router id 127.0.0.1;
+
           protocol kernel MyObviousTestString {
             ipv4 {
               import all;
@@ -132,7 +134,9 @@ let
       exporterTest = ''
         wait_for_unit("prometheus-bird-exporter.service")
         wait_for_open_port(9324)
-        succeed("curl -sSf http://localhost:9324/metrics | grep -q 'MyObviousTestString'")
+        wait_until_succeeds(
+            "curl -sSf http://localhost:9324/metrics | grep -q 'MyObviousTestString'"
+        )
       '';
     };
 
diff --git a/nixpkgs/nixos/tests/quagga.nix b/nixpkgs/nixos/tests/quagga.nix
deleted file mode 100644
index 9aed49bf452f..000000000000
--- a/nixpkgs/nixos/tests/quagga.nix
+++ /dev/null
@@ -1,96 +0,0 @@
-# This test runs Quagga and checks if OSPF routing works.
-#
-# Network topology:
-#   [ client ]--net1--[ router1 ]--net2--[ router2 ]--net3--[ server ]
-#
-# All interfaces are in OSPF Area 0.
-
-import ./make-test-python.nix ({ pkgs, ... }:
-  let
-
-    ifAddr = node: iface: (pkgs.lib.head node.config.networking.interfaces.${iface}.ipv4.addresses).address;
-
-    ospfConf = ''
-      interface eth2
-        ip ospf hello-interval 1
-        ip ospf dead-interval 5
-      !
-      router ospf
-        network 192.168.0.0/16 area 0
-    '';
-
-  in
-    {
-      name = "quagga";
-
-      meta = with pkgs.lib.maintainers; {
-        maintainers = [ tavyc ];
-      };
-
-      nodes = {
-
-        client =
-          { nodes, ... }:
-          {
-            virtualisation.vlans = [ 1 ];
-            networking.defaultGateway = ifAddr nodes.router1 "eth1";
-          };
-
-        router1 =
-          { ... }:
-          {
-            virtualisation.vlans = [ 1 2 ];
-            boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
-            networking.firewall.extraCommands = "iptables -A nixos-fw -i eth2 -p ospf -j ACCEPT";
-            services.quagga.ospf = {
-              enable = true;
-              config = ospfConf;
-            };
-          };
-
-        router2 =
-          { ... }:
-          {
-            virtualisation.vlans = [ 3 2 ];
-            boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
-            networking.firewall.extraCommands = "iptables -A nixos-fw -i eth2 -p ospf -j ACCEPT";
-            services.quagga.ospf = {
-              enable = true;
-              config = ospfConf;
-            };
-          };
-
-        server =
-          { nodes, ... }:
-          {
-            virtualisation.vlans = [ 3 ];
-            networking.defaultGateway = ifAddr nodes.router2 "eth1";
-            networking.firewall.allowedTCPPorts = [ 80 ];
-            services.httpd.enable = true;
-            services.httpd.adminAddr = "foo@example.com";
-          };
-      };
-
-      testScript =
-        { ... }:
-        ''
-          start_all()
-
-          # Wait for the networking to start on all machines
-          for machine in client, router1, router2, server:
-              machine.wait_for_unit("network.target")
-
-          with subtest("Wait for OSPF to form adjacencies"):
-              for gw in router1, router2:
-                  gw.wait_for_unit("ospfd")
-                  gw.wait_until_succeeds("vtysh -c 'show ip ospf neighbor' | grep Full")
-                  gw.wait_until_succeeds("vtysh -c 'show ip route' | grep '^O>'")
-
-          with subtest("Test ICMP"):
-              client.wait_until_succeeds("ping -c 3 server >&2")
-
-          with subtest("Test whether HTTP works"):
-              server.wait_for_unit("httpd")
-              client.succeed("curl --fail http://server/ >&2")
-        '';
-    })
diff --git a/nixpkgs/nixos/tests/redis.nix b/nixpkgs/nixos/tests/redis.nix
index ca1715614359..28b6058c2c02 100644
--- a/nixpkgs/nixos/tests/redis.nix
+++ b/nixpkgs/nixos/tests/redis.nix
@@ -17,16 +17,15 @@ in
         services.redis.unixSocket = redisSocket;
 
         # Allow access to the unix socket for the "redis" group.
-        services.redis.settings.unixsocketperm = "770";
+        services.redis.unixSocketPerm = 770;
 
         users.users."member" = {
           createHome = false;
           description = "A member of the redis group";
+          isNormalUser = true;
           extraGroups = [
             "redis"
           ];
-          group = "users";
-          shell = "/bin/sh";
         };
       };
   };
diff --git a/nixpkgs/nixos/tests/rspamd.nix b/nixpkgs/nixos/tests/rspamd.nix
index 7f41e1a79566..f0ccfe7ea0e6 100644
--- a/nixpkgs/nixos/tests/rspamd.nix
+++ b/nixpkgs/nixos/tests/rspamd.nix
@@ -274,7 +274,10 @@ in
 
         I find cows to be evil don't you?
       '';
-      users.users.tester.password = "test";
+      users.users.tester = {
+        isNormalUser = true;
+        password = "test";
+      };
       services.postfix = {
         enable = true;
         destination = ["example.com"];
diff --git a/nixpkgs/nixos/tests/searx.nix b/nixpkgs/nixos/tests/searx.nix
index 7c28eea30d20..2f808cb65266 100644
--- a/nixpkgs/nixos/tests/searx.nix
+++ b/nixpkgs/nixos/tests/searx.nix
@@ -108,7 +108,7 @@ import ./make-test-python.nix ({ pkgs, ...} :
               "${pkgs.curl}/bin/curl --fail http://localhost/searx >&2"
           )
           fancy.succeed(
-              "${pkgs.curl}/bin/curl --fail http://localhost/searx/static/js/bootstrap.min.js >&2"
+              "${pkgs.curl}/bin/curl --fail http://localhost/searx/static/themes/oscar/js/bootstrap.min.js >&2"
           )
     '';
 })
diff --git a/nixpkgs/nixos/tests/shadow.nix b/nixpkgs/nixos/tests/shadow.nix
index e5755e8e0878..c51961e1fc68 100644
--- a/nixpkgs/nixos/tests/shadow.nix
+++ b/nixpkgs/nixos/tests/shadow.nix
@@ -13,14 +13,17 @@ in import ./make-test-python.nix ({ pkgs, ... }: {
     users = {
       mutableUsers = true;
       users.emma = {
+        isNormalUser = true;
         password = password1;
         shell = pkgs.bash;
       };
       users.layla = {
+        isNormalUser = true;
         password = password2;
         shell = pkgs.shadow;
       };
       users.ash = {
+        isNormalUser = true;
         password = password4;
         shell = pkgs.bash;
       };
diff --git a/nixpkgs/nixos/tests/spacecookie.nix b/nixpkgs/nixos/tests/spacecookie.nix
index 5b5022a74278..a640657d8a6b 100644
--- a/nixpkgs/nixos/tests/spacecookie.nix
+++ b/nixpkgs/nixos/tests/spacecookie.nix
@@ -1,47 +1,52 @@
 let
-  gopherRoot  = "/tmp/gopher";
-  gopherHost  = "gopherd";
-  fileContent = "Hello Gopher!";
-  fileName    = "file.txt";
+  gopherRoot   = "/tmp/gopher";
+  gopherHost   = "gopherd";
+  gopherClient = "client";
+  fileContent  = "Hello Gopher!\n";
+  fileName     = "file.txt";
 in
   import ./make-test-python.nix ({...}: {
     name = "spacecookie";
     nodes = {
       ${gopherHost} = {
-        networking.firewall.allowedTCPPorts = [ 70 ];
         systemd.services.spacecookie = {
           preStart = ''
             mkdir -p ${gopherRoot}/directory
-            echo "${fileContent}" > ${gopherRoot}/${fileName}
+            printf "%s" "${fileContent}" > ${gopherRoot}/${fileName}
           '';
         };
 
         services.spacecookie = {
           enable = true;
-          root = gopherRoot;
-          hostname = gopherHost;
+          openFirewall = true;
+          settings = {
+            root = gopherRoot;
+            hostname = gopherHost;
+          };
         };
       };
 
-      client = {};
+      ${gopherClient} = {};
     };
 
     testScript = ''
       start_all()
-      ${gopherHost}.wait_for_open_port(70)
+
+      # with daemon type notify, the unit being started
+      # should also mean the port is open
       ${gopherHost}.wait_for_unit("spacecookie.service")
-      client.wait_for_unit("network.target")
+      ${gopherClient}.wait_for_unit("network.target")
 
-      fileResponse = client.succeed("curl -f -s gopher://${gopherHost}//${fileName}")
+      fileResponse = ${gopherClient}.succeed("curl -f -s gopher://${gopherHost}/0/${fileName}")
 
       # the file response should return our created file exactly
-      if not (fileResponse == "${fileContent}\n"):
+      if not (fileResponse == "${builtins.replaceStrings [ "\n" ] [ "\\n" ] fileContent}"):
           raise Exception("Unexpected file response")
 
       # sanity check on the directory listing: we serve a directory and a file
       # via gopher, so the directory listing should have exactly two entries,
       # one with gopher file type 0 (file) and one with file type 1 (directory).
-      dirResponse = client.succeed("curl -f -s gopher://${gopherHost}")
+      dirResponse = ${gopherClient}.succeed("curl -f -s gopher://${gopherHost}")
       dirEntries = [l[0] for l in dirResponse.split("\n") if len(l) > 0]
       dirEntries.sort()
 
diff --git a/nixpkgs/nixos/tests/systemd-confinement.nix b/nixpkgs/nixos/tests/systemd-confinement.nix
index ebf6d218fd68..d04e4a3f867c 100644
--- a/nixpkgs/nixos/tests/systemd-confinement.nix
+++ b/nixpkgs/nixos/tests/systemd-confinement.nix
@@ -150,6 +150,7 @@ import ./make-test-python.nix {
 
     config.users.groups.chroot-testgroup = {};
     config.users.users.chroot-testuser = {
+      isSystemUser = true;
       description = "Chroot Test User";
       group = "chroot-testgroup";
     };
diff --git a/nixpkgs/nixos/tests/unbound.nix b/nixpkgs/nixos/tests/unbound.nix
index d4b8bb15ced6..ca9718ac633e 100644
--- a/nixpkgs/nixos/tests/unbound.nix
+++ b/nixpkgs/nixos/tests/unbound.nix
@@ -132,12 +132,15 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
 
         users.users = {
           # user that is permitted to access the unix socket
-          someuser.extraGroups = [
-            config.users.users.unbound.group
-          ];
+          someuser = {
+            isSystemUser = true;
+            extraGroups = [
+              config.users.users.unbound.group
+            ];
+          };
 
           # user that is not permitted to access the unix socket
-          unauthorizeduser = {};
+          unauthorizeduser = { isSystemUser = true; };
         };
 
         environment.etc = {
diff --git a/nixpkgs/nixos/tests/wmderland.nix b/nixpkgs/nixos/tests/wmderland.nix
new file mode 100644
index 000000000000..d121ed98b7ac
--- /dev/null
+++ b/nixpkgs/nixos/tests/wmderland.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "wmderland";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ takagiy ];
+  };
+
+  machine = { lib, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = lib.mkForce "none+wmderland";
+    services.xserver.windowManager.wmderland.enable = true;
+
+    systemd.services.setupWmderlandConfig = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "multi-user.target" ];
+      environment = {
+        HOME = "/home/alice";
+      };
+      unitConfig = {
+        type = "oneshot";
+        RemainAfterExit = true;
+        user = "alice";
+      };
+      script = let
+        config = pkgs.writeText "config" ''
+             set $Mod = Mod1
+             bindsym $Mod+Return exec ${pkgs.xterm}/bin/xterm -cm -pc
+        '';
+      in ''
+        mkdir -p $HOME/.config/wmderland
+        cp ${config} $HOME/.config/wmderland/config
+      '';
+    };
+  };
+
+  testScript = { ... }: ''
+    with subtest("ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("/home/alice/.Xauthority")
+        machine.succeed("xauth merge ~alice/.Xauthority")
+
+    with subtest("ensure we can open a new terminal"):
+        machine.send_key("alt-ret")
+        machine.wait_until_succeeds("pgrep xterm")
+        machine.wait_for_window(r"alice.*?machine")
+        machine.screenshot("terminal")
+
+    with subtest("ensure we can communicate through ipc with wmderlandc"):
+        # Kills the previously open xterm
+        machine.succeed("pgrep xterm")
+        machine.execute("DISPLAY=:0 wmderlandc kill")
+        machine.fail("pgrep xterm")
+  '';
+})