Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/configuration/file-systems.xml | 11
-rw-r--r--  nixos/doc/manual/release-notes/rl-2009.xml | 37
-rw-r--r--  nixos/lib/testing-python.nix | 6
-rw-r--r--  nixos/lib/testing.nix | 6
-rw-r--r--  nixos/lib/testing/jquery-ui.nix | 24
-rw-r--r--  nixos/lib/testing/jquery.nix | 36
-rw-r--r--  nixos/modules/config/ldap.nix | 4
-rw-r--r--  nixos/modules/config/nsswitch.nix | 48
-rw-r--r--  nixos/modules/module-list.nix | 1
-rw-r--r--  nixos/modules/rename.nix | 1
-rw-r--r--  nixos/modules/security/google_oslogin.nix | 1
-rw-r--r--  nixos/modules/services/databases/mysql.nix | 7
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 2
-rw-r--r--  nixos/modules/services/databases/redis.nix | 3
-rw-r--r--  nixos/modules/services/misc/sssd.nix | 10
-rw-r--r--  nixos/modules/services/misc/zoneminder.nix | 8
-rw-r--r--  nixos/modules/services/network-filesystems/samba.nix | 1
-rw-r--r--  nixos/modules/services/networking/avahi-daemon.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/uwsgi.nix | 20
-rw-r--r--  nixos/modules/services/x11/window-managers/berry.nix | 25
-rw-r--r--  nixos/modules/services/x11/window-managers/default.nix | 4
-rw-r--r--  nixos/modules/services/x11/window-managers/lwm.nix | 25
-rw-r--r--  nixos/modules/services/x11/window-managers/smallwm.nix | 25
-rw-r--r--  nixos/modules/services/x11/window-managers/yeahwm.nix | 25
-rw-r--r--  nixos/modules/system/boot/binfmt.nix | 9
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 18
-rw-r--r--  nixos/modules/tasks/filesystems/zfs.nix | 11
-rw-r--r--  nixos/modules/virtualisation/rkt.nix | 64
-rw-r--r--  nixos/tests/agda.nix | 41
-rw-r--r--  nixos/tests/all-tests.nix | 5
-rw-r--r--  nixos/tests/docker-tools.nix | 10
-rw-r--r--  nixos/tests/kubernetes/base.nix | 8
-rw-r--r--  nixos/tests/kubernetes/dns.nix | 74
-rw-r--r--  nixos/tests/kubernetes/rbac.nix | 86
-rw-r--r--  nixos/tests/mysql/mariadb-galera-mariabackup.nix | 32
-rw-r--r--  nixos/tests/mysql/mariadb-galera-rsync.nix | 32
-rw-r--r--  nixos/tests/mysql/mysql-replication.nix | 12
-rw-r--r--  nixos/tests/systemd-binfmt.nix | 24
-rw-r--r--  nixos/tests/zoneminder.nix | 23
39 files changed, 474 insertions(+), 309 deletions(-)
diff --git a/nixos/doc/manual/configuration/file-systems.xml b/nixos/doc/manual/configuration/file-systems.xml
index e4c03de71b72..3ac02a975ebf 100644
--- a/nixos/doc/manual/configuration/file-systems.xml
+++ b/nixos/doc/manual/configuration/file-systems.xml
@@ -16,6 +16,17 @@
     fsType = "ext4";
   };
 </programlisting>
+  This will create an entry in <filename>/etc/fstab</filename>, which will
+  generate a corresponding
+  <link xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.mount.html">systemd.mount</link>
+  unit via
+  <link xlink:href="https://www.freedesktop.org/software/systemd/man/systemd-fstab-generator.html">systemd-fstab-generator</link>.
+  The filesystem will be mounted automatically unless
+  <literal>"noauto"</literal> is present in <link
+  linkend="opt-fileSystems._name__.options">options</link>.
+  <literal>"noauto"</literal> filesystems can be mounted explicitly using
+  <command>systemctl</command>, e.g. <command>systemctl start
+  data.mount</command>.
   Mount points are created automatically if they don’t already exist. For
   <option><link linkend="opt-fileSystems._name__.device">device</link></option>,
   it’s best to use the topology-independent device aliases in
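
For illustration, a minimal sketch (not part of the patch) of a "noauto" filesystem as described in the added paragraph; the /data mount point and label are placeholders:

    fileSystems."/data" = {
      # topology-independent device alias, as the manual recommends
      device = "/dev/disk/by-label/data";
      fsType = "ext4";
      # a data.mount unit is still generated, but nothing mounts it at boot
      options = [ "noauto" ];
    };
    # mount on demand via the generated unit:
    #   systemctl start data.mount
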
diff --git a/nixos/doc/manual/release-notes/rl-2009.xml b/nixos/doc/manual/release-notes/rl-2009.xml
index 5b1d04e4bc16..1d24553b08db 100644
--- a/nixos/doc/manual/release-notes/rl-2009.xml
+++ b/nixos/doc/manual/release-notes/rl-2009.xml
@@ -61,6 +61,28 @@
       This is to make it possible to use <literal>podman</literal> instead of <literal>docker</literal>.
     </para>
    </listitem>
+   <listitem>
+    <para>
+      MariaDB has been updated to 10.4, MariaDB Galera to 26.4.
+      Before you upgrade, it would be best to take a backup of your database.
+      For MariaDB Galera Cluster, see <link xlink:href="https://mariadb.com/kb/en/upgrading-from-mariadb-103-to-mariadb-104-with-galera-cluster/">Upgrading
+      from MariaDB 10.3 to MariaDB 10.4 with Galera Cluster</link> instead.
+      Before doing the upgrade read <link xlink:href="https://mariadb.com/kb/en/upgrading-from-mariadb-103-to-mariadb-104/#incompatible-changes-between-103-and-104">Incompatible
+      Changes Between 10.3 and 10.4</link>.
+      After the upgrade you will need to run <literal>mysql_upgrade</literal>.
+      MariaDB 10.4 introduces a number of changes to the authentication process, intended to make things easier and more
+      intuitive. See <link xlink:href="https://mariadb.com/kb/en/authentication-from-mariadb-104/">Authentication from MariaDB 10.4</link>.
+      The unix_socket auth plugin does not use a password; it authenticates against the connecting user's UID instead. When a new MariaDB data directory is initialized, two MariaDB users are
+      created that can be used with the new unix_socket auth plugin as well as the traditional mysql_native_password plugin: root@localhost and mysql@localhost. To actually use
+      the traditional mysql_native_password plugin method, one must run the following:
+<programlisting>
+services.mysql.initialScript = pkgs.writeText "mariadb-init.sql" ''
+  ALTER USER root@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD("verysecret");
+'';
+</programlisting>
+      When an existing MariaDB data directory is only upgraded (not initialized), these users are not created or modified.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 
@@ -100,6 +122,12 @@
   <itemizedlist>
    <listitem>
     <para>
+     The go-modules builder now uses vendorSha256 instead of modSha256 to pin
+     fetched dependencies. Using modSha256 currently only emits a warning, but support for it will be removed in the next release.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
      Grafana is now built without support for phantomjs by default. Phantomjs support has been
      <link xlink:href="https://grafana.com/docs/grafana/latest/guides/whats-new-in-v6-4/">deprecated in Grafana</link>
      and the <package>phantomjs</package> project is
@@ -189,13 +217,13 @@ environment.systemPackages = [
              customizable to your liking by using
              <literal>php.withExtensions</literal> or
              <literal>php.buildEnv</literal> instead of writing config files
-             or changing configure flags.             
+             or changing configure flags.
            </para>
          </listitem>
          <listitem>
            <para>
              The remaining configuration flags can now be set directly on
-             the <literal>php</literal> attribute. For example, instead of 
+             the <literal>php</literal> attribute. For example, instead of
 
              <programlisting>
 php.override {
@@ -353,6 +381,11 @@ systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
       will have changed.
     </para>
    </listitem>
+   <listitem>
+    <para>
+      The rkt module has been removed, as it was archived by upstream.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 
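As a hedged illustration of the go-modules note above, a before/after sketch of a buildGoModule call; the package name and hashes are placeholders, not taken from this patch:

    # before (now emits a deprecation warning):
    buildGoModule {
      pname = "example";
      version = "1.0";
      src = ./.;
      modSha256 = "<hash of the fetched modules>";
    }

    # after:
    buildGoModule {
      pname = "example";
      version = "1.0";
      src = ./.;
      vendorSha256 = "<hash of the vendored dependencies>";
    }
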
diff --git a/nixos/lib/testing-python.nix b/nixos/lib/testing-python.nix
index 88801f20517d..123323711a7c 100644
--- a/nixos/lib/testing-python.nix
+++ b/nixos/lib/testing-python.nix
@@ -10,11 +10,7 @@
 with import ./build-vms.nix { inherit system pkgs minimal extraConfigurations; };
 with pkgs;
 
-let
-  jquery-ui = callPackage ./testing/jquery-ui.nix { };
-  jquery = callPackage ./testing/jquery.nix { };
-
-in rec {
+rec {
 
   inherit pkgs;
 
diff --git a/nixos/lib/testing.nix b/nixos/lib/testing.nix
index cbb7faf039ec..5c784c2f0abe 100644
--- a/nixos/lib/testing.nix
+++ b/nixos/lib/testing.nix
@@ -10,11 +10,7 @@
 with import ./build-vms.nix { inherit system pkgs minimal extraConfigurations; };
 with pkgs;
 
-let
-  jquery-ui = callPackage ./testing/jquery-ui.nix { };
-  jquery = callPackage ./testing/jquery.nix { };
-
-in rec {
+rec {
 
   inherit pkgs;
 
diff --git a/nixos/lib/testing/jquery-ui.nix b/nixos/lib/testing/jquery-ui.nix
deleted file mode 100644
index 05bd8dc2f946..000000000000
--- a/nixos/lib/testing/jquery-ui.nix
+++ /dev/null
@@ -1,24 +0,0 @@
-{ stdenv, fetchurl, unzip }:
-
-stdenv.mkDerivation rec {
-  name = "jquery-ui-1.11.4";
-
-  src = fetchurl {
-    url = "https://jqueryui.com/resources/download/${name}.zip";
-    sha256 = "0ciyaj1acg08g8hpzqx6whayq206fvf4whksz2pjgxlv207lqgjh";
-  };
-
-  buildInputs = [ unzip ];
-
-  installPhase =
-    ''
-      mkdir -p "$out/js"
-      cp -rv . "$out/js"
-    '';
-
-  meta = {
-    homepage = "https://jqueryui.com/";
-    description = "A library of JavaScript widgets and effects";
-    platforms = stdenv.lib.platforms.all;
-  };
-}
diff --git a/nixos/lib/testing/jquery.nix b/nixos/lib/testing/jquery.nix
deleted file mode 100644
index 732fdb3ba879..000000000000
--- a/nixos/lib/testing/jquery.nix
+++ /dev/null
@@ -1,36 +0,0 @@
-{ stdenv, fetchurl, compressed ? true }:
-
-with stdenv.lib;
-
-stdenv.mkDerivation rec {
-  name = "jquery-1.11.3";
-
-  src = if compressed then
-    fetchurl {
-      url = "http://code.jquery.com/${name}.min.js";
-      sha256 = "1f4glgxxn3jnvry3dpzmazj3207baacnap5w20gr2xlk789idfgc";
-    }
-    else
-    fetchurl {
-      url = "http://code.jquery.com/${name}.js";
-      sha256 = "1v956yf5spw0156rni5z77hzqwmby7ajwdcd6mkhb6zvl36awr90";
-    };
-
-  dontUnpack = true;
-
-  installPhase =
-    ''
-      mkdir -p "$out/js"
-      cp -v "$src" "$out/js/jquery.js"
-      ${optionalString compressed ''
-        (cd "$out/js" && ln -s jquery.js jquery.min.js)
-      ''}
-    '';
-
-  meta = with stdenv.lib; {
-    description = "JavaScript library designed to simplify the client-side scripting of HTML";
-    homepage = "http://jquery.com/";
-    license = licenses.mit;
-    platforms = platforms.all;
-  };
-}
diff --git a/nixos/modules/config/ldap.nix b/nixos/modules/config/ldap.nix
index 4c8b527676b2..1a5dbcd4e26b 100644
--- a/nixos/modules/config/ldap.nix
+++ b/nixos/modules/config/ldap.nix
@@ -244,6 +244,10 @@ in
       if cfg.daemon.enable then nss_pam_ldapd else nss_ldap
     );
 
+    system.nssDatabases.group = optional cfg.nsswitch "ldap";
+    system.nssDatabases.passwd = optional cfg.nsswitch "ldap";
+    system.nssDatabases.shadow = optional cfg.nsswitch "ldap";
+
     users = mkIf cfg.daemon.enable {
       groups.nslcd = {
         gid = config.ids.gids.nslcd;
diff --git a/nixos/modules/config/nsswitch.nix b/nixos/modules/config/nsswitch.nix
index 22ddb3490c8e..d19d35a48906 100644
--- a/nixos/modules/config/nsswitch.nix
+++ b/nixos/modules/config/nsswitch.nix
@@ -4,34 +4,7 @@
 
 with lib;
 
-let
-
-  # only with nscd up and running we can load NSS modules that are not integrated in NSS
-  canLoadExternalModules = config.services.nscd.enable;
-  # XXX Move these to their respective modules
-  nssmdns = canLoadExternalModules && config.services.avahi.nssmdns;
-  nsswins = canLoadExternalModules && config.services.samba.nsswins;
-  ldap = canLoadExternalModules && (config.users.ldap.enable && config.users.ldap.nsswitch);
-
-  hostArray = mkMerge [
-    (mkBefore [ "files" ])
-    (mkIf nssmdns [ "mdns_minimal [NOTFOUND=return]" ])
-    (mkIf nsswins [ "wins" ])
-    (mkAfter [ "dns" ])
-    (mkIf nssmdns (mkOrder 1501 [ "mdns" ])) # 1501 to ensure it's after dns
-  ];
-
-  passwdArray = mkMerge [
-    (mkBefore [ "files" ])
-    (mkIf ldap [ "ldap" ])
-  ];
-
-  shadowArray = mkMerge [
-    (mkBefore [ "files" ])
-    (mkIf ldap [ "ldap" ])
-  ];
-
-in {
+{
   options = {
 
     # NSS modules.  Hacky!
@@ -122,9 +95,11 @@ in {
   config = {
     assertions = [
       {
-        # generic catch if the NixOS module adding to nssModules does not prevent it with specific message.
-        assertion = config.system.nssModules.path != "" -> canLoadExternalModules;
-        message = "Loading NSS modules from path ${config.system.nssModules.path} requires nscd being enabled.";
+        # Prevent users from disabling nscd, with nssModules being set.
+        # If disabling nscd is really necessary, it's still possible to opt out
+        # by forcing config.system.nssModules to [].
+        assertion = config.system.nssModules.path != "" -> config.services.nscd.enable;
+        message = "Loading NSS modules from system.nssModules (${config.system.nssModules.path}) requires services.nscd.enable to be set to true.";
       }
     ];
 
@@ -145,10 +120,13 @@ in {
     '';
 
     system.nssDatabases = {
-      passwd = passwdArray;
-      group = passwdArray;
-      shadow = shadowArray;
-      hosts = hostArray;
+      passwd = mkBefore [ "files" ];
+      group = mkBefore [ "files" ];
+      shadow = mkBefore [ "files" ];
+      hosts = mkMerge [
+        (mkBefore [ "files" ])
+        (mkAfter [ "dns" ])
+      ];
       services = mkBefore [ "files" ];
     };
   };
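
The hunks above (and the ldap, samba, avahi and sssd hunks elsewhere in this diff) converge on one pattern: each service module contributes its own NSS entries and the module system handles merging and ordering. A rough sketch, with cfg referring to the contributing module's own options:

    # inside a service module, mirroring the samba hunk below
    system.nssModules = lib.optional cfg.nsswins samba;
    system.nssDatabases.hosts = lib.optional cfg.nsswins "wins";

    # in a user configuration, the opt-out mentioned in the new assertion comment
    system.nssModules = lib.mkForce [];
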
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 5adbc26522cf..89677970dd9a 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -1002,7 +1002,6 @@
   ./virtualisation/podman.nix
   ./virtualisation/qemu-guest-agent.nix
   ./virtualisation/railcar.nix
-  ./virtualisation/rkt.nix
   ./virtualisation/virtualbox-guest.nix
   ./virtualisation/virtualbox-host.nix
   ./virtualisation/vmware-guest.nix
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index a946268494ed..7776c648af8a 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -48,6 +48,7 @@ with lib;
       systemd-logind API). Instead of using the module you can now
       simply add the brightnessctl package to environment.systemPackages.
     '')
+    (mkRemovedOptionModule [ "virtualisation" "rkt" ] "The rkt module has been removed, it was archived by upstream")
 
     (mkRemovedOptionModule ["services" "prey" ] ''
       prey-bash-client is deprecated upstream
diff --git a/nixos/modules/security/google_oslogin.nix b/nixos/modules/security/google_oslogin.nix
index 78c2089baeb9..c2889a0f0d1d 100644
--- a/nixos/modules/security/google_oslogin.nix
+++ b/nixos/modules/security/google_oslogin.nix
@@ -50,6 +50,7 @@ in
     # enable the nss module, so user lookups etc. work
     system.nssModules = [ package ];
     system.nssDatabases.passwd = [ "cache_oslogin" "oslogin" ];
+    system.nssDatabases.group = [ "cache_oslogin" "oslogin" ];
 
     # Ugly: sshd refuses to start if a store path is given because /nix/store is group-writable.
     # So indirect by a symlink.
diff --git a/nixos/modules/services/databases/mysql.nix b/nixos/modules/services/databases/mysql.nix
index f9e657f57742..44183788d936 100644
--- a/nixos/modules/services/databases/mysql.nix
+++ b/nixos/modules/services/databases/mysql.nix
@@ -87,7 +87,6 @@ in
             datadir = /var/lib/mysql
             bind-address = 127.0.0.1
             port = 3336
-            plugin-load-add = auth_socket.so
 
             !includedir /etc/mysql/conf.d/
           ''';
@@ -315,13 +314,16 @@ in
         datadir = cfg.dataDir;
         bind-address = mkIf (cfg.bind != null) cfg.bind;
         port = cfg.port;
-        plugin-load-add = optional (cfg.ensureUsers != []) "auth_socket.so";
       }
       (mkIf (cfg.replication.role == "master" || cfg.replication.role == "slave") {
         log-bin = "mysql-bin-${toString cfg.replication.serverId}";
         log-bin-index = "mysql-bin-${toString cfg.replication.serverId}.index";
         relay-log = "mysql-relay-bin";
         server-id = cfg.replication.serverId;
+        binlog-ignore-db = [ "information_schema" "performance_schema" "mysql" ];
+      })
+      (mkIf (!isMariaDB) {
+        plugin-load-add = optional (cfg.ensureUsers != []) "auth_socket.so";
       })
     ];
 
@@ -444,7 +446,6 @@ in
 
                         ( echo "stop slave;"
                           echo "change master to master_host='${cfg.replication.masterHost}', master_user='${cfg.replication.masterUser}', master_password='${cfg.replication.masterPassword}';"
-                          echo "set global slave_exec_mode='IDEMPOTENT';"
                           echo "start slave;"
                         ) | ${mysql}/bin/mysql -u root -N
                       ''}
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 982480fbd99c..579b6a4d9c67 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -343,7 +343,7 @@ in
         # Wait for PostgreSQL to be ready to accept connections.
         postStart =
           ''
-            PSQL="${pkgs.sudo}/bin/sudo -u ${cfg.superUser} psql --port=${toString cfg.port}"
+            PSQL="${pkgs.utillinux}/bin/runuser -u ${cfg.superUser} -- psql --port=${toString cfg.port}"
 
             while ! $PSQL -d postgres -c "" 2> /dev/null; do
                 if ! kill -0 "$MAINPID"; then exit 1; fi
diff --git a/nixos/modules/services/databases/redis.nix b/nixos/modules/services/databases/redis.nix
index 5c817422aae5..799c3db62166 100644
--- a/nixos/modules/services/databases/redis.nix
+++ b/nixos/modules/services/databases/redis.nix
@@ -11,12 +11,11 @@ let
     port ${toString cfg.port}
     ${condOption "bind" cfg.bind}
     ${condOption "unixsocket" cfg.unixSocket}
-    daemonize yes
+    daemonize no
     supervised systemd
     loglevel ${cfg.logLevel}
     logfile ${cfg.logfile}
     syslog-enabled ${redisBool cfg.syslog}
-    pidfile /run/redis/redis.pid
     databases ${toString cfg.databases}
     ${concatMapStrings (d: "save ${toString (builtins.elemAt d 0)} ${toString (builtins.elemAt d 1)}\n") cfg.save}
     dbfilename dump.rdb
diff --git a/nixos/modules/services/misc/sssd.nix b/nixos/modules/services/misc/sssd.nix
index 77f6ccfe64f0..3da99a3b38c1 100644
--- a/nixos/modules/services/misc/sssd.nix
+++ b/nixos/modules/services/misc/sssd.nix
@@ -42,11 +42,6 @@ in {
   };
   config = mkMerge [
     (mkIf cfg.enable {
-      assertions = singleton {
-        assertion = nscd.enable;
-        message = "nscd must be enabled through `services.nscd.enable` for SSSD to work.";
-      };
-
       systemd.services.sssd = {
         description = "System Security Services Daemon";
         wantedBy    = [ "multi-user.target" ];
@@ -74,11 +69,12 @@ in {
         mode = "0400";
       };
 
-      system.nssModules = optional cfg.enable pkgs.sssd;
+      system.nssModules = pkgs.sssd;
       system.nssDatabases = {
+        group = [ "sss" ];
         passwd = [ "sss" ];
-        shadow = [ "sss" ];
         services = [ "sss" ];
+        shadow = [ "sss" ];
       };
       services.dbus.packages = [ pkgs.sssd ];
     })
diff --git a/nixos/modules/services/misc/zoneminder.nix b/nixos/modules/services/misc/zoneminder.nix
index d5b3537068d3..d9d34b7fac9b 100644
--- a/nixos/modules/services/misc/zoneminder.nix
+++ b/nixos/modules/services/misc/zoneminder.nix
@@ -63,10 +63,6 @@ let
     ${cfg.extraConfig}
   '';
 
-  phpExtensions = with pkgs.phpPackages; [
-    { pkg = apcu; name = "apcu"; }
-  ];
-
 in {
   options = {
     services.zoneminder = with lib; {
@@ -289,11 +285,9 @@ in {
       phpfpm = lib.mkIf useNginx {
         pools.zoneminder = {
           inherit user group;
+          phpPackage = pkgs.php.withExtensions ({ enabled, all }: enabled ++ [ all.apcu ]);
           phpOptions = ''
             date.timezone = "${config.time.timeZone}"
-
-            ${lib.concatStringsSep "\n" (map (e:
-            "extension=${e.pkg}/lib/php/extensions/${e.name}.so") phpExtensions)}
           '';
           settings = lib.mapAttrs (name: lib.mkDefault) {
             "listen.owner" = user;
diff --git a/nixos/modules/services/network-filesystems/samba.nix b/nixos/modules/services/network-filesystems/samba.nix
index a115590ccaa0..08c912e0fcd4 100644
--- a/nixos/modules/services/network-filesystems/samba.nix
+++ b/nixos/modules/services/network-filesystems/samba.nix
@@ -224,6 +224,7 @@ in
       (mkIf cfg.enable {
 
         system.nssModules = optional cfg.nsswins samba;
+        system.nssDatabases.hosts = optional cfg.nsswins "wins";
 
         systemd = {
           targets.samba = {
diff --git a/nixos/modules/services/networking/avahi-daemon.nix b/nixos/modules/services/networking/avahi-daemon.nix
index ddcfe3d77e2f..c876b252e8cd 100644
--- a/nixos/modules/services/networking/avahi-daemon.nix
+++ b/nixos/modules/services/networking/avahi-daemon.nix
@@ -238,6 +238,10 @@ in
     users.groups.avahi = {};
 
     system.nssModules = optional cfg.nssmdns pkgs.nssmdns;
+    system.nssDatabases.hosts = optionals cfg.nssmdns (mkMerge [
+      [ "mdns_minimal [NOTFOUND=return]" ]
+      (mkOrder 1501 [ "mdns" ]) # 1501 to ensure it's after dns
+    ]);
 
     environment.systemPackages = [ pkgs.avahi ];
 
diff --git a/nixos/modules/services/web-servers/uwsgi.nix b/nixos/modules/services/web-servers/uwsgi.nix
index 4b74c329e3dc..936e211ec713 100644
--- a/nixos/modules/services/web-servers/uwsgi.nix
+++ b/nixos/modules/services/web-servers/uwsgi.nix
@@ -79,7 +79,25 @@ in {
       };
 
       instance = mkOption {
-        type = types.attrs;
+        type =  with lib.types; let
+          valueType = nullOr (oneOf [
+            bool
+            int
+            float
+            str
+            (lazyAttrsOf valueType)
+            (listOf valueType)
+            (mkOptionType {
+              name = "function";
+              description = "function";
+              check = x: isFunction x;
+              merge = mergeOneOption;
+            })
+          ]) // {
+            description = "Json value or lambda";
+            emptyValue.value = {};
+          };
+        in valueType;
         default = {
           type = "normal";
         };
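
A sketch of the kind of value the widened instance type is meant to admit: arbitrarily nested JSON-like attributes plus functions. The emperor/vassal layout is the usual uwsgi pattern; the concrete vassal options here are illustrative only:

    services.uwsgi = {
      enable = true;
      plugins = [ "python3" ];
      instance = {
        type = "emperor";
        vassals = {
          hello = {
            type = "normal";
            master = true;          # plain bools/ints/strings are accepted
            workers = 2;
            http = ":8000";
            module = "wsgi:application";
            # functions are accepted too, via the added "function" option type
            pythonPackages = self: with self; [ flask ];
          };
        };
      };
    };
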
diff --git a/nixos/modules/services/x11/window-managers/berry.nix b/nixos/modules/services/x11/window-managers/berry.nix
new file mode 100644
index 000000000000..0d2285e7a60e
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/berry.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.berry;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.berry.enable = mkEnableOption "berry";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "berry";
+      start = ''
+        ${pkgs.berry}/bin/berry &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.berry ];
+  };
+}
diff --git a/nixos/modules/services/x11/window-managers/default.nix b/nixos/modules/services/x11/window-managers/default.nix
index b815c5f16a1e..87702c58727a 100644
--- a/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixos/modules/services/x11/window-managers/default.nix
@@ -10,6 +10,7 @@ in
   imports = [
     ./2bwm.nix
     ./afterstep.nix
+    ./berry.nix
     ./bspwm.nix
     ./cwm.nix
     ./dwm.nix
@@ -21,6 +22,7 @@ in
     ./i3.nix
     ./jwm.nix
     ./leftwm.nix
+    ./lwm.nix
     ./metacity.nix
     ./mwm.nix
     ./openbox.nix
@@ -28,6 +30,7 @@ in
     ./notion.nix
     ./ratpoison.nix
     ./sawfish.nix
+    ./smallwm.nix
     ./stumpwm.nix
     ./spectrwm.nix
     ./tinywm.nix
@@ -35,6 +38,7 @@ in
     ./windowmaker.nix
     ./wmii.nix
     ./xmonad.nix
+    ./yeahwm.nix
     ./qtile.nix
     ./none.nix ];
 
diff --git a/nixos/modules/services/x11/window-managers/lwm.nix b/nixos/modules/services/x11/window-managers/lwm.nix
new file mode 100644
index 000000000000..e2aa062fd13b
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/lwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.lwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.lwm.enable = mkEnableOption "lwm";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "lwm";
+      start = ''
+        ${pkgs.lwm}/bin/lwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.lwm ];
+  };
+}
diff --git a/nixos/modules/services/x11/window-managers/smallwm.nix b/nixos/modules/services/x11/window-managers/smallwm.nix
new file mode 100644
index 000000000000..091ba4f92b94
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/smallwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.smallwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.smallwm.enable = mkEnableOption "smallwm";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "smallwm";
+      start = ''
+        ${pkgs.smallwm}/bin/smallwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.smallwm ];
+  };
+}
diff --git a/nixos/modules/services/x11/window-managers/yeahwm.nix b/nixos/modules/services/x11/window-managers/yeahwm.nix
new file mode 100644
index 000000000000..351bd7dfe48b
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/yeahwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.yeahwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.yeahwm.enable = mkEnableOption "yeahwm";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "yeahwm";
+      start = ''
+        ${pkgs.yeahwm}/bin/yeahwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.yeahwm ];
+  };
+}
diff --git a/nixos/modules/system/boot/binfmt.nix b/nixos/modules/system/boot/binfmt.nix
index a677ab4cb71a..9eeae0c3ef44 100644
--- a/nixos/modules/system/boot/binfmt.nix
+++ b/nixos/modules/system/boot/binfmt.nix
@@ -268,9 +268,10 @@ in {
       mkdir -p -m 0755 /run/binfmt
       ${lib.concatStringsSep "\n" (lib.mapAttrsToList activationSnippet config.boot.binfmt.registrations)}
     '';
-    systemd.additionalUpstreamSystemUnits = lib.mkIf (config.boot.binfmt.registrations != {})
-      [ "proc-sys-fs-binfmt_misc.automount"
-        "proc-sys-fs-binfmt_misc.mount"
-      ];
+    systemd.additionalUpstreamSystemUnits = lib.mkIf (config.boot.binfmt.registrations != {}) [
+      "proc-sys-fs-binfmt_misc.automount"
+      "proc-sys-fs-binfmt_misc.mount"
+      "systemd-binfmt.service"
+    ];
   };
 }
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 36a25c4e6c3a..99892a28115c 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -164,7 +164,6 @@ let
       "systemd-timedated.service"
       "systemd-localed.service"
       "systemd-hostnamed.service"
-      "systemd-binfmt.service"
       "systemd-exit.service"
       "systemd-update-done.service"
     ] ++ optionals config.services.journald.enableHttpGateway [
@@ -832,16 +831,8 @@ in
 
     system.build.units = cfg.units;
 
-    # Systemd provides various NSS modules to look up dynamic users, locally
-    # configured IP adresses and local container hostnames.
-    # On NixOS, these can only be passed to the NSS system via nscd (and its
-    # LD_LIBRARY_PATH), which is why it's usually a very good idea to have nscd
-    # enabled (also see the config.nscd.enable description).
-    # While there is already an assertion in place complaining loudly about
-    # having nssModules configured and nscd disabled, for some reason we still
-    # check for nscd being enabled before adding to nssModules.
-    system.nssModules = optional config.services.nscd.enable systemd.out;
-    system.nssDatabases = mkIf config.services.nscd.enable {
+    system.nssModules = [ systemd.out ];
+    system.nssDatabases = {
       hosts = (mkMerge [
         [ "mymachines" ]
         (mkOrder 1600 [ "myhostname" ] # 1600 to ensure it's always the last
@@ -851,6 +842,10 @@ in
         [ "mymachines" ]
         (mkAfter [ "systemd" ])
       ]);
+      group = (mkMerge [
+        [ "mymachines" ]
+        (mkAfter [ "systemd" ])
+      ]);
     };
 
     environment.systemPackages = [ systemd ];
@@ -1060,7 +1055,6 @@ in
     systemd.targets.local-fs.unitConfig.X-StopOnReconfiguration = true;
     systemd.targets.remote-fs.unitConfig.X-StopOnReconfiguration = true;
     systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
-    systemd.services.systemd-binfmt.wants = [ "proc-sys-fs-binfmt_misc.mount" ];
     systemd.services.systemd-importd.environment = proxy_env;
 
     # Don't bother with certain units in containers.
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index 43347161a84c..71eed4d6f1a4 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -433,7 +433,16 @@ in
 
       services.zfs.zed.settings = {
         ZED_EMAIL_PROG = mkDefault "${pkgs.mailutils}/bin/mail";
-        PATH = lib.makeBinPath [ packages.zfsUser pkgs.utillinux pkgs.gawk pkgs.gnused pkgs.gnugrep pkgs.coreutils pkgs.curl ];
+        PATH = lib.makeBinPath [
+          packages.zfsUser
+          pkgs.coreutils
+          pkgs.curl
+          pkgs.gawk
+          pkgs.gnugrep
+          pkgs.gnused
+          pkgs.nettools
+          pkgs.utillinux
+        ];
       };
 
       environment.etc = genAttrs
diff --git a/nixos/modules/virtualisation/rkt.nix b/nixos/modules/virtualisation/rkt.nix
deleted file mode 100644
index fd662b52df52..000000000000
--- a/nixos/modules/virtualisation/rkt.nix
+++ /dev/null
@@ -1,64 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-  cfg = config.virtualisation.rkt;
-in
-{
-  options.virtualisation.rkt = {
-    enable = mkEnableOption "rkt metadata service";
-
-    gc = {
-      automatic = mkOption {
-        default = true;
-        type = types.bool;
-        description = "Automatically run the garbage collector at a specific time.";
-      };
-
-      dates = mkOption {
-        default = "03:15";
-        type = types.str;
-        description = ''
-          Specification (in the format described by
-          <citerefentry><refentrytitle>systemd.time</refentrytitle>
-          <manvolnum>7</manvolnum></citerefentry>) of the time at
-          which the garbage collector will run.
-        '';
-      };
-
-      options = mkOption {
-        default = "--grace-period=24h";
-        type = types.str;
-        description = ''
-          Options given to <filename>rkt gc</filename> when the
-          garbage collector is run automatically.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    environment.systemPackages = [ pkgs.rkt ];
-
-    systemd.services.rkt = {
-      description = "rkt metadata service";
-      wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" ];
-      serviceConfig = {
-        ExecStart = "${pkgs.rkt}/bin/rkt metadata-service";
-      };
-    };
-
-    systemd.services.rkt-gc = {
-      description = "rkt garbage collection";
-      startAt = optionalString cfg.gc.automatic cfg.gc.dates;
-      serviceConfig = {
-        Type = "oneshot";
-        ExecStart = "${pkgs.rkt}/bin/rkt gc ${cfg.gc.options}";
-      };
-    };
-
-    users.groups.rkt = {};
-  };
-}
diff --git a/nixos/tests/agda.nix b/nixos/tests/agda.nix
new file mode 100644
index 000000000000..e158999e57d1
--- /dev/null
+++ b/nixos/tests/agda.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  hello-world = pkgs.writeText "hello-world" ''
+    open import IO
+
+    main = run(putStrLn "Hello World!")
+  '';
+in
+{
+  name = "agda";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ alexarice turion ];
+  };
+
+  machine = { pkgs, ... }: {
+    environment.systemPackages = [
+      (pkgs.agda.withPackages {
+        pkgs = p: [ p.standard-library ];
+      })
+    ];
+    virtualisation.memorySize = 2000; # Agda uses a lot of memory
+  };
+
+  testScript = ''
+    # Minimal script that typechecks
+    machine.succeed("touch TestEmpty.agda")
+    machine.succeed("agda TestEmpty.agda")
+
+    # Minimal script that actually uses the standard library
+    machine.succeed('echo "import IO" > TestIO.agda')
+    machine.succeed("agda -l standard-library -i . TestIO.agda")
+
+    # # Hello world
+    machine.succeed(
+        "cp ${hello-world} HelloWorld.agda"
+    )
+    machine.succeed("agda -l standard-library -i . -c HelloWorld.agda")
+  '';
+}
+)
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index f3e90f9bfa70..0acded892c7a 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -23,6 +23,7 @@ in
 {
   _3proxy = handleTest ./3proxy.nix {};
   acme = handleTest ./acme.nix {};
+  agda = handleTest ./agda.nix {};
   atd = handleTest ./atd.nix {};
   avahi = handleTest ./avahi.nix {};
   babeld = handleTest ./babeld.nix {};
@@ -303,6 +304,7 @@ in
   syncthing-relay = handleTest ./syncthing-relay.nix {};
   systemd = handleTest ./systemd.nix {};
   systemd-analyze = handleTest ./systemd-analyze.nix {};
+  systemd-binfmt = handleTestOn ["x86_64-linux"] ./systemd-binfmt.nix {};
   systemd-boot = handleTestOn ["x86_64-linux"] ./systemd-boot.nix {};
   systemd-confinement = handleTest ./systemd-confinement.nix {};
   systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
@@ -347,6 +349,7 @@ in
   yabar = handleTest ./yabar.nix {};
   yggdrasil = handleTest ./yggdrasil.nix {};
   zfs = handleTest ./zfs.nix {};
-  zsh-history = handleTest ./zsh-history.nix {};
+  zoneminder = handleTest ./zoneminder.nix {};
   zookeeper = handleTest ./zookeeper.nix {};
+  zsh-history = handleTest ./zsh-history.nix {};
 }
diff --git a/nixos/tests/docker-tools.nix b/nixos/tests/docker-tools.nix
index 51b472fcf9ce..2375d15b3813 100644
--- a/nixos/tests/docker-tools.nix
+++ b/nixos/tests/docker-tools.nix
@@ -124,6 +124,16 @@ import ./make-test-python.nix ({ pkgs, ... }: {
                 f"docker run --rm  ${examples.layersOrder.imageName} cat /tmp/layer{index}"
             )
 
+    with subtest("Ensure environment variables are correctly inherited"):
+        docker.succeed(
+            "docker load --input='${examples.environmentVariables}'"
+        )
+        out = docker.succeed("docker run --rm ${examples.environmentVariables.imageName} env")
+        env = out.splitlines()
+        assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
+        assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
+        assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
+
     with subtest("Ensure image with only 2 layers can be loaded"):
         docker.succeed(
             "docker load --input='${examples.two-layered-image}'"
diff --git a/nixos/tests/kubernetes/base.nix b/nixos/tests/kubernetes/base.nix
index adb736506895..86de9455e737 100644
--- a/nixos/tests/kubernetes/base.nix
+++ b/nixos/tests/kubernetes/base.nix
@@ -3,7 +3,7 @@
   pkgs ? import ../../.. { inherit system config; }
 }:
 
-with import ../../lib/testing.nix { inherit system pkgs; };
+with import ../../lib/testing-python.nix { inherit system pkgs; };
 with pkgs.lib;
 
 let
@@ -75,10 +75,8 @@ let
       ) machines;
 
       testScript = ''
-        startAll;
-
-        ${test}
-      '';
+        start_all()
+      '' + test;
     };
 
   mkKubernetesMultiNodeTest = attrs: mkKubernetesBaseTest ({
diff --git a/nixos/tests/kubernetes/dns.nix b/nixos/tests/kubernetes/dns.nix
index 638942e15407..890499a0fb8a 100644
--- a/nixos/tests/kubernetes/dns.nix
+++ b/nixos/tests/kubernetes/dns.nix
@@ -75,51 +75,75 @@ let
   singleNodeTest = {
     test = ''
       # prepare machine1 for test
-      $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
-      $machine1->waitUntilSucceeds("docker load < ${redisImage}");
-      $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
-      $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
-      $machine1->waitUntilSucceeds("docker load < ${probeImage}");
-      $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
+      machine1.wait_until_succeeds("kubectl get node machine1.${domain} | grep -w Ready")
+      machine1.wait_until_succeeds(
+          "docker load < ${redisImage}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisPod}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisService}"
+      )
+      machine1.wait_until_succeeds(
+          "docker load < ${probeImage}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${probePod}"
+      )
 
       # check if pods are running
-      $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
-      $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
-      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");
+      machine1.wait_until_succeeds("kubectl get pod redis | grep Running")
+      machine1.wait_until_succeeds("kubectl get pod probe | grep Running")
+      machine1.wait_until_succeeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'")
 
       # check dns on host (dnsmasq)
-      $machine1->succeed("host redis.default.svc.cluster.local");
+      machine1.succeed("host redis.default.svc.cluster.local")
 
       # check dns inside the container
-      $machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
+      machine1.succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local")
     '';
   };
 
   multiNodeTest = {
     test = ''
       # Node token exchange
-      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
-      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+      machine1.wait_until_succeeds(
+          "cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret"
+      )
+      machine2.wait_until_succeeds(
+          "cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join"
+      )
 
       # prepare machines for test
-      $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
-      $machine2->waitUntilSucceeds("docker load < ${redisImage}");
-      $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
-      $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
-      $machine2->waitUntilSucceeds("docker load < ${probeImage}");
-      $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
+      machine1.wait_until_succeeds("kubectl get node machine2.${domain} | grep -w Ready")
+      machine2.wait_until_succeeds(
+          "docker load < ${redisImage}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisPod}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisService}"
+      )
+      machine2.wait_until_succeeds(
+          "docker load < ${probeImage}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${probePod}"
+      )
 
       # check if pods are running
-      $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
-      $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
-      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");
+      machine1.wait_until_succeeds("kubectl get pod redis | grep Running")
+      machine1.wait_until_succeeds("kubectl get pod probe | grep Running")
+      machine1.wait_until_succeeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'")
 
       # check dns on hosts (dnsmasq)
-      $machine1->succeed("host redis.default.svc.cluster.local");
-      $machine2->succeed("host redis.default.svc.cluster.local");
+      machine1.succeed("host redis.default.svc.cluster.local")
+      machine2.succeed("host redis.default.svc.cluster.local")
 
       # check dns inside the container
-      $machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
+      machine1.succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local")
     '';
   };
 in {
diff --git a/nixos/tests/kubernetes/rbac.nix b/nixos/tests/kubernetes/rbac.nix
index 3ce7adcd0d71..c922da515d91 100644
--- a/nixos/tests/kubernetes/rbac.nix
+++ b/nixos/tests/kubernetes/rbac.nix
@@ -94,43 +94,67 @@ let
 
   singlenode = base // {
     test = ''
-      $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
-
-      $machine1->waitUntilSucceeds("docker load < ${kubectlImage}");
-
-      $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
-      $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
-      $machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
-      $machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
-
-      $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
-
-      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
-      $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
-      $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
+      machine1.wait_until_succeeds("kubectl get node machine1.my.zyx | grep -w Ready")
+
+      machine1.wait_until_succeeds(
+          "docker load < ${kubectlImage}"
+      )
+
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roServiceAccount}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRole}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRoleBinding}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${kubectlPod}"
+      )
+
+      machine1.wait_until_succeeds("kubectl get pod kubectl | grep Running")
+
+      machine1.wait_until_succeeds("kubectl exec -ti kubectl -- kubectl get pods")
+      machine1.fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json")
+      machine1.fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl")
     '';
   };
 
   multinode = base // {
     test = ''
       # Node token exchange
-      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
-      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
-
-      $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
-
-      $machine2->waitUntilSucceeds("docker load < ${kubectlImage}");
-
-      $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
-      $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
-      $machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
-      $machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
-
-      $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
-
-      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
-      $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
-      $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
+      machine1.wait_until_succeeds(
+          "cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret"
+      )
+      machine2.wait_until_succeeds(
+          "cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join"
+      )
+
+      machine1.wait_until_succeeds("kubectl get node machine2.my.zyx | grep -w Ready")
+
+      machine2.wait_until_succeeds(
+          "docker load < ${kubectlImage}"
+      )
+
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roServiceAccount}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRole}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRoleBinding}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${kubectlPod}"
+      )
+
+      machine1.wait_until_succeeds("kubectl get pod kubectl | grep Running")
+
+      machine1.wait_until_succeeds("kubectl exec -ti kubectl -- kubectl get pods")
+      machine1.fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json")
+      machine1.fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl")
     '';
   };
 
diff --git a/nixos/tests/mysql/mariadb-galera-mariabackup.nix b/nixos/tests/mysql/mariadb-galera-mariabackup.nix
index 73abf6c555f9..cae55878060c 100644
--- a/nixos/tests/mysql/mariadb-galera-mariabackup.nix
+++ b/nixos/tests/mysql/mariadb-galera-mariabackup.nix
@@ -55,9 +55,9 @@ in {
           };
           galera = {
             wsrep_on = "ON";
-            wsrep_debug = "OFF";
+            wsrep_debug = "NONE";
             wsrep_retry_autocommit = "3";
-            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_provider = "${pkgs.mariadb-galera}/lib/galera/libgalera_smm.so";
             wsrep_cluster_address = "gcomm://";
             wsrep_cluster_name = "galera";
             wsrep_node_address = "192.168.1.1";
@@ -102,9 +102,9 @@ in {
           };
           galera = {
             wsrep_on = "ON";
-            wsrep_debug = "OFF";
+            wsrep_debug = "NONE";
             wsrep_retry_autocommit = "3";
-            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_provider = "${pkgs.mariadb-galera}/lib/galera/libgalera_smm.so";
             wsrep_cluster_address = "gcomm://galera_01,galera_02,galera_03";
             wsrep_cluster_name = "galera";
             wsrep_node_address = "192.168.1.2";
@@ -149,9 +149,9 @@ in {
           };
           galera = {
             wsrep_on = "ON";
-            wsrep_debug = "OFF";
+            wsrep_debug = "NONE";
             wsrep_retry_autocommit = "3";
-            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_provider = "${pkgs.mariadb-galera}/lib/galera/libgalera_smm.so";
             wsrep_cluster_address = "gcomm://galera_01,galera_02,galera_03";
             wsrep_cluster_name = "galera";
             wsrep_node_address = "192.168.1.3";
@@ -184,17 +184,17 @@ in {
     galera_03.wait_for_unit("mysql")
     galera_03.wait_for_open_port(3306)
     galera_02.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 37"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 37"
     )
     galera_02.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
     )
     galera_02.succeed("systemctl stop mysql")
     galera_01.succeed(
         "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (38);'"
     )
     galera_03.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
     )
     galera_01.succeed(
         "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (39);'"
@@ -202,22 +202,22 @@ in {
     galera_02.succeed("systemctl start mysql")
     galera_02.wait_for_open_port(3306)
     galera_02.succeed(
-        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+        "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
     )
     galera_03.succeed(
-        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+        "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
     )
     galera_01.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db3;' -N | grep 39"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db3;' -N | grep 39"
     )
     galera_02.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db2;' -N | grep 38"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db2;' -N | grep 38"
     )
     galera_03.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 37"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 37"
     )
     galera_01.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
-    galera_02.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db2;'")
-    galera_03.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db1;'")
+    galera_02.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db2;'")
+    galera_03.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db1;'")
   '';
 })
diff --git a/nixos/tests/mysql/mariadb-galera-rsync.nix b/nixos/tests/mysql/mariadb-galera-rsync.nix
index cacae4569b57..4318efae8a93 100644
--- a/nixos/tests/mysql/mariadb-galera-rsync.nix
+++ b/nixos/tests/mysql/mariadb-galera-rsync.nix
@@ -51,9 +51,9 @@ in {
           };
           galera = {
             wsrep_on = "ON";
-            wsrep_debug = "OFF";
+            wsrep_debug = "NONE";
             wsrep_retry_autocommit = "3";
-            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_provider = "${pkgs.mariadb-galera}/lib/galera/libgalera_smm.so";
             wsrep_cluster_address = "gcomm://";
             wsrep_cluster_name = "galera-rsync";
             wsrep_node_address = "192.168.2.1";
@@ -97,9 +97,9 @@ in {
           };
           galera = {
             wsrep_on = "ON";
-            wsrep_debug = "OFF";
+            wsrep_debug = "NONE";
             wsrep_retry_autocommit = "3";
-            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_provider = "${pkgs.mariadb-galera}/lib/galera/libgalera_smm.so";
             wsrep_cluster_address = "gcomm://galera_04,galera_05,galera_06";
             wsrep_cluster_name = "galera-rsync";
             wsrep_node_address = "192.168.2.2";
@@ -143,9 +143,9 @@ in {
           };
           galera = {
             wsrep_on = "ON";
-            wsrep_debug = "OFF";
+            wsrep_debug = "NONE";
             wsrep_retry_autocommit = "3";
-            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_provider = "${pkgs.mariadb-galera}/lib/galera/libgalera_smm.so";
             wsrep_cluster_address = "gcomm://galera_04,galera_05,galera_06";
             wsrep_cluster_name = "galera-rsync";
             wsrep_node_address = "192.168.2.3";
@@ -177,17 +177,17 @@ in {
     galera_06.wait_for_unit("mysql")
     galera_06.wait_for_open_port(3306)
     galera_05.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 41"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 41"
     )
     galera_05.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
     )
     galera_05.succeed("systemctl stop mysql")
     galera_04.succeed(
         "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (42);'"
     )
     galera_06.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
     )
     galera_04.succeed(
         "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (43);'"
@@ -195,22 +195,22 @@ in {
     galera_05.succeed("systemctl start mysql")
     galera_05.wait_for_open_port(3306)
     galera_05.succeed(
-        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+        "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
     )
     galera_06.succeed(
-        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+        "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
     )
     galera_04.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db3;' -N | grep 43"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db3;' -N | grep 43"
     )
     galera_05.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db2;' -N | grep 42"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db2;' -N | grep 42"
     )
     galera_06.succeed(
-        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 41"
+        "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 41"
     )
     galera_04.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
-    galera_05.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db2;'")
-    galera_06.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db1;'")
+    galera_05.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db2;'")
+    galera_06.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db1;'")
   '';
 })
diff --git a/nixos/tests/mysql/mysql-replication.nix b/nixos/tests/mysql/mysql-replication.nix
index 81038dccd947..b5e003250193 100644
--- a/nixos/tests/mysql/mysql-replication.nix
+++ b/nixos/tests/mysql/mysql-replication.nix
@@ -59,7 +59,7 @@ in
     master.wait_for_open_port(3306)
     # Wait for testdb to be fully populated (5 rows).
     master.wait_until_succeeds(
-        "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+        "sudo -u mysql mysql -u mysql -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
     )
 
     slave1.start()
@@ -71,19 +71,21 @@ in
 
     # wait for replications to finish
     slave1.wait_until_succeeds(
-        "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+        "sudo -u mysql mysql -u mysql -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
     )
     slave2.wait_until_succeeds(
-        "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+        "sudo -u mysql mysql -u mysql -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
     )
 
     slave2.succeed("systemctl stop mysql")
-    master.succeed("echo 'insert into testdb.tests values (123, 456);' | mysql -u root -N")
+    master.succeed(
+        "echo 'insert into testdb.tests values (123, 456);' | sudo -u mysql mysql -u mysql -N"
+    )
     slave2.succeed("systemctl start mysql")
     slave2.wait_for_unit("mysql")
     slave2.wait_for_open_port(3306)
     slave2.wait_until_succeeds(
-        "echo 'select * from testdb.tests where Id = 123;' | mysql -u root -N | grep 456"
+        "echo 'select * from testdb.tests where Id = 123;' | sudo -u mysql mysql -u mysql -N | grep 456"
     )
   '';
 })
diff --git a/nixos/tests/systemd-binfmt.nix b/nixos/tests/systemd-binfmt.nix
new file mode 100644
index 000000000000..2a676f3da98b
--- /dev/null
+++ b/nixos/tests/systemd-binfmt.nix
@@ -0,0 +1,24 @@
+# Teach the kernel how to run armv7l and aarch64-linux binaries,
+# and run GNU Hello for these architectures.
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "systemd-binfmt";
+  machine = {
+    boot.binfmt.emulatedSystems = [
+      "armv7l-linux"
+      "aarch64-linux"
+    ];
+  };
+
+  testScript = let
+    helloArmv7l = pkgs.pkgsCross.armv7l-hf-multiplatform.hello;
+    helloAarch64 = pkgs.pkgsCross.aarch64-multiplatform.hello;
+  in ''
+    machine.start()
+    assert "world" in machine.succeed(
+        "${helloArmv7l}/bin/hello"
+    )
+    assert "world" in machine.succeed(
+        "${helloAarch64}/bin/hello"
+    )
+  '';
+})
diff --git a/nixos/tests/zoneminder.nix b/nixos/tests/zoneminder.nix
new file mode 100644
index 000000000000..a4e1a05ec0ee
--- /dev/null
+++ b/nixos/tests/zoneminder.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ lib, ...}:
+
+{
+  name = "zoneminder";
+  meta.maintainers = with lib.maintainers; [ danielfullmer ];
+
+  machine = { ... }:
+  {
+    services.zoneminder = {
+      enable = true;
+      database.createLocally = true;
+      database.username = "zoneminder";
+    };
+    time.timeZone = "America/New_York";
+  };
+
+  testScript = ''
+    machine.wait_for_unit("zoneminder.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_open_port(8095)
+    machine.succeed("curl --fail http://localhost:8095/")
+  '';
+})