Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/release-notes/rl-1903.xml              |  33
-rw-r--r--  nixos/lib/testing.nix                                   |   4
-rw-r--r--  nixos/modules/misc/ids.nix                              |   2
-rw-r--r--  nixos/modules/module-list.nix                           |   2
-rw-r--r--  nixos/modules/programs/dmrconfig.nix                    |  38
-rw-r--r--  nixos/modules/services/cluster/kubernetes/dns.nix       | 386
-rw-r--r--  nixos/modules/services/development/jupyter/default.nix  |   1
-rw-r--r--  nixos/modules/services/mail/rspamd.nix                  | 107
-rw-r--r--  nixos/modules/services/misc/gitea.nix                   |  12
-rw-r--r--  nixos/modules/services/misc/packagekit.nix              |   3
-rw-r--r--  nixos/modules/services/monitoring/alerta.nix            | 116
-rw-r--r--  nixos/modules/services/monitoring/kapacitor.nix         |  38
-rw-r--r--  nixos/modules/system/boot/stage-1-init.sh               |   5
-rw-r--r--  nixos/modules/tasks/filesystems.nix                     |   8
-rw-r--r--  nixos/modules/tasks/filesystems/bcachefs.nix            |  61
-rw-r--r--  nixos/release.nix                                       |  14
-rw-r--r--  nixos/tests/gitea.nix                                   |  74
-rwxr-xr-x  nixos/tests/hydra/create-trivial-project.sh             |   3
-rw-r--r--  nixos/tests/kubernetes/dns.nix                          |   4
-rw-r--r--  nixos/tests/opensmtpd.nix                               |   2
-rw-r--r--  nixos/tests/rspamd.nix                                  | 100
21 files changed, 744 insertions, 269 deletions
diff --git a/nixos/doc/manual/release-notes/rl-1903.xml b/nixos/doc/manual/release-notes/rl-1903.xml
index 189961476b32..a0d0b40748c7 100644
--- a/nixos/doc/manual/release-notes/rl-1903.xml
+++ b/nixos/doc/manual/release-notes/rl-1903.xml
@@ -97,20 +97,20 @@
        start org.nixos.nix-daemon</command>.
       </para>
      </listitem>
-     <listitem>
-      <para>
-        The Syncthing state and configuration data has been moved from
-        <varname>services.syncthing.dataDir</varname> to the newly defined
-        <varname>services.syncthing.configDir</varname>, which default to
-        <literal>/var/lib/syncthing/.config/syncthing</literal>.
-        This change makes possible to share synced directories using ACLs
-        without Syncthing resetting the permission on every start.
-      </para>
-     </listitem>
     </itemizedlist>
    </listitem>
    <listitem>
     <para>
+      The Syncthing state and configuration data have been moved from
+      <varname>services.syncthing.dataDir</varname> to the newly defined
+      <varname>services.syncthing.configDir</varname>, which defaults to
+      <literal>/var/lib/syncthing/.config/syncthing</literal>.
+      This change makes it possible to share synced directories using ACLs
+      without Syncthing resetting the permissions on every start.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
      Package <varname>rabbitmq_server</varname> is renamed to
      <varname>rabbitmq-server</varname>.
     </para>
@@ -224,6 +224,19 @@
      supports loading TrueCrypt volumes.
     </para>
    </listitem>
+   <listitem>
+    <para>
+      The Kubernetes DNS addon, kube-dns, has been replaced with CoreDNS.
+      This change is made in accordance with Kubernetes making CoreDNS the official default
+      starting from
+      <link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#sig-cluster-lifecycle">Kubernetes v1.11</link>.
+      Please be aware that upgrading the DNS addon on existing clusters might cause
+      minor downtime while the addon terminates and re-initializes.
+      Also note that the DNS service now runs with 2 pod replicas by default.
+      The desired number of replicas can be configured using
+      <option>services.kubernetes.addons.dns.replicas</option>.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 </section>
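
The new replica option can be set directly from a host configuration; a minimal sketch, assuming a machine that already runs the Kubernetes master role:

    # configuration.nix (sketch): enable the CoreDNS addon and scale it
    services.kubernetes.addons.dns = {
      enable = true;
      replicas = 3;  # overrides the new default of 2
    };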
diff --git a/nixos/lib/testing.nix b/nixos/lib/testing.nix
index 42a0c60c7e19..8cdf4150057e 100644
--- a/nixos/lib/testing.nix
+++ b/nixos/lib/testing.nix
@@ -69,7 +69,9 @@ in rec {
             mkdir -p $out/coverage-data
             mv $i $out/coverage-data/$(dirname $(dirname $i))
           done
-        ''; # */
+        '';
+
+        meta.needsVMSupport = true;
     };
 
 
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index 6e7f0a007bc2..446a311807cc 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -334,6 +334,7 @@
       slurm = 307;
       kapacitor = 308;
       solr = 309;
+      alerta = 310;
 
       # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
 
@@ -628,6 +629,7 @@
       slurm = 307;
       kapacitor = 308;
       solr = 309;
+      alerta = 310;
 
       # When adding a gid, make sure it doesn't match an existing
       # uid. Users and groups with the same name should have equal
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 37e90232da2a..0bbf9be37ceb 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -90,6 +90,7 @@
   ./programs/criu.nix
   ./programs/dconf.nix
   ./programs/digitalbitbox/default.nix
+  ./programs/dmrconfig.nix
   ./programs/environment.nix
   ./programs/firejail.nix
   ./programs/fish.nix
@@ -419,6 +420,7 @@
   ./services/misc/weechat.nix
   ./services/misc/xmr-stak.nix
   ./services/misc/zookeeper.nix
+  ./services/monitoring/alerta.nix
   ./services/monitoring/apcupsd.nix
   ./services/monitoring/arbtt.nix
   ./services/monitoring/bosun.nix
diff --git a/nixos/modules/programs/dmrconfig.nix b/nixos/modules/programs/dmrconfig.nix
new file mode 100644
index 000000000000..e48a4f318370
--- /dev/null
+++ b/nixos/modules/programs/dmrconfig.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.dmrconfig;
+
+in {
+  meta.maintainers = [ maintainers.etu ];
+
+  ###### interface
+  options = {
+    programs.dmrconfig = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Whether to configure the system to enable use of dmrconfig. This
+          enables the required udev rules and installs the program.
+        '';
+        relatedPackages = [ "dmrconfig" ];
+      };
+
+      package = mkOption {
+        default = pkgs.dmrconfig;
+        type = types.package;
+        defaultText = "pkgs.dmrconfig";
+        description = "dmrconfig derivation to use";
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    services.udev.packages = [ cfg.package ];
+  };
+}
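
A minimal sketch of consuming the new module from a system configuration; both option names come from the module above:

    # configuration.nix (sketch)
    programs.dmrconfig.enable = true;
    # Optional: pin the derivation; this is already the default.
    programs.dmrconfig.package = pkgs.dmrconfig;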
diff --git a/nixos/modules/services/cluster/kubernetes/dns.nix b/nixos/modules/services/cluster/kubernetes/dns.nix
index 43bbb50a48d4..5a3e281ea694 100644
--- a/nixos/modules/services/cluster/kubernetes/dns.nix
+++ b/nixos/modules/services/cluster/kubernetes/dns.nix
@@ -3,8 +3,13 @@
 with lib;
 
 let
-  version = "1.14.10";
+  version = "1.2.5";
   cfg = config.services.kubernetes.addons.dns;
+  ports = {
+    dns = 10053;
+    health = 10054;
+    metrics = 10055;
+  };
 in {
   options.services.kubernetes.addons.dns = {
     enable = mkEnableOption "kubernetes dns addon";
@@ -27,49 +32,130 @@ in {
       type = types.str;
     };
 
-    kube-dns = mkOption {
-      description = "Docker image to seed for the kube-dns main container.";
-      type = types.attrs;
-      default = {
-        imageName = "k8s.gcr.io/k8s-dns-kube-dns-amd64";
-        imageDigest = "sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8";
-        finalImageTag = version;
-        sha256 = "0x583znk9smqn0fix7ld8sm5jgaxhqhx3fq97b1wkqm7iwhvl3pj";
-      };
-    };
-
-    dnsmasq-nanny = mkOption {
-      description = "Docker image to seed for the kube-dns dnsmasq container.";
-      type = types.attrs;
-      default = {
-        imageName = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64";
-        imageDigest = "sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8";
-        finalImageTag = version;
-        sha256 = "1fihml7s2mfwgac51cbqpylkwbivc8nyhgi4vb820s83zvl8a6y1";
-      };
+    replicas = mkOption {
+      description = "Number of DNS pod replicas to deploy in the cluster.";
+      default = 2;
+      type = types.int;
     };
 
-    sidecar = mkOption {
-      description = "Docker image to seed for the kube-dns sidecar container.";
+    coredns = mkOption {
+      description = "Docker image to seed for the CoreDNS container.";
       type = types.attrs;
       default = {
-        imageName = "k8s.gcr.io/k8s-dns-sidecar-amd64";
-        imageDigest = "sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4";
+        imageName = "coredns/coredns";
+        imageDigest = "sha256:33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6";
         finalImageTag = version;
-        sha256 = "08l1bv5jgrhvjzpqpbinrkgvv52snc4fzyd8ya9v18ns2klyz7m0";
+        sha256 = "13q19rgwapv27xcs664dw502254yw4zw63insf6g2danidv2mg6i";
       };
     };
   };
 
   config = mkIf cfg.enable {
-    services.kubernetes.kubelet.seedDockerImages = with pkgs.dockerTools; [
-      (pullImage cfg.kube-dns)
-      (pullImage cfg.dnsmasq-nanny)
-      (pullImage cfg.sidecar)
-    ];
+    services.kubernetes.kubelet.seedDockerImages =
+      singleton (pkgs.dockerTools.pullImage cfg.coredns);
 
     services.kubernetes.addonManager.addons = {
-      kubedns-deployment = {
+      coredns-sa = {
+        apiVersion = "v1";
+        kind = "ServiceAccount";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+      };
+
+      coredns-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRole";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/bootstrapping" = "rbac-defaults";
+          };
+          name = "system:coredns";
+        };
+        rules = [
+          {
+            apiGroups = [ "" ];
+            resources = [ "endpoints" "services" "pods" "namespaces" ];
+            verbs = [ "list" "watch" ];
+          }
+          {
+            apiGroups = [ "" ];
+            resources = [ "nodes" ];
+            verbs = [ "get" ];
+          }
+        ];
+      };
+
+      coredns-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRoleBinding";
+        metadata = {
+          annotations = {
+            "rbac.authorization.kubernetes.io/autoupdate" = "true";
+          };
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/bootstrapping" = "rbac-defaults";
+          };
+          name = "system:coredns";
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "system:coredns";
+        };
+        subjects = [
+          {
+            kind = "ServiceAccount";
+            name = "coredns";
+            namespace = "kube-system";
+          }
+        ];
+      };
+
+      coredns-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+        data = {
+          Corefile = ".:${toString ports.dns} {
+            errors
+            health :${toString ports.health}
+            kubernetes ${cfg.clusterDomain} in-addr.arpa ip6.arpa {
+              pods insecure
+              upstream
+              fallthrough in-addr.arpa ip6.arpa
+            }
+            prometheus :${toString ports.metrics}
+            proxy . /etc/resolv.conf
+            cache 30
+            loop
+            reload
+            loadbalance
+          }";
+        };
+      };
+
+      coredns-deploy = {
         apiVersion = "extensions/v1beta1";
         kind = "Deployment";
         metadata = {
@@ -77,183 +163,97 @@ in {
             "addonmanager.kubernetes.io/mode" = "Reconcile";
             "k8s-app" = "kube-dns";
             "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/name" = "CoreDNS";
           };
-          name = "kube-dns";
+          name = "coredns";
           namespace = "kube-system";
         };
         spec = {
-          selector.matchLabels."k8s-app" = "kube-dns";
+          replicas = cfg.replicas;
+          selector = {
+            matchLabels = { k8s-app = "kube-dns"; };
+          };
           strategy = {
-            rollingUpdate = {
-              maxSurge = "10%";
-              maxUnavailable = 0;
-            };
+            rollingUpdate = { maxUnavailable = 1; };
+            type = "RollingUpdate";
           };
           template = {
             metadata = {
-              annotations."scheduler.alpha.kubernetes.io/critical-pod" = "";
-              labels.k8s-app = "kube-dns";
+              labels = {
+                k8s-app = "kube-dns";
+              };
             };
             spec = {
-              priorityClassName = "system-cluster-critical";
               containers = [
                 {
-                  name = "kubedns";
-                  image = with cfg.kube-dns; "${imageName}:${finalImageTag}";
-                  resources = {
-                    limits.memory = "170Mi";
-                    requests = {
-                      cpu = "100m";
-                      memory = "70Mi";
-                    };
-                  };
+                  args = [ "-conf" "/etc/coredns/Corefile" ];
+                  image = with cfg.coredns; "${imageName}:${finalImageTag}";
+                  imagePullPolicy = "Never";
                   livenessProbe = {
                     failureThreshold = 5;
                     httpGet = {
-                      path = "/healthcheck/kubedns";
-                      port = 10054;
+                      path = "/health";
+                      port = ports.health;
                       scheme = "HTTP";
                     };
                     initialDelaySeconds = 60;
                     successThreshold = 1;
                     timeoutSeconds = 5;
                   };
-                  readinessProbe = {
-                    httpGet = {
-                      path = "/readiness";
-                      port = 8081;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 3;
-                    timeoutSeconds = 5;
-                  };
-                  args = [
-                    "--domain=${cfg.clusterDomain}"
-                    "--dns-port=10053"
-                    "--config-dir=/kube-dns-config"
-                    "--v=2"
-                  ];
-                  env = [
-                    {
-                      name = "PROMETHEUS_PORT";
-                      value = "10055";
-                    }
-                  ];
+                  name = "coredns";
                   ports = [
                     {
-                      containerPort = 10053;
-                      name = "dns-local";
+                      containerPort = ports.dns;
+                      name = "dns";
                       protocol = "UDP";
                     }
                     {
-                      containerPort = 10053;
-                      name = "dns-tcp-local";
+                      containerPort = ports.dns;
+                      name = "dns-tcp";
                       protocol = "TCP";
                     }
                     {
-                      containerPort = 10055;
+                      containerPort = ports.metrics;
                       name = "metrics";
                       protocol = "TCP";
                     }
                   ];
-                  volumeMounts = [
-                    {
-                      mountPath = "/kube-dns-config";
-                      name = "kube-dns-config";
-                    }
-                  ];
-                }
-                {
-                  name = "dnsmasq";
-                  image = with cfg.dnsmasq-nanny; "${imageName}:${finalImageTag}";
-                  livenessProbe = {
-                    httpGet = {
-                      path = "/healthcheck/dnsmasq";
-                      port = 10054;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 60;
-                    timeoutSeconds = 5;
-                    successThreshold = 1;
-                    failureThreshold = 5;
-                  };
-                  args = [
-                    "-v=2"
-                    "-logtostderr"
-                    "-configDir=/etc/k8s/dns/dnsmasq-nanny"
-                    "-restartDnsmasq=true"
-                    "--"
-                    "-k"
-                    "--cache-size=1000"
-                    "--log-facility=-"
-                    "--server=/${cfg.clusterDomain}/127.0.0.1#10053"
-                    "--server=/in-addr.arpa/127.0.0.1#10053"
-                    "--server=/ip6.arpa/127.0.0.1#10053"
-                  ];
-                  ports = [
-                    {
-                      containerPort = 53;
-                      name = "dns";
-                      protocol = "UDP";
-                    }
-                    {
-                      containerPort = 53;
-                      name = "dns-tcp";
-                      protocol = "TCP";
-                    }
-                  ];
                   resources = {
+                    limits = {
+                      memory = "170Mi";
+                    };
                     requests = {
-                      cpu = "150m";
-                      memory = "20Mi";
+                      cpu = "100m";
+                      memory = "70Mi";
                     };
                   };
-                  volumeMounts = [
-                    {
-                      mountPath = "/etc/k8s/dns/dnsmasq-nanny";
-                      name = "kube-dns-config";
-                    }
-                  ];
-                }
-                {
-                  name = "sidecar";
-                  image = with cfg.sidecar; "${imageName}:${finalImageTag}";
-                  livenessProbe = {
-                    httpGet = {
-                      path = "/metrics";
-                      port = 10054;
-                      scheme = "HTTP";
+                  securityContext = {
+                    allowPrivilegeEscalation = false;
+                    capabilities = {
+                      drop = [ "all" ];
                     };
-                    initialDelaySeconds = 60;
-                    timeoutSeconds = 5;
-                    successThreshold = 1;
-                    failureThreshold = 5;
+                    readOnlyRootFilesystem = true;
                   };
-                  args = [
-                    "--v=2"
-                    "--logtostderr"
-                    "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
-                    "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
-                  ];
-                  ports = [
+                  volumeMounts = [
                     {
-                      containerPort = 10054;
-                      name = "metrics";
-                      protocol = "TCP";
+                      mountPath = "/etc/coredns";
+                      name = "config-volume";
+                      readOnly = true;
                     }
                   ];
-                  resources = {
-                    requests = {
-                      cpu = "10m";
-                      memory = "20Mi";
-                    };
-                  };
                 }
               ];
               dnsPolicy = "Default";
-              serviceAccountName = "kube-dns";
+              nodeSelector = {
+                "beta.kubernetes.io/os" = "linux";
+              };
+              serviceAccountName = "coredns";
               tolerations = [
                 {
+                  effect = "NoSchedule";
+                  key = "node-role.kubernetes.io/master";
+                }
+                {
                   key = "CriticalAddonsOnly";
                   operator = "Exists";
                 }
@@ -261,10 +261,15 @@ in {
               volumes = [
                 {
                   configMap = {
-                    name = "kube-dns";
-                    optional = true;
+                    items = [
+                      {
+                        key = "Corefile";
+                        path = "Corefile";
+                      }
+                    ];
+                    name = "coredns";
                   };
-                  name = "kube-dns-config";
+                  name = "config-volume";
                 }
               ];
             };
@@ -272,51 +277,40 @@ in {
         };
       };
 
-      kubedns-svc = {
+      coredns-svc = {
         apiVersion = "v1";
         kind = "Service";
         metadata = {
+          annotations = {
+            "prometheus.io/port" = toString ports.metrics;
+            "prometheus.io/scrape" = "true";
+          };
           labels = {
             "addonmanager.kubernetes.io/mode" = "Reconcile";
             "k8s-app" = "kube-dns";
             "kubernetes.io/cluster-service" = "true";
-            "kubernetes.io/name" = "KubeDNS";
+            "kubernetes.io/name" = "CoreDNS";
           };
           name = "kube-dns";
-          namespace  = "kube-system";
+          namespace = "kube-system";
         };
         spec = {
           clusterIP = cfg.clusterIp;
           ports = [
-            {name = "dns"; port = 53; protocol = "UDP";}
-            {name = "dns-tcp"; port = 53; protocol = "TCP";}
+            {
+              name = "dns";
+              port = 53;
+              targetPort = ports.dns;
+              protocol = "UDP";
+            }
+            {
+              name = "dns-tcp";
+              port = 53;
+              targetPort = ports.dns;
+              protocol = "TCP";
+            }
           ];
-          selector.k8s-app = "kube-dns";
-        };
-      };
-
-      kubedns-sa = {
-        apiVersion = "v1";
-        kind = "ServiceAccount";
-        metadata = {
-          name = "kube-dns";
-          namespace = "kube-system";
-          labels = {
-            "kubernetes.io/cluster-service" = "true";
-            "addonmanager.kubernetes.io/mode" = "Reconcile";
-          };
-        };
-      };
-
-      kubedns-cm = {
-        apiVersion = "v1";
-        kind = "ConfigMap";
-        metadata = {
-          name = "kube-dns";
-          namespace = "kube-system";
-          labels = {
-            "addonmanager.kubernetes.io/mode" = "EnsureExists";
-          };
+          selector = { k8s-app = "kube-dns"; };
         };
       };
     };
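
For clusters that need a different CoreDNS build, the seeded image can be swapped out through the coredns option defined above; a sketch, where the digest and hash values are placeholders to be filled in with real values:

    services.kubernetes.addons.dns = {
      enable = true;
      coredns = {
        imageName = "coredns/coredns";
        imageDigest = "sha256:<image-digest>";  # placeholder
        finalImageTag = "1.2.5";
        sha256 = "<nix-store-hash>";            # placeholder
      };
    };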
diff --git a/nixos/modules/services/development/jupyter/default.nix b/nixos/modules/services/development/jupyter/default.nix
index 9fcc00431865..f20860af6e12 100644
--- a/nixos/modules/services/development/jupyter/default.nix
+++ b/nixos/modules/services/development/jupyter/default.nix
@@ -145,6 +145,7 @@ in {
       systemd.services.jupyter = {
         description = "Jupyter development server";
 
+        after = [ "network.target" ];
         wantedBy = [ "multi-user.target" ];
 
         # TODO: Patch notebook so we can explicitly pass in a shell
diff --git a/nixos/modules/services/mail/rspamd.nix b/nixos/modules/services/mail/rspamd.nix
index d83d6f1f750c..1c37ae41e07d 100644
--- a/nixos/modules/services/mail/rspamd.nix
+++ b/nixos/modules/services/mail/rspamd.nix
@@ -6,6 +6,7 @@ let
 
   cfg = config.services.rspamd;
   opts = options.services.rspamd;
+  postfixCfg = config.services.postfix;
 
   bindSocketOpts = {options, config, ... }: {
     options = {
@@ -58,7 +59,7 @@ let
       };
       type = mkOption {
         type = types.nullOr (types.enum [
-          "normal" "controller" "fuzzy_storage" "proxy" "lua"
+          "normal" "controller" "fuzzy_storage" "rspamd_proxy" "lua"
         ]);
         description = "The type of this worker";
       };
@@ -99,19 +100,21 @@ let
         description = "Additional entries to put verbatim into worker section of rspamd config file.";
       };
     };
-    config = mkIf (name == "normal" || name == "controller" || name == "fuzzy") {
+    config = mkIf (name == "normal" || name == "controller" || name == "fuzzy" || name == "rspamd_proxy") {
       type = mkDefault name;
-      includes = mkDefault [ "$CONFDIR/worker-${name}.inc" ];
-      bindSockets = mkDefault (if name == "normal"
-        then [{
-              socket = "/run/rspamd/rspamd.sock";
-              mode = "0660";
-              owner = cfg.user;
-              group = cfg.group;
-            }]
-        else if name == "controller"
-        then [ "localhost:11334" ]
-        else [] );
+      includes = mkDefault [ "$CONFDIR/worker-${if name == "rspamd_proxy" then "proxy" else name}.inc" ];
+      bindSockets =
+        let
+          unixSocket = name: {
+            mode = "0660";
+            socket = "/run/rspamd/${name}.sock";
+            owner = cfg.user;
+            group = cfg.group;
+          };
+        in mkDefault (if name == "normal" then [(unixSocket "rspamd")]
+          else if name == "controller" then [ "localhost:11334" ]
+          else if name == "rspamd_proxy" then [ (unixSocket "proxy") ]
+          else [] );
     };
   };
 
@@ -138,24 +141,31 @@ let
         .include(try=true; priority=10) "$LOCAL_CONFDIR/override.d/logging.inc"
       }
 
-      ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
-        worker ${optionalString (value.name != "normal" && value.name != "controller") "${value.name}"} {
+      ${concatStringsSep "\n" (mapAttrsToList (name: value: let
+          includeName = if name == "rspamd_proxy" then "proxy" else name;
+          tryOverride = if value.extraConfig == "" then "true" else "false";
+        in ''
+        worker "${value.type}" {
           type = "${value.type}";
           ${optionalString (value.enable != null)
             "enabled = ${if value.enable != false then "yes" else "no"};"}
           ${mkBindSockets value.enable value.bindSockets}
           ${optionalString (value.count != null) "count = ${toString value.count};"}
           ${concatStringsSep "\n  " (map (each: ".include \"${each}\"") value.includes)}
-          ${value.extraConfig}
+          .include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/worker-${includeName}.inc"
+          .include(try=${tryOverride}; priority=10) "$LOCAL_CONFDIR/override.d/worker-${includeName}.inc"
         }
       '') cfg.workers)}
 
-      ${cfg.extraConfig}
+      ${optionalString (cfg.extraConfig != "") ''
+        .include(priority=10) "$LOCAL_CONFDIR/override.d/extra-config.inc"
+      ''}
    '';
 
+  filterFiles = files: filterAttrs (n: v: v.enable) files;
   rspamdDir = pkgs.linkFarm "etc-rspamd-dir" (
-    (mapAttrsToList (name: file: { name = "local.d/${name}"; path = file.source; }) cfg.locals) ++
-    (mapAttrsToList (name: file: { name = "override.d/${name}"; path = file.source; }) cfg.overrides) ++
+    (mapAttrsToList (name: file: { name = "local.d/${name}"; path = file.source; }) (filterFiles cfg.locals)) ++
+    (mapAttrsToList (name: file: { name = "override.d/${name}"; path = file.source; }) (filterFiles cfg.overrides)) ++
     (optional (cfg.localLuaRules != null) { name = "rspamd.local.lua"; path = cfg.localLuaRules; }) ++
     [ { name = "rspamd.conf"; path = rspamdConfFile; } ]
   );
@@ -188,6 +198,15 @@ let
         in mkDefault (pkgs.writeText name' config.text));
     };
   };
+
+  configOverrides =
+    (mapAttrs' (n: v: nameValuePair "worker-${if n == "rspamd_proxy" then "proxy" else n}.inc" {
+      text = v.extraConfig;
+    })
+    (filterAttrs (n: v: v.extraConfig != "") cfg.workers))
+    // (if cfg.extraConfig == "" then {} else {
+      "extra-config.inc".text = cfg.extraConfig;
+    });
 in
 
 {
@@ -207,7 +226,7 @@ in
       };
 
       locals = mkOption {
-        type = with types; loaOf (submodule (configFileModule "locals"));
+        type = with types; attrsOf (submodule (configFileModule "locals"));
         default = {};
         description = ''
           Local configuration files, written into <filename>/etc/rspamd/local.d/{name}</filename>.
@@ -220,7 +239,7 @@ in
       };
 
       overrides = mkOption {
-        type = with types; loaOf (submodule (configFileModule "overrides"));
+        type = with types; attrsOf (submodule (configFileModule "overrides"));
         default = {};
         description = ''
           Overridden configuration files, written into <filename>/etc/rspamd/override.d/{name}</filename>.
@@ -284,7 +303,7 @@ in
         description = ''
           User to use when no root privileges are required.
         '';
-       };
+      };
 
       group = mkOption {
         type = types.string;
@@ -292,7 +311,30 @@ in
         description = ''
           Group to use when no root privileges are required.
         '';
-       };
+      };
+
+      postfix = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = "Add rspamd milter to postfix main.conf";
+        };
+
+        config = mkOption {
+          type = with types; attrsOf (either bool (either str (listOf str)));
+          description = ''
+            Configuration lines added to the Postfix main configuration
+          '';
+          default = {
+            smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
+            non_smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
+          };
+          example = {
+            smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
+            non_smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
+          };
+        };
+      };
     };
   };
 
@@ -300,6 +342,25 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
+    services.rspamd.overrides = configOverrides;
+    services.rspamd.workers = mkIf cfg.postfix.enable {
+      controller = {};
+      rspamd_proxy = {
+        bindSockets = [ {
+          mode = "0660";
+          socket = "/run/rspamd/rspamd-milter.sock";
+          owner = cfg.user;
+          group = postfixCfg.group;
+        } ];
+        extraConfig = ''
+          upstream "local" {
+            default = yes; # Self-scan upstreams are always default
+            self_scan = yes; # Enable self-scan
+          }
+        '';
+      };
+    };
+    services.postfix.config = mkIf cfg.postfix.enable cfg.postfix.config;
 
     # Allow users to run 'rspamc' and 'rspamadm'.
     environment.systemPackages = [ pkgs.rspamd ];
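
With the new postfix sub-options, hooking rspamd into an existing Postfix instance reduces to a couple of lines; a minimal sketch:

    services.postfix.enable = true;
    services.rspamd = {
      enable = true;
      # Wires the rspamd_proxy milter socket into the Postfix config
      postfix.enable = true;
    };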
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index a222325579fe..7a10bd872994 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -6,6 +6,7 @@ let
   cfg = config.services.gitea;
   gitea = cfg.package;
   pg = config.services.postgresql;
+  useMysql = cfg.database.type == "mysql";
   usePostgresql = cfg.database.type == "postgres";
   configFile = pkgs.writeText "app.ini" ''
     APP_NAME = ${cfg.appName}
@@ -14,7 +15,7 @@ let
 
     [database]
     DB_TYPE = ${cfg.database.type}
-    HOST = ${cfg.database.host}:${toString cfg.database.port}
+    HOST = ${if cfg.database.socket != null then cfg.database.socket else cfg.database.host + ":" + toString cfg.database.port}
     NAME = ${cfg.database.name}
     USER = ${cfg.database.user}
     PASSWD = #dbpass#
@@ -148,6 +149,13 @@ in
           '';
         };
 
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/mysqld/mysqld.sock";
+          description = "Path to the unix socket file to use for authentication.";
+        };
+
         path = mkOption {
           type = types.str;
           default = "${cfg.stateDir}/data/gitea.db";
@@ -253,7 +261,7 @@ in
 
     systemd.services.gitea = {
       description = "gitea";
-      after = [ "network.target" "postgresql.service" ];
+      after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
       wantedBy = [ "multi-user.target" ];
       path = [ gitea.bin ];
 
diff --git a/nixos/modules/services/misc/packagekit.nix b/nixos/modules/services/misc/packagekit.nix
index 2d1ff7bb4117..bce21e8acff3 100644
--- a/nixos/modules/services/misc/packagekit.nix
+++ b/nixos/modules/services/misc/packagekit.nix
@@ -6,11 +6,8 @@ let
 
   cfg = config.services.packagekit;
 
-  backend = "nix";
-
   packagekitConf = ''
 [Daemon]
-DefaultBackend=${backend}
 KeepCache=false
     '';
 
diff --git a/nixos/modules/services/monitoring/alerta.nix b/nixos/modules/services/monitoring/alerta.nix
new file mode 100644
index 000000000000..8f4258e26ded
--- /dev/null
+++ b/nixos/modules/services/monitoring/alerta.nix
@@ -0,0 +1,116 @@
+{ options, config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.alerta;
+
+  alertaConf = pkgs.writeTextFile {
+    name = "alertad.conf";
+    text = ''
+      DATABASE_URL = '${cfg.databaseUrl}'
+      DATABASE_NAME = '${cfg.databaseName}'
+      LOG_FILE = '${cfg.logDir}/alertad.log'
+      LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+      CORS_ORIGINS = [ ${concatMapStringsSep ", " (s: "\"" + s + "\"") cfg.corsOrigins} ];
+      AUTH_REQUIRED = ${if cfg.authenticationRequired then "True" else "False"}
+      SIGNUP_ENABLED = ${if cfg.signupEnabled then "True" else "False"}
+      ${cfg.extraConfig}
+    '';
+  };
+in
+{
+  options.services.alerta = {
+    enable = mkEnableOption "alerta";
+
+    port = mkOption {
+      type = types.int;
+      default = 5000;
+      description = "Port of Alerta";
+    };
+
+    bind = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      example = literalExample "0.0.0.0";
+      description = "Address to bind to. The default is to bind to all addresses";
+    };
+
+    logDir = mkOption {
+      type = types.path;
+      description = "Location where the logfiles are stored";
+      default = "/var/log/alerta";
+    };
+
+    databaseUrl = mkOption {
+      type = types.str;
+      description = "URL of the MongoDB or PostgreSQL database to connect to";
+      default = "mongodb://localhost";
+      example = "mongodb://localhost";
+    };
+
+    databaseName = mkOption {
+      type = types.str;
+      description = "Name of the database instance to connect to";
+      default = "monitoring";
+      example = "monitoring";
+    };
+
+    corsOrigins = mkOption {
+      type = types.listOf types.str;
+      description = "List of URLs that can access the API for Cross-Origin Resource Sharing (CORS)";
+      example = [ "http://localhost" "http://localhost:5000" ];
+      default = [ "http://localhost" "http://localhost:5000" ];
+    };
+
+    authenticationRequired = mkOption {
+      type = types.bool;
+      description = "Whether users must authenticate when using the web UI or command-line tool";
+      default = false;
+    };
+
+    signupEnabled = mkOption {
+      type = types.bool;
+      description = "Whether to prevent sign-up of new users via the web UI";
+      default = true;
+    };
+
+    extraConfig = mkOption {
+      description = "These lines go into alertad.conf verbatim.";
+      default = "";
+      type = types.lines;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.alerta = {
+      description = "Alerta Monitoring System";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      environment = {
+        ALERTA_SVR_CONF_FILE = alertaConf;
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.python36Packages.alerta-server}/bin/alertad run --port ${toString cfg.port} --host ${cfg.bind}";
+        User = "alerta";
+        Group = "alerta";
+        PermissionsStartOnly = true;
+      };
+      preStart = ''
+        mkdir -p ${cfg.logDir}
+        chown alerta:alerta ${cfg.logDir}
+      '';
+    };
+
+    environment.systemPackages = [ pkgs.python36Packages.alerta ];
+
+    users.users.alerta = {
+      uid = config.ids.uids.alerta;
+      description = "Alerta user";
+    };
+
+    users.groups.alerta = {
+      gid = config.ids.gids.alerta;
+    };
+  };
+}
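
A minimal sketch of enabling the new Alerta module against a local MongoDB; every option shown is defined in the module above:

    services.mongodb.enable = true;  # backing database
    services.alerta = {
      enable = true;
      databaseUrl = "mongodb://localhost";
      databaseName = "monitoring";
    };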
diff --git a/nixos/modules/services/monitoring/kapacitor.nix b/nixos/modules/services/monitoring/kapacitor.nix
index 1de0a8d5af2f..a4bdfa8f8053 100644
--- a/nixos/modules/services/monitoring/kapacitor.nix
+++ b/nixos/modules/services/monitoring/kapacitor.nix
@@ -42,6 +42,15 @@ let
           password = "${cfg.defaultDatabase.password}"
       ''}
 
+      ${optionalString (cfg.alerta.enable) ''
+        [alerta]
+          enabled = true
+          url = "${cfg.alerta.url}"
+          token = "${cfg.alerta.token}"
+          environment = "${cfg.alerta.environment}"
+          origin = "${cfg.alerta.origin}"
+      ''}
+
       ${cfg.extraConfig}
     '';
   };
@@ -120,6 +129,35 @@ in
         type = types.string;
       };
     };
+
+    alerta = {
+      enable = mkEnableOption "kapacitor alerta integration";
+
+      url = mkOption {
+        description = "The URL to the Alerta REST API";
+        default = "http://localhost:5000";
+        example = "http://localhost:5000";
+        type = types.string;
+      };
+
+      token = mkOption {
+        description = "Default Alerta authentication token";
+        type = types.str;
+        default = "";
+      };
+
+      environment = mkOption {
+        description = "Default Alerta environment";
+        type = types.str;
+        default = "Production";
+      };
+
+      origin = mkOption {
+        description = "Default origin of alert";
+        type = types.str;
+        default = "kapacitor";
+      };
+    };
   };
 
   config = mkIf cfg.enable {
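
The Kapacitor end of the integration then only needs to point at that Alerta instance; a sketch, where the token is a placeholder for a real API key:

    services.kapacitor.alerta = {
      enable = true;
      url = "http://localhost:5000";
      token = "<alerta-api-token>";  # placeholder
      environment = "Production";
      origin = "kapacitor";
    };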
diff --git a/nixos/modules/system/boot/stage-1-init.sh b/nixos/modules/system/boot/stage-1-init.sh
index 3bc33a20a09f..6a4ac8128ab3 100644
--- a/nixos/modules/system/boot/stage-1-init.sh
+++ b/nixos/modules/system/boot/stage-1-init.sh
@@ -246,10 +246,7 @@ checkFS() {
     if [ "$fsType" = iso9660 -o "$fsType" = udf ]; then return 0; fi
 
     # Don't check resilient COWs as they validate the fs structures at mount time
-    if [ "$fsType" = btrfs -o "$fsType" = zfs ]; then return 0; fi
-
-    # Skip fsck for bcachefs - not implemented yet.
-    if [ "$fsType" = bcachefs ]; then return 0; fi
+    if [ "$fsType" = btrfs -o "$fsType" = zfs -o "$fsType" = bcachefs ]; then return 0; fi
 
     # Skip fsck for nilfs2 - not needed by design and no fsck tool for this filesystem.
     if [ "$fsType" = nilfs2 ]; then return 0; fi
diff --git a/nixos/modules/tasks/filesystems.nix b/nixos/modules/tasks/filesystems.nix
index b3690fad1a6a..9e4057b50897 100644
--- a/nixos/modules/tasks/filesystems.nix
+++ b/nixos/modules/tasks/filesystems.nix
@@ -230,6 +230,8 @@ in
       let
         fsToSkipCheck = [ "none" "bindfs" "btrfs" "zfs" "tmpfs" "nfs" "vboxsf" "glusterfs" ];
         skipCheck = fs: fs.noCheck || fs.device == "none" || builtins.elem fs.fsType fsToSkipCheck;
+        # https://wiki.archlinux.org/index.php/fstab#Filepath_spaces
+        escape = string: builtins.replaceStrings [ " " ] [ "\\040" ] string;
       in ''
         # This is a generated file.  Do not edit!
         #
@@ -238,10 +240,10 @@ in
 
         # Filesystems.
         ${concatMapStrings (fs:
-            (if fs.device != null then fs.device
-             else if fs.label != null then "/dev/disk/by-label/${fs.label}"
+            (if fs.device != null then escape fs.device
+             else if fs.label != null then "/dev/disk/by-label/${escape fs.label}"
              else throw "No device specified for mount point ‘${fs.mountPoint}’.")
-            + " " + fs.mountPoint
+            + " " + escape fs.mountPoint
             + " " + fs.fsType
             + " " + builtins.concatStringsSep "," fs.options
             + " 0"
diff --git a/nixos/modules/tasks/filesystems/bcachefs.nix b/nixos/modules/tasks/filesystems/bcachefs.nix
index 227707173a3d..5fda24adb978 100644
--- a/nixos/modules/tasks/filesystems/bcachefs.nix
+++ b/nixos/modules/tasks/filesystems/bcachefs.nix
@@ -1,26 +1,65 @@
-{ config, lib, pkgs, ... }:
+{ config, lib, pkgs, utils, ... }:
 
 with lib;
 
 let
 
-  inInitrd = any (fs: fs == "bcachefs") config.boot.initrd.supportedFilesystems;
+  bootFs = filterAttrs (n: fs: (fs.fsType == "bcachefs") && (utils.fsNeededForBoot fs)) config.fileSystems;
+
+  commonFunctions = ''
+    prompt() {
+        local name="$1"
+        printf "enter passphrase for $name: "
+    }
+    tryUnlock() {
+        local name="$1"
+        local path="$2"
+        if bcachefs unlock -c $path > /dev/null 2> /dev/null; then    # test for encryption
+            prompt $name
+            until bcachefs unlock $path 2> /dev/null; do              # repeat until successfully unlocked
+                printf "unlocking failed!\n"
+                prompt $name
+            done
+            printf "unlocking successful.\n"
+        fi
+    }
+  '';
+
+  openCommand = name: fs:
+    let
+      # we need only unlock one device manually, and cannot pass multiple at once
+      # remove this adaptation when bcachefs implements mounting by filesystem uuid
+      # also, implement automatic waiting for the constituent devices when that happens
+      # bcachefs does not support mounting devices with colons in the path, ergo we don't (see #49671)
+      firstDevice = head (splitString ":" fs.device);
+    in
+      ''
+        tryUnlock ${name} ${firstDevice}
+      '';
 
 in
 
 {
-  config = mkIf (any (fs: fs == "bcachefs") config.boot.supportedFilesystems) {
+  config = mkIf (elem "bcachefs" config.boot.supportedFilesystems) (mkMerge [
+    {
+      system.fsPackages = [ pkgs.bcachefs-tools ];
 
-    system.fsPackages = [ pkgs.bcachefs-tools ];
+      # use kernel package with bcachefs support until it's in mainline
+      boot.kernelPackages = pkgs.linuxPackages_testing_bcachefs;
+    }
 
-    # use kernel package with bcachefs support until it's in mainline
-    boot.kernelPackages = pkgs.linuxPackages_testing_bcachefs;
-    boot.initrd.availableKernelModules = mkIf inInitrd [ "bcachefs" ];
+    (mkIf ((elem "bcachefs" config.boot.initrd.supportedFilesystems) || (bootFs != {})) {
+      # the cryptographic modules are required only for decryption attempts
+      boot.initrd.availableKernelModules = [ "bcachefs" "chacha20" "poly1305" ];
 
-    boot.initrd.extraUtilsCommands = mkIf inInitrd
-      ''
-        copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/fsck.bcachefs
+      boot.initrd.extraUtilsCommands = ''
+        copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/bcachefs
+      '';
+      boot.initrd.extraUtilsCommandsTest = ''
+        $out/bin/bcachefs version
       '';
 
-  };
+      boot.initrd.postDeviceCommands = commonFunctions + concatStrings (mapAttrsToList openCommand bootFs);
+    })
+  ]);
 }
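
For context, a multi-device encrypted root that exercises the unlock helpers above could be declared as follows (device paths are placeholders); per the comment in openCommand, only the first device of the colon-separated list is passed to bcachefs unlock:

    fileSystems."/" = {
      device = "/dev/sda1:/dev/sdb1";  # placeholder devices
      fsType = "bcachefs";
    };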
diff --git a/nixos/release.nix b/nixos/release.nix
index 4647f28be186..b7f8c01bb000 100644
--- a/nixos/release.nix
+++ b/nixos/release.nix
@@ -45,6 +45,7 @@ let
       system.nixos.revision = nixpkgs.rev or nixpkgs.shortRev;
     };
 
+  makeModules = module: rest: [ configuration versionModule module rest ];
 
   makeIso =
     { module, type, system, ... }:
@@ -53,7 +54,9 @@ let
 
     hydraJob ((import lib/eval-config.nix {
       inherit system;
-      modules = [ configuration module versionModule { isoImage.isoBaseName = "nixos-${type}"; } ];
+      modules = makeModules module {
+        isoImage.isoBaseName = "nixos-${type}";
+      };
     }).config.system.build.isoImage);
 
 
@@ -64,7 +67,7 @@ let
 
     hydraJob ((import lib/eval-config.nix {
       inherit system;
-      modules = [ configuration module versionModule ];
+      modules = makeModules module {};
     }).config.system.build.sdImage);
 
 
@@ -77,7 +80,7 @@ let
 
       config = (import lib/eval-config.nix {
         inherit system;
-        modules = [ configuration module versionModule ];
+        modules = makeModules module {};
       }).config;
 
       tarball = config.system.build.tarball;
@@ -97,7 +100,7 @@ let
 
   buildFromConfig = module: sel: forAllSystems (system: hydraJob (sel (import ./lib/eval-config.nix {
     inherit system;
-    modules = [ configuration module versionModule ] ++ singleton
+    modules = makeModules module
       ({ ... }:
       { fileSystems."/".device  = mkDefault "/dev/sda1";
         boot.loader.grub.device = mkDefault "/dev/sda";
@@ -108,7 +111,7 @@ let
     let
       configEvaled = import lib/eval-config.nix {
         inherit system;
-        modules = [ module versionModule ];
+        modules = makeModules module {};
       };
       build = configEvaled.config.system.build;
       kernelTarget = configEvaled.pkgs.stdenv.hostPlatform.platform.kernelTarget;
@@ -301,6 +304,7 @@ in rec {
   tests.fsck = callTest tests/fsck.nix {};
   tests.fwupd = callTest tests/fwupd.nix {};
   tests.gdk-pixbuf = callTest tests/gdk-pixbuf.nix {};
+  tests.gitea = callSubTests tests/gitea.nix {};
   tests.gitlab = callTest tests/gitlab.nix {};
   tests.gitolite = callTest tests/gitolite.nix {};
   tests.gjs = callTest tests/gjs.nix {};
diff --git a/nixos/tests/gitea.nix b/nixos/tests/gitea.nix
new file mode 100644
index 000000000000..7ffe05ef3f1f
--- /dev/null
+++ b/nixos/tests/gitea.nix
@@ -0,0 +1,74 @@
+{ system ? builtins.currentSystem }:
+
+with import ../lib/testing.nix { inherit system; };
+with pkgs.lib;
+
+{
+  mysql = makeTest {
+    name = "gitea-mysql";
+    meta.maintainers = [ maintainers.aanderse ];
+
+    machine =
+      { config, pkgs, ... }:
+      { services.mysql.enable = true;
+        services.mysql.package = pkgs.mariadb;
+        services.mysql.ensureDatabases = [ "gitea" ];
+        services.mysql.ensureUsers = [
+          { name = "gitea";
+            ensurePermissions = { "gitea.*" = "ALL PRIVILEGES"; };
+          }
+        ];
+
+        services.gitea.enable = true;
+        services.gitea.database.type = "mysql";
+        services.gitea.database.socket = "/run/mysqld/mysqld.sock";
+      };
+
+    testScript = ''
+      startAll;
+
+      $machine->waitForUnit('gitea.service');
+      $machine->waitForOpenPort('3000');
+      $machine->succeed("curl --fail http://localhost:3000/");
+    '';
+  };
+
+  postgres = makeTest {
+    name = "gitea-postgres";
+    meta.maintainers = [ maintainers.aanderse ];
+
+    machine =
+      { config, pkgs, ... }:
+      {
+        services.gitea.enable = true;
+        services.gitea.database.type = "postgres";
+        services.gitea.database.password = "secret";
+      };
+
+    testScript = ''
+      startAll;
+
+      $machine->waitForUnit('gitea.service');
+      $machine->waitForOpenPort('3000');
+      $machine->succeed("curl --fail http://localhost:3000/");
+    '';
+  };
+
+  sqlite = makeTest {
+    name = "gitea-sqlite";
+    meta.maintainers = [ maintainers.aanderse ];
+
+    machine =
+      { config, pkgs, ... }:
+      { services.gitea.enable = true;
+      };
+
+    testScript = ''
+      startAll;
+
+      $machine->waitForUnit('gitea.service');
+      $machine->waitForOpenPort('3000');
+      $machine->succeed("curl --fail http://localhost:3000/");
+    '';
+  };
+}
diff --git a/nixos/tests/hydra/create-trivial-project.sh b/nixos/tests/hydra/create-trivial-project.sh
index 3cca5665acc5..39122c9b473a 100755
--- a/nixos/tests/hydra/create-trivial-project.sh
+++ b/nixos/tests/hydra/create-trivial-project.sh
@@ -31,7 +31,8 @@ mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
 cat >data.json <<EOF
 {
   "displayname":"Trivial",
-  "enabled":"1"
+  "enabled":"1",
+  "visible":"1"
 }
 EOF
 mycurl --silent -X PUT $URL/project/$PROJECT_NAME -d @data.json -b hydra-cookie.txt
diff --git a/nixos/tests/kubernetes/dns.nix b/nixos/tests/kubernetes/dns.nix
index 30e1acd255a0..f25ea5b9ed84 100644
--- a/nixos/tests/kubernetes/dns.nix
+++ b/nixos/tests/kubernetes/dns.nix
@@ -87,7 +87,7 @@ let
       # check if pods are running
       $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
       $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
-      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'kube-dns.*3/3'");
+      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");
 
       # check dns on host (dnsmasq)
       $machine1->succeed("host redis.default.svc.cluster.local");
@@ -111,7 +111,7 @@ let
       # check if pods are running
       $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
       $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
-      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'kube-dns.*3/3'");
+      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");
 
       # check dns on hosts (dnsmasq)
       $machine1->succeed("host redis.default.svc.cluster.local");
diff --git a/nixos/tests/opensmtpd.nix b/nixos/tests/opensmtpd.nix
index 4d3479168f70..883ad7604941 100644
--- a/nixos/tests/opensmtpd.nix
+++ b/nixos/tests/opensmtpd.nix
@@ -120,4 +120,6 @@ import ./make-test.nix {
     $smtp2->waitUntilFails('smtpctl show queue | egrep .');
     $client->succeed('check-mail-landed >&2');
   '';
+
+  meta.timeout = 30;
 }
diff --git a/nixos/tests/rspamd.nix b/nixos/tests/rspamd.nix
index af765f37b91b..c2175f1bc257 100644
--- a/nixos/tests/rspamd.nix
+++ b/nixos/tests/rspamd.nix
@@ -28,6 +28,8 @@ let
       ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "660" }
       sleep 10;
       $machine->log($machine->succeed("cat /etc/rspamd/rspamd.conf"));
+      $machine->log($machine->succeed("grep 'CONFDIR/worker-controller.inc' /etc/rspamd/rspamd.conf"));
+      $machine->log($machine->succeed("grep 'CONFDIR/worker-normal.inc' /etc/rspamd/rspamd.conf"));
       $machine->log($machine->succeed("systemctl cat rspamd.service"));
       $machine->log($machine->succeed("curl http://localhost:11334/auth"));
       $machine->log($machine->succeed("curl http://127.0.0.1:11334/auth"));
@@ -56,6 +58,8 @@ in
       ${checkSocket "/run/rspamd.sock" "root" "root" "600" }
       ${checkSocket "/run/rspamd-worker.sock" "root" "root" "666" }
       $machine->log($machine->succeed("cat /etc/rspamd/rspamd.conf"));
+      $machine->log($machine->succeed("grep 'CONFDIR/worker-controller.inc' /etc/rspamd/rspamd.conf"));
+      $machine->log($machine->succeed("grep 'CONFDIR/worker-normal.inc' /etc/rspamd/rspamd.conf"));
       $machine->log($machine->succeed("rspamc -h /run/rspamd-worker.sock stat"));
       $machine->log($machine->succeed("curl --unix-socket /run/rspamd-worker.sock http://localhost/ping"));
     '';
@@ -78,6 +82,15 @@ in
           owner = "root";
           group = "root";
         }];
+        workers.controller2 = {
+          type = "controller";
+          bindSockets = [ "0.0.0.0:11335" ];
+          extraConfig = ''
+            static_dir = "''${WWWDIR}";
+            secure_ip = null;
+            password = "verysecretpassword";
+          '';
+        };
       };
     };
 
@@ -87,8 +100,14 @@ in
       ${checkSocket "/run/rspamd.sock" "root" "root" "600" }
       ${checkSocket "/run/rspamd-worker.sock" "root" "root" "666" }
       $machine->log($machine->succeed("cat /etc/rspamd/rspamd.conf"));
+      $machine->log($machine->succeed("grep 'CONFDIR/worker-controller.inc' /etc/rspamd/rspamd.conf"));
+      $machine->log($machine->succeed("grep 'CONFDIR/worker-normal.inc' /etc/rspamd/rspamd.conf"));
+      $machine->log($machine->succeed("grep 'LOCAL_CONFDIR/override.d/worker-controller2.inc' /etc/rspamd/rspamd.conf"));
+      $machine->log($machine->succeed("grep 'verysecretpassword' /etc/rspamd/override.d/worker-controller2.inc"));
+      $machine->waitUntilSucceeds("journalctl -u rspamd | grep -i 'starting controller process' >&2");
       $machine->log($machine->succeed("rspamc -h /run/rspamd-worker.sock stat"));
       $machine->log($machine->succeed("curl --unix-socket /run/rspamd-worker.sock http://localhost/ping"));
+      $machine->log($machine->succeed("curl http://localhost:11335/ping"));
     '';
   };
   customLuaRules = makeTest {
@@ -110,16 +129,33 @@ in
       '';
       services.rspamd = {
         enable = true;
-        locals."groups.conf".text = ''
-          group "cows" {
-            symbol {
-              NO_MUH = {
-                weight = 1.0;
-                description = "Mails should not muh";
+        locals = {
+          "antivirus.conf" = mkIf false { text = ''
+              clamav {
+                action = "reject";
+                symbol = "CLAM_VIRUS";
+                type = "clamav";
+                log_clean = true;
+                servers = "/run/clamav/clamd.ctl";
+              }
+            '';};
+          "redis.conf" = {
+            enable = false;
+            text = ''
+              servers = "127.0.0.1";
+            '';
+          };
+          "groups.conf".text = ''
+            group "cows" {
+              symbol {
+                NO_MUH = {
+                  weight = 1.0;
+                  description = "Mails should not muh";
+                }
               }
             }
-          }
-        '';
+          '';
+        };
         localLuaRules = pkgs.writeText "rspamd.local.lua" ''
           local rspamd_logger = require "rspamd_logger"
           rspamd_config.NO_MUH = {
@@ -152,6 +188,10 @@ in
       $machine->log($machine->succeed("cat /etc/rspamd/rspamd.conf"));
       $machine->log($machine->succeed("cat /etc/rspamd/rspamd.local.lua"));
       $machine->log($machine->succeed("cat /etc/rspamd/local.d/groups.conf"));
+      # Verify that redis.conf was not written
+      $machine->fail("cat /etc/rspamd/local.d/redis.conf >&2");
+      # Verify that antivirus.conf was not written
+      $machine->fail("cat /etc/rspamd/local.d/antivirus.conf >&2");
       ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "660" }
       $machine->log($machine->succeed("curl --unix-socket /run/rspamd/rspamd.sock http://localhost/ping"));
       $machine->log($machine->succeed("rspamc -h 127.0.0.1:11334 stat"));
@@ -162,4 +202,48 @@ in
       $machine->log($machine->succeed("cat /etc/tests/muh.eml | rspamc -h 127.0.0.1:11334 symbols | grep NO_MUH"));
     '';
   };
+  postfixIntegration = makeTest {
+    name = "rspamd-postfix-integration";
+    machine = {
+      environment.systemPackages = with pkgs; [ msmtp ];
+      environment.etc."tests/gtube.eml".text = ''
+        From: Sheep1<bah@example.com>
+        To: Sheep2<tester@example.com>
+        Subject: Evil cows
+
+        I find cows to be evil don't you?
+
+        XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-EMAIL*C.34X
+      '';
+      environment.etc."tests/example.eml".text = ''
+        From: Sheep1<bah@example.com>
+        To: Sheep2<tester@example.com>
+        Subject: Evil cows
+
+        I find cows to be evil don't you?
+      '';
+      users.users.tester.password = "test";
+      services.postfix = {
+        enable = true;
+        destination = ["example.com"];
+      };
+      services.rspamd = {
+        enable = true;
+        postfix.enable = true;
+      };
+    };
+    testScript = ''
+      ${initMachine}
+      $machine->waitForOpenPort(11334);
+      $machine->waitForOpenPort(25);
+      ${checkSocket "/run/rspamd/rspamd-milter.sock" "rspamd" "postfix" "660" }
+      $machine->log($machine->succeed("rspamc -h 127.0.0.1:11334 stat"));
+      $machine->log($machine->succeed("msmtp --host=localhost -t --read-envelope-from < /etc/tests/example.eml"));
+      $machine->log($machine->fail("msmtp --host=localhost -t --read-envelope-from < /etc/tests/gtube.eml"));
+
+      $machine->waitUntilFails('[ "$(postqueue -p)" != "Mail queue is empty" ]');
+      $machine->fail("journalctl -u postfix | grep -i error >&2");
+      $machine->fail("journalctl -u postfix | grep -i warning >&2");
+    '';
+  };
 }