Diffstat (limited to 'nixos')
 nixos/doc/manual/administration/container-networking.xml            |   3
 nixos/doc/manual/administration/imperative-containers.xml           |  18
 nixos/doc/manual/installation/installing.xml                        |  54
 nixos/modules/module-list.nix                                       |   2
 nixos/modules/services/hardware/triggerhappy.nix                    | 114
 nixos/modules/services/logging/journaldriver.nix                    |   2
 nixos/modules/services/misc/emby.nix                                |   2
 nixos/modules/services/misc/nix-optimise.nix                        |   2
 nixos/modules/services/misc/redmine.nix                             | 168
 nixos/modules/services/monitoring/prometheus/exporters.nix          |  21
 nixos/modules/services/monitoring/prometheus/exporters/varnish.nix  |   1
 nixos/modules/services/networking/murmur.nix                        |   2
 nixos/modules/services/printing/cupsd.nix                           |   2
 nixos/modules/services/web-apps/nextcloud.nix                       |  21
 nixos/modules/services/x11/desktop-managers/gnome3.nix              |   1
 nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix | 159
 nixos/modules/services/x11/display-managers/lightdm.nix             |   1
 nixos/modules/services/x11/display-managers/startx.nix              |  44
 nixos/modules/tasks/network-interfaces-scripted.nix                 |   3
 nixos/modules/virtualisation/container-config.nix                   |   7
 nixos/modules/virtualisation/virtualbox-image.nix                   |   2
 nixos/release.nix                                                   |   2
 nixos/tests/codimd.nix                                              |   6
 nixos/tests/containers-imperative.nix                               |   3
 nixos/tests/prometheus-exporters.nix                                | 297
 nixos/tests/redmine.nix                                             |  40
 26 files changed, 886 insertions(+), 91 deletions(-)
diff --git a/nixos/doc/manual/administration/container-networking.xml b/nixos/doc/manual/administration/container-networking.xml
index 4b977d1d82eb..8aca329c8f1f 100644
--- a/nixos/doc/manual/administration/container-networking.xml
+++ b/nixos/doc/manual/administration/container-networking.xml
@@ -52,4 +52,7 @@ $ ping -c1 10.233.4.2
 networking.networkmanager.unmanaged = [ "interface-name:ve-*" ];
 </programlisting>
  </para>
+ <para>
+  You may need to restart your system for the changes to take effect.
+ </para>
 </section>
diff --git a/nixos/doc/manual/administration/imperative-containers.xml b/nixos/doc/manual/administration/imperative-containers.xml
index fa380477f6cb..9bb62bc2ece9 100644
--- a/nixos/doc/manual/administration/imperative-containers.xml
+++ b/nixos/doc/manual/administration/imperative-containers.xml
@@ -73,7 +73,8 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
  </para>
 
  <para>
-  To change the configuration of the container, you can edit
+  There are several ways to change the configuration of the container. First,
+  on the host, you can edit
   <literal>/var/lib/container/<replaceable>name</replaceable>/etc/nixos/configuration.nix</literal>,
   and run
 <screen>
@@ -86,7 +87,8 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
   <xref linkend="opt-services.httpd.enable"/> = true;
   <xref linkend="opt-services.httpd.adminAddr"/> = "foo@example.org";
   <xref linkend="opt-networking.firewall.allowedTCPPorts"/> = [ 80 ];
-  '
+'
+
 # curl http://$(nixos-container show-ip foo)/
 &lt;!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">…
 </screen>
@@ -95,13 +97,11 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
  </para>
 
  <para>
-  Note that in previous versions of NixOS (17.09 and earlier) one could also
-  use all nix-related commands (like <command>nixos-rebuild switch</command>)
-  from inside the container. However, since the release of Nix 2.0 this is not
-  supported anymore. Supporting Nix commands inside the container might be
-  possible again in future versions. See
-  <link xlink:href="https://github.com/NixOS/nixpkgs/issues/40355">the github
-  issue</link> for tracking progress on this issue.
+  Alternatively, you can change the configuration from within the container
+  itself by running <command>nixos-rebuild switch</command> inside the
+  container. Note that the container by default does not have a copy of the
+  NixOS channel, so you should run <command>nix-channel --update</command>
+  first.
  </para>
 
  <para>
diff --git a/nixos/doc/manual/installation/installing.xml b/nixos/doc/manual/installation/installing.xml
index 2b68def95b70..8e94f946c5ee 100644
--- a/nixos/doc/manual/installation/installing.xml
+++ b/nixos/doc/manual/installation/installing.xml
@@ -115,10 +115,17 @@
      </listitem>
      <listitem>
       <para>
-       Add a <emphasis>swap</emphasis> partition. The size required will vary
-       according to needs, here a 8GiB one is created. The space left in front
-       (512MiB) will be used by the boot partition.
-<screen language="commands"># parted /dev/sda -- mkpart primary linux-swap 512MiB 8.5GiB</screen>
+       Add the <emphasis>root</emphasis> partition. This will fill the disk,
+       except for the space at the end where the swap will live, and the
+       512MiB left in front for the boot partition.
+<screen language="commands"># parted /dev/sda -- mkpart primary 512MiB -8GiB</screen>
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       Next, add a <emphasis>swap</emphasis> partition. The size required will
+       vary according to needs; here an 8GiB one is created.
+<screen language="commands"># parted /dev/sda -- mkpart primary linux-swap -8GiB 100%</screen>
        <note>
         <para>
          The swap partition size rules are no different than for other Linux
@@ -129,18 +136,11 @@
      </listitem>
      <listitem>
       <para>
-       Next, add the <emphasis>root</emphasis> partition. This will fill the
-       remainder ending part of the disk.
-<screen language="commands"># parted /dev/sda -- mkpart primary 8.5GiB -1MiB</screen>
-      </para>
-     </listitem>
-     <listitem>
-      <para>
        Finally, the <emphasis>boot</emphasis> partition. NixOS by default uses
        the ESP (EFI system partition) as its <emphasis>/boot</emphasis>
        partition. It uses the initially reserved 512MiB at the start of the
        disk.
-<screen language="commands"># parted /dev/sda -- mkpart ESP fat32 1M 512MiB
+<screen language="commands"># parted /dev/sda -- mkpart ESP fat32 1MiB 512MiB
 # parted /dev/sda -- set 3 boot on</screen>
       </para>
      </listitem>
@@ -177,9 +177,16 @@
      </listitem>
      <listitem>
       <para>
-       Add a <emphasis>swap</emphasis> partition. The size required will vary
-       according to needs, here a 8GiB one is created.
-<screen language="commands"># parted /dev/sda -- mkpart primary linux-swap 1M 8GiB</screen>
+       Add the <emphasis>root</emphasis> partition. This will fill the disk,
+       except for the space at the end where the swap will live.
+<screen language="commands"># parted /dev/sda -- mkpart primary 1MiB -8GiB</screen>
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       Finally, add a <emphasis>swap</emphasis> partition. The size required
+       will vary according to needs; here an 8GiB one is created.
+<screen language="commands"># parted /dev/sda -- mkpart primary linux-swap -8GiB 100%</screen>
        <note>
         <para>
          The swap partition size rules are no different than for other Linux
@@ -188,13 +195,6 @@
        </note>
       </para>
      </listitem>
-     <listitem>
-      <para>
-       Finally, add the <emphasis>root</emphasis> partition. This will fill the
-       remainder of the disk.
-<screen language="commands"># parted /dev/sda -- mkpart primary 8GiB -1s</screen>
-      </para>
-     </listitem>
     </orderedlist>
    </para>
 
@@ -486,17 +486,17 @@ $ nix-env -i w3m</screen>
    <title>Example partition schemes for NixOS on <filename>/dev/sda</filename> (MBR)</title>
 <screen language="commands">
 # parted /dev/sda -- mklabel msdos
-# parted /dev/sda -- mkpart primary linux-swap 1M 8GiB
-# parted /dev/sda -- mkpart primary 8GiB -1s</screen>
+# parted /dev/sda -- mkpart primary 1MiB -8GiB
+# parted /dev/sda -- mkpart primary linux-swap -8GiB 100%</screen>
   </example>
 
   <example xml:id="ex-partition-scheme-UEFI">
    <title>Example partition schemes for NixOS on <filename>/dev/sda</filename> (UEFI)</title>
 <screen language="commands">
 # parted /dev/sda -- mklabel gpt
-# parted /dev/sda -- mkpart primary linux-swap 512MiB 8.5GiB
-# parted /dev/sda -- mkpart primary 8.5GiB -1MiB
-# parted /dev/sda -- mkpart ESP fat32 1M 512MiB
+# parted /dev/sda -- mkpart primary 512MiB -8GiB
+# parted /dev/sda -- mkpart primary linux-swap -8GiB 100%
+# parted /dev/sda -- mkpart ESP fat32 1MiB 512MiB
 # parted /dev/sda -- set 3 boot on</screen>
   </example>
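
For reference, a layout created with the UEFI scheme above might later be
described in configuration.nix roughly as follows. This is a sketch, not
something the commands above generate; the labels "nixos", "boot" and "swap"
are assumptions that depend on how the filesystems are created:

    fileSystems."/" = {
      device = "/dev/disk/by-label/nixos";
      fsType = "ext4";
    };

    fileSystems."/boot" = {
      device = "/dev/disk/by-label/boot";
      fsType = "vfat";
    };

    swapDevices = [
      { device = "/dev/disk/by-label/swap"; }
    ];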
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index af630178a919..261de6955ec2 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -284,6 +284,7 @@
   ./services/hardware/tlp.nix
   ./services/hardware/thinkfan.nix
   ./services/hardware/trezord.nix
+  ./services/hardware/triggerhappy.nix
   ./services/hardware/u2f.nix
   ./services/hardware/udev.nix
   ./services/hardware/udisks2.nix
@@ -734,6 +735,7 @@
   ./services/x11/display-managers/lightdm.nix
   ./services/x11/display-managers/sddm.nix
   ./services/x11/display-managers/slim.nix
+  ./services/x11/display-managers/startx.nix
   ./services/x11/display-managers/xpra.nix
   ./services/x11/fractalart.nix
   ./services/x11/hardware/libinput.nix
diff --git a/nixos/modules/services/hardware/triggerhappy.nix b/nixos/modules/services/hardware/triggerhappy.nix
new file mode 100644
index 000000000000..81d4a1ae65bf
--- /dev/null
+++ b/nixos/modules/services/hardware/triggerhappy.nix
@@ -0,0 +1,114 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.triggerhappy;
+
+  socket = "/run/thd.socket";
+
+  configFile = pkgs.writeText "triggerhappy.conf" ''
+    ${concatMapStringsSep "\n"
+      ({ keys, event, cmd, ... }:
+        ''${concatMapStringsSep "+" (x: "KEY_" + x) keys} ${toString { press = 1; hold = 2; release = 0; }.${event}} ${cmd}''
+      )
+      cfg.bindings}
+    ${cfg.extraConfig}
+  '';
+
+  bindingCfg = { config, ... }: {
+    options = {
+
+      keys = mkOption {
+        type = types.listOf types.str;
+        description = "List of keys to match.  Key names as defined in linux/input-event-codes.h";
+      };
+
+      event = mkOption {
+        type = types.enum ["press" "hold" "release"];
+        default = "press";
+        description = "Event to match.";
+      };
+
+      cmd = mkOption {
+        type = types.str;
+        description = "What to run.";
+      };
+
+    };
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.triggerhappy = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable the <command>triggerhappy</command> hotkey daemon.
+        '';
+      };
+
+      bindings = mkOption {
+        type = types.listOf (types.submodule bindingCfg);
+        default = [];
+        example = lib.literalExample ''
+          [ { keys = ["PLAYPAUSE"];  cmd = "''${pkgs.mpc_cli}/bin/mpc -q toggle"; } ]
+        '';
+        description = ''
+          Key bindings for <command>triggerhappy</command>.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Literal contents to append to the end of the <command>triggerhappy</command> configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.sockets.triggerhappy = {
+      description = "Triggerhappy Socket";
+      wantedBy = [ "sockets.target" ];
+      socketConfig.ListenDatagram = socket;
+    };
+
+    systemd.services.triggerhappy = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "local-fs.target" ];
+      description = "Global hotkey daemon";
+      serviceConfig = {
+        ExecStart = "${pkgs.triggerhappy}/bin/thd --user nobody --socket ${socket} --triggers ${configFile} --deviceglob /dev/input/event*";
+      };
+    };
+
+    services.udev.packages = lib.singleton (pkgs.writeTextFile {
+      name = "triggerhappy-udev-rules";
+      destination = "/etc/udev/rules.d/61-triggerhappy.rules";
+      text = ''
+        ACTION=="add", SUBSYSTEM=="input", KERNEL=="event[0-9]*", ATTRS{name}!="triggerhappy", \
+          RUN+="${pkgs.triggerhappy}/bin/th-cmd --socket ${socket} --passfd --udev"
+      '';
+    });
+
+  };
+
+}
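
A minimal usage sketch for the new module follows. The key names come from
linux/input-event-codes.h; the amixer commands and pkgs.alsaUtils are
illustrative assumptions, not part of the module:

    services.triggerhappy = {
      enable = true;
      bindings = [
        # toggle audio mute when the MUTE key is pressed
        { keys = [ "MUTE" ]; event = "press"; cmd = "${pkgs.alsaUtils}/bin/amixer -q set Master toggle"; }
      ];
      # raw triggerhappy rules (<event> <value> <command>) can be appended verbatim
      extraConfig = ''
        KEY_VOLUMEUP 1 ${pkgs.alsaUtils}/bin/amixer -q set Master 5%+
      '';
    };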
diff --git a/nixos/modules/services/logging/journaldriver.nix b/nixos/modules/services/logging/journaldriver.nix
index 74ac3d4c2365..9bd581e9ec0e 100644
--- a/nixos/modules/services/logging/journaldriver.nix
+++ b/nixos/modules/services/logging/journaldriver.nix
@@ -7,7 +7,7 @@
 # to be set.
 #
 # For further information please consult the documentation in the
-# upstream repository at: https://github.com/aprilabank/journaldriver/
+# upstream repository at: https://github.com/tazjin/journaldriver/
 
 { config, lib, pkgs, ...}:
 
diff --git a/nixos/modules/services/misc/emby.nix b/nixos/modules/services/misc/emby.nix
index 151edd0e761a..0ad4a3f7376f 100644
--- a/nixos/modules/services/misc/emby.nix
+++ b/nixos/modules/services/misc/emby.nix
@@ -55,7 +55,7 @@ in
         User = cfg.user;
         Group = cfg.group;
         PermissionsStartOnly = "true";
-        ExecStart = "${pkgs.emby}/bin/emby";
+        ExecStart = "${pkgs.emby}/bin/emby -programdata ${cfg.dataDir}";
         Restart = "on-failure";
       };
     };
diff --git a/nixos/modules/services/misc/nix-optimise.nix b/nixos/modules/services/misc/nix-optimise.nix
index 6f75e4dd03ea..416529f690e0 100644
--- a/nixos/modules/services/misc/nix-optimise.nix
+++ b/nixos/modules/services/misc/nix-optimise.nix
@@ -40,6 +40,8 @@ in
 
     systemd.services.nix-optimise =
       { description = "Nix Store Optimiser";
+        # No point running it inside a nixos-container. It should be on the host instead.
+        unitConfig.ConditionVirtualization = "!container";
         serviceConfig.ExecStart = "${config.nix.package}/bin/nix-store --optimise";
         startAt = optionals cfg.automatic cfg.dates;
       };
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
index f763ba21d0b2..8d25ac5cb76f 100644
--- a/nixos/modules/services/misc/redmine.nix
+++ b/nixos/modules/services/misc/redmine.nix
@@ -5,7 +5,7 @@ with lib;
 let
   cfg = config.services.redmine;
 
-  bundle = "${pkgs.redmine}/share/redmine/bin/bundle";
+  bundle = "${cfg.package}/share/redmine/bin/bundle";
 
   databaseYml = pkgs.writeText "database.yml" ''
     production:
@@ -15,6 +15,7 @@ let
       port: ${toString cfg.database.port}
       username: ${cfg.database.user}
       password: #dbpass#
+      ${optionalString (cfg.database.socket != null) "socket: ${cfg.database.socket}"}
   '';
 
   configurationYml = pkgs.writeText "configuration.yml" ''
@@ -29,6 +30,19 @@ let
     ${cfg.extraConfig}
   '';
 
+  unpackTheme = unpack "theme";
+  unpackPlugin = unpack "plugin";
+  unpack = id: (name: source:
+    pkgs.stdenv.mkDerivation {
+      name = "redmine-${id}-${name}";
+      buildInputs = [ pkgs.unzip ];
+      buildCommand = ''
+        mkdir -p $out
+        cd $out
+        unpackFile ${source}
+      '';
+  });
+
 in
 
 {
@@ -40,6 +54,14 @@ in
         description = "Enable the Redmine service.";
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.redmine;
+        defaultText = "pkgs.redmine";
+        description = "Which Redmine package to use.";
+        example = "pkgs.redmine.override { ruby = pkgs.ruby_2_3; }";
+      };
+
       user = mkOption {
         type = types.str;
         default = "redmine";
@@ -52,6 +74,12 @@ in
         description = "Group under which Redmine is ran.";
       };
 
+      port = mkOption {
+        type = types.int;
+        default = 3000;
+        description = "Port on which Redmine is ran.";
+      };
+
       stateDir = mkOption {
         type = types.str;
         default = "/var/lib/redmine";
@@ -66,6 +94,41 @@ in
 
           See https://guides.rubyonrails.org/action_mailer_basics.html#action-mailer-configuration
         '';
+        example = literalExample ''
+          email_delivery:
+            delivery_method: smtp
+            smtp_settings:
+              address: mail.example.com
+              port: 25
+        '';
+      };
+
+      themes = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = "Set of themes.";
+        example = literalExample ''
+          {
+            dkuk-redmine_alex_skin = builtins.fetchurl {
+              url = https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip;
+              sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
+            };
+          }
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = "Set of plugins.";
+        example = literalExample ''
+          {
+            redmine_env_auth = builtins.fetchurl {
+              url = https://github.com/Intera/redmine_env_auth/archive/0.6.zip;
+              sha256 = "0yyr1yjd8gvvh832wdc8m3xfnhhxzk2pk3gm2psg5w9jdvd6skak";
+            };
+          }
+        '';
       };
 
       database = {
@@ -78,7 +141,7 @@ in
 
         host = mkOption {
           type = types.str;
-          default = "127.0.0.1";
+          default = (if cfg.database.socket != null then "localhost" else "127.0.0.1");
           description = "Database host address.";
         };
 
@@ -119,6 +182,13 @@ in
             <option>database.user</option>.
           '';
         };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/mysqld/mysqld.sock";
+          description = "Path to the unix socket file to use for authentication.";
+        };
       };
     };
   };
@@ -126,17 +196,20 @@ in
   config = mkIf cfg.enable {
 
     assertions = [
-      { assertion = cfg.database.passwordFile != null || cfg.database.password != "";
-        message = "either services.redmine.database.passwordFile or services.redmine.database.password must be set";
+      { assertion = cfg.database.passwordFile != null || cfg.database.password != "" || cfg.database.socket != null;
+        message = "one of services.redmine.database.socket, services.redmine.database.passwordFile, or services.redmine.database.password must be set";
+      }
+      { assertion = cfg.database.socket != null -> (cfg.database.type == "mysql2");
+        message = "Socket authentication is only available for the mysql2 database type";
       }
     ];
 
-    environment.systemPackages = [ pkgs.redmine ];
+    environment.systemPackages = [ cfg.package ];
 
     systemd.services.redmine = {
       after = [ "network.target" (if cfg.database.type == "mysql2" then "mysql.service" else "postgresql.service") ];
       wantedBy = [ "multi-user.target" ];
-      environment.HOME = "${pkgs.redmine}/share/redmine";
+      environment.HOME = "${cfg.package}/share/redmine";
       environment.RAILS_ENV = "production";
       environment.RAILS_CACHE = "${cfg.stateDir}/cache";
       environment.REDMINE_LANG = "en";
@@ -151,43 +224,80 @@ in
         subversion
       ];
       preStart = ''
-        # start with a fresh config directory every time
-        rm -rf ${cfg.stateDir}/config
-        cp -r ${pkgs.redmine}/share/redmine/config.dist ${cfg.stateDir}/config
+        # ensure cache directory exists for db:migrate command
+        mkdir -p "${cfg.stateDir}/cache"
 
-        # create the basic state directory layout pkgs.redmine expects
-        mkdir -p /run/redmine
+        # create the basic directory layout the redmine package expects
+        mkdir -p /run/redmine/public
 
         for i in config files log plugins tmp; do
-          mkdir -p ${cfg.stateDir}/$i
-          ln -fs ${cfg.stateDir}/$i /run/redmine/$i
+          mkdir -p "${cfg.stateDir}/$i"
+          ln -fs "${cfg.stateDir}/$i" /run/redmine/
+        done
+
+        for i in plugin_assets themes; do
+          mkdir -p "${cfg.stateDir}/public/$i"
+          ln -fs "${cfg.stateDir}/public/$i" /run/redmine/public/
         done
 
-        # ensure cache directory exists for db:migrate command
-        mkdir -p ${cfg.stateDir}/cache
+
+        # start with a fresh config directory
+        # the config directory is copied instead of linked as some mutable data is stored in there
+        rm -rf "${cfg.stateDir}/config/"*
+        cp -r ${cfg.package}/share/redmine/config.dist/* "${cfg.stateDir}/config/"
 
         # link in the application configuration
-        ln -fs ${configurationYml} ${cfg.stateDir}/config/configuration.yml
+        ln -fs ${configurationYml} "${cfg.stateDir}/config/configuration.yml"
+
+
+        # link in all user specified themes
+        rm -rf "${cfg.stateDir}/public/themes/"*
+        for theme in ${concatStringsSep " " (mapAttrsToList unpackTheme cfg.themes)}; do
+          ln -fs $theme/* "${cfg.stateDir}/public/themes"
+        done
+
+        # link in redmine provided themes
+        ln -sf ${cfg.package}/share/redmine/public/themes.dist/* "${cfg.stateDir}/public/themes/"
+
 
-        chmod -R ug+rwX,o-rwx+x ${cfg.stateDir}/
+        # link in all user specified plugins
+        rm -rf "${cfg.stateDir}/plugins/"*
+        for plugin in ${concatStringsSep " " (mapAttrsToList unpackPlugin cfg.plugins)}; do
+          ln -fs $plugin/* "${cfg.stateDir}/plugins/''${plugin##*-redmine-plugin-}"
+        done
+
+
+        # ensure correct permissions for most files
+        chmod -R ug+rwX,o-rwx+x "${cfg.stateDir}/"
 
-        # handle database.passwordFile
+
+        # handle database.passwordFile & permissions
         DBPASS=$(head -n1 ${cfg.database.passwordFile})
-        cp -f ${databaseYml} ${cfg.stateDir}/config/database.yml
-        sed -e "s,#dbpass#,$DBPASS,g" -i ${cfg.stateDir}/config/database.yml
-        chmod 440 ${cfg.stateDir}/config/database.yml
+        cp -f ${databaseYml} "${cfg.stateDir}/config/database.yml"
+        sed -e "s,#dbpass#,$DBPASS,g" -i "${cfg.stateDir}/config/database.yml"
+        chmod 440 "${cfg.stateDir}/config/database.yml"
+
 
         # generate a secret token if required
         if ! test -e "${cfg.stateDir}/config/initializers/secret_token.rb"; then
           ${bundle} exec rake generate_secret_token
-          chmod 440 ${cfg.stateDir}/config/initializers/secret_token.rb
+          chmod 440 "${cfg.stateDir}/config/initializers/secret_token.rb"
         fi
 
+
         # ensure everything is owned by ${cfg.user}
-        chown -R ${cfg.user}:${cfg.group} ${cfg.stateDir}
+        chown -R ${cfg.user}:${cfg.group} "${cfg.stateDir}"
+
+
+        # execute required redmine commands prior to starting the application
+        # NOTE: su is required in case of using MySQL socket authentication
+        /run/wrappers/bin/su -s ${pkgs.bash}/bin/bash -m -l redmine -c '${bundle} exec rake db:migrate'
+        /run/wrappers/bin/su -s ${pkgs.bash}/bin/bash -m -l redmine -c '${bundle} exec rake redmine:load_default_data'
+
 
-        ${bundle} exec rake db:migrate
-        ${bundle} exec rake redmine:load_default_data
+        # log files don't exist until after the first command has been executed,
+        # so correct the ownership of the files generated by the rake calls above
+        chown -R ${cfg.user}:${cfg.group} "${cfg.stateDir}/log"
       '';
 
       serviceConfig = {
@@ -196,13 +306,13 @@ in
         User = cfg.user;
         Group = cfg.group;
         TimeoutSec = "300";
-        WorkingDirectory = "${pkgs.redmine}/share/redmine";
-        ExecStart="${bundle} exec rails server webrick -e production -P ${cfg.stateDir}/redmine.pid";
+        WorkingDirectory = "${cfg.package}/share/redmine";
+        ExecStart="${bundle} exec rails server webrick -e production -p ${toString cfg.port} -P '${cfg.stateDir}/redmine.pid'";
       };
 
     };
 
-    users.extraUsers = optionalAttrs (cfg.user == "redmine") (singleton
+    users.users = optionalAttrs (cfg.user == "redmine") (singleton
       { name = "redmine";
         group = cfg.group;
         home = cfg.stateDir;
@@ -210,7 +320,7 @@ in
         uid = config.ids.uids.redmine;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "redmine") (singleton
+    users.groups = optionalAttrs (cfg.group == "redmine") (singleton
       { name = "redmine";
         gid = config.ids.gids.redmine;
       });
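
A configuration sketch exercising the new redmine options (the values are
illustrative; the package override is the one from the option's example):

    services.redmine = {
      enable = true;
      package = pkgs.redmine.override { ruby = pkgs.ruby_2_3; };
      port = 8080;
      database = {
        type = "mysql2";
        # socket authentication is only supported for the mysql2 database type
        socket = "/run/mysqld/mysqld.sock";
      };
    };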
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 1d5f400250fd..ae8caac436da 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -123,15 +123,13 @@ let
       systemd.services."prometheus-${name}-exporter" = mkMerge ([{
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" ];
-        serviceConfig = {
-          Restart = mkDefault "always";
-          PrivateTmp = mkDefault true;
-          WorkingDirectory = mkDefault /tmp;
-        } // mkIf (!(serviceOpts.serviceConfig.DynamicUser or false)) {
-          User = conf.user;
-          Group = conf.group;
-        };
-      } serviceOpts ]);
+        serviceConfig.Restart = mkDefault "always";
+        serviceConfig.PrivateTmp = mkDefault true;
+        serviceConfig.WorkingDirectory = mkDefault /tmp;
+      } serviceOpts ] ++ optional (!(serviceOpts.serviceConfig.DynamicUser or false)) {
+        serviceConfig.User = conf.user;
+        serviceConfig.Group = conf.group;
+      });
   };
 in
 {
@@ -172,5 +170,8 @@ in
     }) exporterOpts)
   );
 
-  meta.doc = ./exporters.xml;
+  meta = {
+    doc = ./exporters.xml;
+    maintainers = [ maintainers.willibutz ];
+  };
 }
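
The list-based refactoring above matters because mkIf is only understood by
the module system's merge machinery: on the right-hand side of a plain `//`
update it wraps the attributes instead of conditionally dropping them. A
reduced sketch of the pattern (dynamicUser and the attribute values are
illustrative):

    # broken: `//` does not evaluate mkIf, so serviceConfig ends up containing
    # mkIf's wrapper attributes and never a plain User/Group
    serviceConfig = {
      Restart = "always";
    } // mkIf (!dynamicUser) {
      User = "exporter";
    };

    # fixed: extend the list folded by mkMerge only when the condition holds
    systemd.services.example = mkMerge ([
      { serviceConfig.Restart = "always"; }
    ] ++ optional (!dynamicUser) {
      serviceConfig.User = "exporter";
    });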
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix b/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
index 8dbf2d735ab9..aaed76175b84 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
@@ -69,6 +69,7 @@ in
     path = [ pkgs.varnish ];
     serviceConfig = {
       DynamicUser = true;
+      RestartSec = mkDefault 1;
       ExecStart = ''
         ${pkgs.prometheus-varnish-exporter}/bin/prometheus_varnish_exporter \
           --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
diff --git a/nixos/modules/services/networking/murmur.nix b/nixos/modules/services/networking/murmur.nix
index fcc813e6898f..a6e90feff7ea 100644
--- a/nixos/modules/services/networking/murmur.nix
+++ b/nixos/modules/services/networking/murmur.nix
@@ -50,7 +50,7 @@ in
       enable = mkOption {
         type = types.bool;
         default = false;
-        description = "If enabled, start the Murmur Service.";
+        description = "If enabled, start the Murmur Mumble server.";
       };
 
       autobanAttempts = mkOption {
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index dbf18ec1d114..1031d6f3d7e2 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -250,7 +250,7 @@ in
       drivers = mkOption {
         type = types.listOf types.path;
         default = [];
-        example = literalExample "[ pkgs.gutenprint pkgs.hplip pkgs.splix ]";
+        example = literalExample "with pkgs; [ gutenprint hplip splix cups-googlecloudprint ]";
         description = ''
           CUPS drivers to use. Drivers provided by CUPS, cups-filters,
           Ghostscript and Samba are added unconditionally. If this list contains
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 44c3df1d057b..db4c8e1a3d85 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -114,6 +114,21 @@ in {
       '';
     };
 
+    poolConfig = mkOption {
+      type = types.lines;
+      default = ''
+        pm = dynamic
+        pm.max_children = 32
+        pm.start_servers = 2
+        pm.min_spare_servers = 2
+        pm.max_spare_servers = 4
+        pm.max_requests = 500
+      '';
+      description = ''
+        Options for Nextcloud's PHP-FPM pool. See the documentation on <literal>php-fpm.conf</literal> for details on configuration directives.
+      '';
+    };
+
     config = {
       dbtype = mkOption {
         type = types.enum [ "sqlite" "pgsql" "mysql" ];
@@ -339,11 +354,7 @@ in {
             listen.group = nginx
             user = nextcloud
             group = nginx
-            pm = dynamic
-            pm.max_children = 32
-            pm.start_servers = 2
-            pm.min_spare_servers = 2
-            pm.max_spare_servers = 4
+            ${cfg.poolConfig}
             env[NEXTCLOUD_CONFIG_DIR] = ${cfg.home}/config
             env[PATH] = /run/wrappers/bin:/nix/var/nix/profiles/default/bin:/run/current-system/sw/bin:/usr/bin:/bin
             ${phpAdminValues}
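
With the new poolConfig option the php-fpm pool can be tuned without patching
the module, e.g. switching to on-demand worker management (a sketch; the
directive values are illustrative):

    services.nextcloud.poolConfig = ''
      pm = ondemand
      pm.max_children = 64
      pm.process_idle_timeout = 10s
      pm.max_requests = 500
    '';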
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index eb86f7b53bb6..0d5b860d4617 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -133,7 +133,6 @@ in {
 
     fonts.fonts = [ pkgs.dejavu_fonts pkgs.cantarell-fonts ];
 
-    services.xserver.displayManager.gdm.enable = mkDefault true;
     services.xserver.displayManager.extraSessionFilePackages = [ pkgs.gnome3.gnome-session ];
 
     services.xserver.displayManager.sessionCommands = ''
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix
new file mode 100644
index 000000000000..7c794b1ba175
--- /dev/null
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix
@@ -0,0 +1,159 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.enso;
+
+  theme = cfg.theme.package;
+  icons = cfg.iconTheme.package;
+  cursors = cfg.cursorTheme.package;
+
+  # We need a few things in the environment for the greeter to run with
+  # fonts/icons.
+  wrappedEnsoGreeter = pkgs.runCommand "lightdm-enso-os-greeter"
+    { buildInputs = [ pkgs.makeWrapper ]; }
+    ''
+      # This wrapper ensures that we actually get themes
+      makeWrapper ${pkgs.lightdm-enso-os-greeter}/bin/pantheon-greeter \
+        $out/greeter \
+        --prefix PATH : "${pkgs.glibc.bin}/bin" \
+        --set GDK_PIXBUF_MODULE_FILE "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache" \
+        --set GTK_PATH "${theme}:${pkgs.gtk3.out}" \
+        --set GTK_EXE_PREFIX "${theme}" \
+        --set GTK_DATA_PREFIX "${theme}" \
+        --set XDG_DATA_DIRS "${theme}/share:${icons}/share:${cursors}/share" \
+        --set XDG_CONFIG_HOME "${theme}/share"
+
+      cat - > $out/lightdm-enso-os-greeter.desktop << EOF
+      [Desktop Entry]
+      Name=LightDM Greeter
+      Comment=This runs the LightDM Greeter
+      Exec=$out/greeter
+      Type=Application
+      EOF
+    '';
+
+  ensoGreeterConf = pkgs.writeText "lightdm-enso-os-greeter.conf" ''
+    [greeter]
+    default-wallpaper=${ldmcfg.background}
+    gtk-theme=${cfg.theme.name}
+    icon-theme=${cfg.iconTheme.name}
+    cursor-theme=${cfg.cursorTheme.name}
+    blur=${toString cfg.blur}
+    brightness=${toString cfg.brightness}
+    ${cfg.extraConfig}
+  '';
+in {
+  options = {
+    services.xserver.displayManager.lightdm.greeters.enso = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable enso-os-greeter as the lightdm greeter
+        '';
+      };
+
+      theme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.gnome3.gnome-themes-extra;
+          defaultText = "pkgs.gnome3.gnome-themes-extra";
+          description = ''
+            The package path that contains the theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Adwaita";
+          description = ''
+            Name of the theme to use for the lightdm-enso-os-greeter
+          '';
+        };
+      };
+
+      iconTheme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.papirus-icon-theme;
+          defaultText = "pkgs.papirus-icon-theme";
+          description = ''
+            The package path that contains the icon theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "ePapirus";
+          description = ''
+            Name of the icon theme to use for the lightdm-enso-os-greeter
+          '';
+        };
+      };
+
+      cursorTheme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.capitaine-cursors;
+          defaultText = "pkgs.capitaine-cursors";
+          description = ''
+            The package path that contains the cursor theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "capitane-cursors";
+          description = ''
+            Name of the cursor theme to use for the lightdm-enso-os-greeter
+          '';
+        };
+      };
+
+      blur = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable background blur.
+        '';
+      };
+
+      brightness = mkOption {
+        type = types.int;
+        default = 7;
+        description = ''
+          Brightness of the greeter background.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra configuration that should be put in the greeter.conf
+          configuration file
+        '';
+      };
+    };
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+    environment.etc."lightdm/greeter.conf".source = ensoGreeterConf;
+
+    services.xserver.displayManager.lightdm = {
+      greeter = mkDefault {
+        package = wrappedEnsoGreeter;
+        name = "lightdm-enso-os-greeter";
+      };
+
+      greeters = {
+        gtk = {
+          enable = mkDefault false;
+        };
+      };
+    };
+  };
+}
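
A usage sketch for the new greeter (the icon theme name "Papirus" is an
assumption and must match what the referenced package actually installs):

    services.xserver.displayManager.lightdm = {
      enable = true;
      greeters.enso = {
        enable = true;
        blur = true;
        iconTheme = {
          package = pkgs.papirus-icon-theme;
          name = "Papirus";
        };
      };
    };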
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 16f1ddea1a75..a685dbfff2a0 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -80,6 +80,7 @@ in
   imports = [
     ./lightdm-greeters/gtk.nix
     ./lightdm-greeters/mini.nix
+    ./lightdm-greeters/enso-os.nix
   ];
 
   options = {
diff --git a/nixos/modules/services/x11/display-managers/startx.nix b/nixos/modules/services/x11/display-managers/startx.nix
new file mode 100644
index 000000000000..15609540a6e7
--- /dev/null
+++ b/nixos/modules/services/x11/display-managers/startx.nix
@@ -0,0 +1,44 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.displayManager.startx;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.xserver.displayManager.startx = {
+      enable = mkOption {
+        default = false;
+        description = ''
+          Whether to enable the dummy "startx" pseudo-display manager,
+          which allows users to start X manually via the "startx" command
+          from a vt shell. The X server runs under the user's id, not as root.
+          The user must provide a ~/.xinitrc file containing session startup
+          commands, see startx(1). This is not automatically generated
+          from the desktopManager and windowManager settings.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.xserver = {
+      exportConfiguration = true;
+      displayManager.job.execCmd = "";
+      displayManager.lightdm.enable = lib.mkForce false;
+    };
+    systemd.services.display-manager.enable = false;
+    environment.systemPackages = with pkgs; [ xorg.xinit ];
+  };
+
+}
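
Enabling the pseudo-display manager is a two-liner; the session itself then
lives entirely in the user's ~/.xinitrc (sketch):

    services.xserver.enable = true;
    services.xserver.displayManager.startx.enable = true;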
diff --git a/nixos/modules/tasks/network-interfaces-scripted.nix b/nixos/modules/tasks/network-interfaces-scripted.nix
index af61c95da0af..93dfefdce902 100644
--- a/nixos/modules/tasks/network-interfaces-scripted.nix
+++ b/nixos/modules/tasks/network-interfaces-scripted.nix
@@ -85,7 +85,8 @@ let
             after = [ "network-pre.target" "systemd-udevd.service" "systemd-sysctl.service" ];
             before = [ "network.target" "shutdown.target" ];
             wants = [ "network.target" ];
-            partOf = map (i: "network-addresses-${i.name}.service") interfaces;
+            # exclude bridges from the partOf relationship to fix container networking bug #47210
+            partOf = map (i: "network-addresses-${i.name}.service") (filter (i: !(hasAttr i.name cfg.bridges)) interfaces);
             conflicts = [ "shutdown.target" ];
             wantedBy = [ "multi-user.target" ] ++ optional hasDefaultGatewaySet "network-online.target";
 
diff --git a/nixos/modules/virtualisation/container-config.nix b/nixos/modules/virtualisation/container-config.nix
index 5e368acd6d8b..561db7cabcfb 100644
--- a/nixos/modules/virtualisation/container-config.nix
+++ b/nixos/modules/virtualisation/container-config.nix
@@ -22,6 +22,13 @@ with lib;
     # Not supported in systemd-nspawn containers.
     security.audit.enable = false;
 
+    # Make sure that the root user in the container talks to the host's nix-daemon
+    environment.etc."profile".text = ''
+    export NIX_REMOTE=daemon
+    '';
+
   };
 
 }
diff --git a/nixos/modules/virtualisation/virtualbox-image.nix b/nixos/modules/virtualisation/virtualbox-image.nix
index 3d4c06f1f23f..60048911658c 100644
--- a/nixos/modules/virtualisation/virtualbox-image.nix
+++ b/nixos/modules/virtualisation/virtualbox-image.nix
@@ -12,7 +12,7 @@ in {
     virtualbox = {
       baseImageSize = mkOption {
         type = types.int;
-        default = 100 * 1024;
+        default = 10 * 1024;
         description = ''
           The size of the VirtualBox base image in MiB.
         '';
diff --git a/nixos/release.nix b/nixos/release.nix
index 8016dba09152..86383f502a72 100644
--- a/nixos/release.nix
+++ b/nixos/release.nix
@@ -390,12 +390,14 @@ in rec {
   tests.predictable-interface-names = callSubTests tests/predictable-interface-names.nix {};
   tests.printing = callTest tests/printing.nix {};
   tests.prometheus = callTest tests/prometheus.nix {};
+  tests.prometheus-exporters = callTest tests/prometheus-exporters.nix {};
   tests.prosody = callTest tests/prosody.nix {};
   tests.proxy = callTest tests/proxy.nix {};
   tests.quagga = callTest tests/quagga.nix {};
   tests.quake3 = callTest tests/quake3.nix {};
   tests.rabbitmq = callTest tests/rabbitmq.nix {};
   tests.radicale = callTest tests/radicale.nix {};
+  tests.redmine = callTest tests/redmine.nix {};
   tests.rspamd = callSubTests tests/rspamd.nix {};
   tests.runInMachine = callTest tests/run-in-machine.nix {};
   tests.rxe = callTest tests/rxe.nix {};
diff --git a/nixos/tests/codimd.nix b/nixos/tests/codimd.nix
index 9dedac96844a..562f6f24f999 100644
--- a/nixos/tests/codimd.nix
+++ b/nixos/tests/codimd.nix
@@ -40,8 +40,7 @@ import ./make-test.nix ({ pkgs, lib, ... }:
     subtest "CodiMD sqlite", sub {
       $codimdSqlite->waitForUnit("codimd.service");
       $codimdSqlite->waitForOpenPort(3000);
-      $codimdSqlite->sleep(10); # avoid 503 during startup
-      $codimdSqlite->succeed("curl -sSf http://localhost:3000/new");
+      $codimdSqlite->waitUntilSucceeds("curl -sSf http://localhost:3000/new");
     };
 
     subtest "CodiMD postgres", sub {
@@ -49,8 +48,7 @@ import ./make-test.nix ({ pkgs, lib, ... }:
       $codimdPostgres->waitForUnit("codimd.service");
       $codimdPostgres->waitForOpenPort(5432);
       $codimdPostgres->waitForOpenPort(3000);
-      $codimdPostgres->sleep(10); # avoid 503 during startup
-      $codimdPostgres->succeed("curl -sSf http://localhost:3000/new");
+      $codimdPostgres->waitUntilSucceeds("curl -sSf http://localhost:3000/new");
     };
   '';
 })
diff --git a/nixos/tests/containers-imperative.nix b/nixos/tests/containers-imperative.nix
index 6f86819f4e88..782095a09dad 100644
--- a/nixos/tests/containers-imperative.nix
+++ b/nixos/tests/containers-imperative.nix
@@ -86,6 +86,9 @@ import ./make-test.nix ({ pkgs, ...} : {
       # Execute commands via the root shell.
       $machine->succeed("nixos-container run $id1 -- uname") =~ /Linux/ or die;
 
+      # Execute a nix command via the root shell. (regression test for #40355)
+      $machine->succeed("nixos-container run $id1 -- nix-instantiate -E 'derivation { name = \"empty\"; builder = \"false\"; system = \"false\"; }'");
+
       # Stop and start (regression test for #4989)
       $machine->succeed("nixos-container stop $id1");
       $machine->succeed("nixos-container start $id1");
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
new file mode 100644
index 000000000000..2f2c06dcb7d6
--- /dev/null
+++ b/nixos/tests/prometheus-exporters.nix
@@ -0,0 +1,297 @@
+import ./make-test.nix ({ lib, pkgs, ... }:
+let
+  escape' = str: lib.replaceChars [''"'' "$" "\n"] [''\\\"'' "\\$" ""] str;
+
+/*
+ * The attrset `exporterTests` contains one attribute
+ * for each exporter test. Each of these attributes
+ * is expected to be an attrset containing:
+ *
+ *  `exporterConfig`:
+ *    this attribute set contains config for the exporter itself
+ *
+ *  `exporterTest`
+ *    this attribute set contains test instructions
+ *
+ *  `metricProvider` (optional)
+ *    this attribute contains additional machine config
+ *
+ *  Example:
+ *    exporterTests.<exporterName> = {
+ *      exporterConfig = {
+ *        enable = true;
+ *      };
+ *      metricProvider = {
+ *        services.<metricProvider>.enable = true;
+ *      };
+ *      exporterTest = ''
+ *        waitForUnit("prometheus-<exporterName>-exporter.service");
+ *        waitForOpenPort("1234");
+ *        succeed("curl -sSf 'localhost:1234/metrics'");
+ *      '';
+ *    };
+ *
+ *  # this would generate the following test config:
+ *
+ *    nodes.<exporterName> = {
+ *      services.prometheus.<exporterName> = {
+ *        enable = true;
+ *      };
+ *      services.<metricProvider>.enable = true;
+ *    };
+ *
+ *    testScript = ''
+ *      $<exporterName>->start();
+ *      $<exporterName>->waitForUnit("prometheus-<exporterName>-exporter.service");
+ *      $<exporterName>->waitForOpenPort("1234");
+ *      $<exporterName>->succeed("curl -sSf 'localhost:1234/metrics'");
+ *      $<exporterName>->shutdown();
+ *    '';
+ */
+
+  exporterTests = {
+
+    blackbox = {
+      exporterConfig = {
+        enable = true;
+        configFile = pkgs.writeText "config.yml" (builtins.toJSON {
+          modules.icmp_v6 = {
+            prober = "icmp";
+            icmp.preferred_ip_protocol = "ip6";
+          };
+        });
+      };
+      exporterTest = ''
+        waitForUnit("prometheus-blackbox-exporter.service");
+        waitForOpenPort(9115);
+        succeed("curl -sSf 'http://localhost:9115/probe?target=localhost&module=icmp_v6' | grep -q 'probe_success 1'");
+      '';
+    };
+
+    collectd = {
+      exporterConfig = {
+        enable = true;
+        extraFlags = [ "--web.collectd-push-path /collectd" ];
+      };
+      exporterTest = let postData = escape' ''
+        [{
+          "values":[23],
+          "dstypes":["gauge"],
+          "type":"gauge",
+          "interval":1000,
+          "host":"testhost",
+          "plugin":"testplugin",
+          "time":$(date +%s)
+        }]
+        ''; in ''
+        waitForUnit("prometheus-collectd-exporter.service");
+        waitForOpenPort(9103);
+        succeed("curl -sSfH 'Content-Type: application/json' -X POST --data \"${postData}\" localhost:9103/collectd");
+        succeed("curl -sSf localhost:9103/metrics | grep -q 'collectd_testplugin_gauge{instance=\"testhost\"} 23'");
+      '';
+    };
+
+    dnsmasq = {
+      exporterConfig = {
+        enable = true;
+        leasesPath = "/var/lib/dnsmasq/dnsmasq.leases";
+      };
+      metricProvider = {
+        services.dnsmasq.enable = true;
+      };
+      exporterTest = ''
+        waitForUnit("prometheus-dnsmasq-exporter.service");
+        waitForOpenPort(9153);
+        succeed("curl -sSf http://localhost:9153/metrics | grep -q 'dnsmasq_leases 0'");
+      '';
+    };
+
+    dovecot = {
+      exporterConfig = {
+        enable = true;
+        scopes = [ "global" ];
+        socketPath = "/var/run/dovecot2/old-stats";
+        user = "root"; # <- don't use user root in production
+      };
+      metricProvider = {
+        services.dovecot2.enable = true;
+      };
+      exporterTest = ''
+        waitForUnit("prometheus-dovecot-exporter.service");
+        waitForOpenPort(9166);
+        succeed("curl -sSf http://localhost:9166/metrics | grep -q 'dovecot_up{scope=\"global\"} 1'");
+      '';
+    };
+
+    fritzbox = { # TODO add proper test case
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        waitForUnit("prometheus-fritzbox-exporter.service");
+        waitForOpenPort(9133);
+        succeed("curl -sSf http://localhost:9133/metrics | grep -q 'fritzbox_exporter_collect_errors 0'");
+      '';
+    };
+
+    json = {
+      exporterConfig = {
+        enable = true;
+        url = "http://localhost";
+        configFile = pkgs.writeText "json-exporter-conf.json" (builtins.toJSON [{
+          name = "json_test_metric";
+          path = "$.test";
+        }]);
+      };
+      metricProvider = {
+        systemd.services.prometheus-json-exporter.after = [ "nginx.service" ];
+        services.nginx = {
+          enable = true;
+          virtualHosts.localhost.locations."/".extraConfig = ''
+            return 200 "{\"test\":1}";
+          '';
+        };
+      };
+      exporterTest = ''
+        waitForUnit("nginx.service");
+        waitForOpenPort(80);
+        waitForUnit("prometheus-json-exporter.service");
+        waitForOpenPort(7979);
+        succeed("curl -sSf localhost:7979/metrics | grep -q 'json_test_metric 1'");
+      '';
+    };
+
+    nginx = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.nginx = {
+          enable = true;
+          statusPage = true;
+          virtualHosts."/".extraConfig = "return 204;";
+        };
+      };
+      exporterTest = ''
+        waitForUnit("nginx.service")
+        waitForUnit("prometheus-nginx-exporter.service")
+        waitForOpenPort(9113)
+        succeed("curl -sSf http://localhost:9113/metrics | grep -q 'nginx_up 1'")
+      '';
+    };
+
+    node = {
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        waitForUnit("prometheus-node-exporter.service");
+        waitForOpenPort(9100);
+        succeed("curl -sSf http://localhost:9100/metrics | grep -q 'node_exporter_build_info{.\\+} 1'");
+      '';
+    };
+
+    postfix = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.postfix.enable = true;
+      };
+      exporterTest = ''
+        waitForUnit("prometheus-postfix-exporter.service");
+        waitForOpenPort(9154);
+        succeed("curl -sSf http://localhost:9154/metrics | grep -q 'postfix_smtpd_connects_total 0'");
+      '';
+    };
+
+    snmp = {
+      exporterConfig = {
+        enable = true;
+        configuration.default = {
+          version = 2;
+          auth.community = "public";
+        };
+      };
+      exporterTest = ''
+        waitForUnit("prometheus-snmp-exporter.service");
+        waitForOpenPort(9116);
+        succeed("curl -sSf localhost:9116/metrics | grep -q 'snmp_request_errors_total 0'");
+      '';
+    };
+
+    surfboard = {
+      exporterConfig = {
+        enable = true;
+        modemAddress = "localhost";
+      };
+      metricProvider = {
+        systemd.services.prometheus-surfboard-exporter.after = [ "nginx.service" ];
+        services.nginx = {
+          enable = true;
+          virtualHosts.localhost.locations."/cgi-bin/status".extraConfig = ''
+            return 204;
+          '';
+        };
+      };
+      exporterTest = ''
+        waitForUnit("nginx.service");
+        waitForOpenPort(80);
+        waitForUnit("prometheus-surfboard-exporter.service");
+        waitForOpenPort(9239);
+        succeed("curl -sSf localhost:9239/metrics | grep -q 'surfboard_up 1'");
+      '';
+    };
+
+    varnish = {
+      exporterConfig = {
+        enable = true;
+        instance = "/var/spool/varnish/varnish";
+        group = "varnish";
+      };
+      metricProvider = {
+        systemd.services.prometheus-varnish-exporter.after = [
+          "varnish.service"
+        ];
+        services.varnish = {
+          enable = true;
+          config = ''
+            vcl 4.0;
+            backend default {
+              .host = "127.0.0.1";
+              .port = "80";
+            }
+          '';
+        };
+      };
+      exporterTest = ''
+        waitForUnit("prometheus-varnish-exporter.service");
+        waitForOpenPort(9131);
+        succeed("curl -sSf http://localhost:9131/metrics | grep -q 'varnish_up 1'");
+      '';
+    };
+  };
+
+  nodes = lib.mapAttrs (exporter: testConfig: lib.mkMerge [{
+    services.prometheus.exporters.${exporter} = testConfig.exporterConfig;
+  } testConfig.metricProvider or {}]) exporterTests;
+
+  testScript = lib.concatStrings (lib.mapAttrsToList (exporter: testConfig: (''
+    subtest "${exporter}", sub {
+      ${"$"+exporter}->start();
+      ${lib.concatStringsSep "  " (map (line: ''
+        ${"$"+exporter}->${line};
+      '') (lib.splitString "\n" (lib.removeSuffix "\n" testConfig.exporterTest)))}
+      ${"$"+exporter}->shutdown();
+    };
+  '')) exporterTests);
+in
+{
+  name = "prometheus-exporters";
+
+  inherit nodes testScript;
+
+  meta = with lib.maintainers; {
+    maintainers = [ willibutz ];
+  };
+})
diff --git a/nixos/tests/redmine.nix b/nixos/tests/redmine.nix
new file mode 100644
index 000000000000..330f72854cac
--- /dev/null
+++ b/nixos/tests/redmine.nix
@@ -0,0 +1,40 @@
+import ./make-test.nix ({ pkgs, lib, ... }:
+{
+  name = "redmine";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  machine =
+    { config, pkgs, ... }:
+    { services.mysql.enable = true;
+      services.mysql.package = pkgs.mariadb;
+      services.mysql.ensureDatabases = [ "redmine" ];
+      services.mysql.ensureUsers = [
+        { name = "redmine";
+          ensurePermissions = { "redmine.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+
+      services.redmine.enable = true;
+      services.redmine.database.socket = "/run/mysqld/mysqld.sock";
+      services.redmine.plugins = {
+        redmine_env_auth = pkgs.fetchurl {
+          url = https://github.com/Intera/redmine_env_auth/archive/0.6.zip;
+          sha256 = "0yyr1yjd8gvvh832wdc8m3xfnhhxzk2pk3gm2psg5w9jdvd6skak";
+        };
+      };
+      services.redmine.themes = {
+        dkuk-redmine_alex_skin = pkgs.fetchurl {
+          url = https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip;
+          sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
+        };
+      };
+    };
+
+  testScript = ''
+    startAll;
+
+    $machine->waitForUnit('redmine.service');
+    $machine->waitForOpenPort('3000');
+    $machine->succeed("curl --fail http://localhost:3000/");
+  '';
+})