about summary refs log tree commit diff
path: root/nixos
diff options
context:
space:
mode:
Diffstat (limited to 'nixos')
-rw-r--r--nixos/doc/manual/administration/imperative-containers.xml2
-rw-r--r--nixos/doc/manual/configuration/x-windows.xml7
-rw-r--r--nixos/doc/manual/development/option-declarations.xml2
-rw-r--r--nixos/doc/manual/development/option-types.xml2
-rw-r--r--nixos/doc/manual/release-notes/rl-1509.xml2
-rw-r--r--nixos/doc/manual/release-notes/rl-1603.xml2
-rw-r--r--nixos/doc/manual/release-notes/rl-1709.xml24
-rw-r--r--nixos/lib/test-driver/Machine.pm23
-rw-r--r--nixos/modules/installer/tools/nix-fallback-paths.nix6
-rw-r--r--nixos/modules/misc/ids.nix2
-rw-r--r--nixos/modules/module-list.nix4
-rw-r--r--nixos/modules/profiles/hardened.nix2
-rw-r--r--nixos/modules/programs/gnupg.nix83
-rw-r--r--nixos/modules/programs/ssh.nix2
-rw-r--r--nixos/modules/programs/thefuck.nix31
-rw-r--r--nixos/modules/programs/zsh/zsh.nix2
-rw-r--r--nixos/modules/security/wrappers/default.nix6
-rw-r--r--nixos/modules/services/cluster/kubernetes.nix2
-rw-r--r--nixos/modules/services/continuous-integration/hydra/default.nix5
-rw-r--r--nixos/modules/services/continuous-integration/jenkins/default.nix11
-rw-r--r--nixos/modules/services/databases/rethinkdb.nix110
-rw-r--r--nixos/modules/services/logging/logstash.nix108
-rw-r--r--nixos/modules/services/mail/mailhog.nix43
-rw-r--r--nixos/modules/services/misc/errbot.nix2
-rw-r--r--nixos/modules/services/misc/plex.nix2
-rw-r--r--nixos/modules/services/monitoring/prometheus/default.nix9
-rw-r--r--nixos/modules/services/networking/aiccu.nix185
-rw-r--r--nixos/modules/services/networking/bind.nix17
-rw-r--r--nixos/modules/services/networking/cntlm.nix154
-rw-r--r--nixos/modules/services/networking/hostapd.nix2
-rw-r--r--nixos/modules/services/networking/nsd.nix1
-rw-r--r--nixos/modules/services/networking/ssh/sshd.nix2
-rw-r--r--nixos/modules/services/networking/toxvpn.nix16
-rw-r--r--nixos/modules/services/search/elasticsearch.nix5
-rw-r--r--nixos/modules/services/search/kibana.nix48
-rw-r--r--nixos/modules/services/web-servers/apache-httpd/default.nix2
-rw-r--r--nixos/modules/services/web-servers/caddy.nix29
-rw-r--r--nixos/modules/services/web-servers/minio.nix69
-rw-r--r--nixos/modules/services/x11/desktop-managers/gnome3.nix4
-rw-r--r--nixos/modules/services/x11/desktop-managers/plasma5.nix34
-rw-r--r--nixos/modules/services/x11/display-managers/lightdm.nix2
-rw-r--r--nixos/modules/services/x11/display-managers/sddm.nix28
-rw-r--r--nixos/modules/services/x11/xserver.nix47
-rw-r--r--nixos/modules/system/boot/loader/grub/grub.nix2
-rw-r--r--nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py85
-rw-r--r--nixos/modules/tasks/network-interfaces.nix4
-rw-r--r--nixos/release-combined.nix6
-rw-r--r--nixos/tests/elk.nix95
-rw-r--r--nixos/tests/installer.nix2
-rw-r--r--nixos/tests/logstash.nix41
-rw-r--r--nixos/tests/minio.nix19
-rw-r--r--nixos/tests/sddm.nix2
52 files changed, 916 insertions, 479 deletions
diff --git a/nixos/doc/manual/administration/imperative-containers.xml b/nixos/doc/manual/administration/imperative-containers.xml
index 9851eb08afb5..d5d8140e0764 100644
--- a/nixos/doc/manual/administration/imperative-containers.xml
+++ b/nixos/doc/manual/administration/imperative-containers.xml
@@ -57,7 +57,7 @@ Thus, if something went wrong, you can get status info using
 
 </para>
 
-<para>If the container has started succesfully, you can log in as
+<para>If the container has started successfully, you can log in as
 root using the <command>root-login</command> operation:
 
 <screen>
diff --git a/nixos/doc/manual/configuration/x-windows.xml b/nixos/doc/manual/configuration/x-windows.xml
index 4a73695e0942..fc6082ce3afd 100644
--- a/nixos/doc/manual/configuration/x-windows.xml
+++ b/nixos/doc/manual/configuration/x-windows.xml
@@ -45,6 +45,13 @@ services.xserver.displayManager.lightdm.enable = true;
 </programlisting>
 </para>
 
+<para>You can set the keyboard layout (and optionally the layout variant):
+<programlisting>
+services.xserver.layout = "de";
+services.xserver.xkbVariant = "neo";
+</programlisting>
+</para>
+
 <para>The X server is started automatically at boot time.  If you
 don’t want this to happen, you can set:
 <programlisting>
diff --git a/nixos/doc/manual/development/option-declarations.xml b/nixos/doc/manual/development/option-declarations.xml
index e322b6458a1a..d20c2d1aa2e2 100644
--- a/nixos/doc/manual/development/option-declarations.xml
+++ b/nixos/doc/manual/development/option-declarations.xml
@@ -96,7 +96,7 @@ options = {
   </itemizedlist>
   </para>
 
-  <para>Both approachs have problems.</para>
+  <para>Both approaches have problems.</para>
 
   <para>Making backends independent can quickly become hard to manage. For
     display managers, there can be only one enabled at a time, but the type
diff --git a/nixos/doc/manual/development/option-types.xml b/nixos/doc/manual/development/option-types.xml
index e928c5570874..441393c98276 100644
--- a/nixos/doc/manual/development/option-types.xml
+++ b/nixos/doc/manual/development/option-types.xml
@@ -396,7 +396,7 @@ code before creating a new type.</para>
     <listitem><para>For composed types that can take a submodule as type 
         parameter, this function can be used to substitute the parameter of a 
         submodule type. It takes a module as parameter and return the type with 
-        the submodule options substituted. It is usally defined as a type 
+        the submodule options substituted. It is usually defined as a type 
         function call with a recursive call to 
         <literal>substSubModules</literal>, e.g for a type 
         <literal>composedType</literal> that take an <literal>elemtype</literal> 
diff --git a/nixos/doc/manual/release-notes/rl-1509.xml b/nixos/doc/manual/release-notes/rl-1509.xml
index e0271485c361..967fbcf869db 100644
--- a/nixos/doc/manual/release-notes/rl-1509.xml
+++ b/nixos/doc/manual/release-notes/rl-1509.xml
@@ -342,7 +342,7 @@ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.pandoc
 
 <listitem>
   <para>
-    Python 2.6 has been marked as broken (as it no longer recieves
+    Python 2.6 has been marked as broken (as it no longer receives
     security updates from upstream).
   </para>
 </listitem>
diff --git a/nixos/doc/manual/release-notes/rl-1603.xml b/nixos/doc/manual/release-notes/rl-1603.xml
index f460e00e836e..7279dd058270 100644
--- a/nixos/doc/manual/release-notes/rl-1603.xml
+++ b/nixos/doc/manual/release-notes/rl-1603.xml
@@ -362,7 +362,7 @@ services.syncthing = {
   <listitem>
     <para>
       <literal>networking.firewall.allowPing</literal> is now enabled by
-      default. Users are encourarged to configure an approiate rate limit for
+      default. Users are encouraged to configure an appropriate rate limit for
       their machines using the Kernel interface at
       <filename>/proc/sys/net/ipv4/icmp_ratelimit</filename> and
       <filename>/proc/sys/net/ipv6/icmp/ratelimit</filename> or using the
diff --git a/nixos/doc/manual/release-notes/rl-1709.xml b/nixos/doc/manual/release-notes/rl-1709.xml
index 6948c22cc88f..34cfe1702e9c 100644
--- a/nixos/doc/manual/release-notes/rl-1709.xml
+++ b/nixos/doc/manual/release-notes/rl-1709.xml
@@ -57,6 +57,12 @@ following incompatible changes:</para>
 <itemizedlist>
   <listitem>
     <para>
+      The <literal>aiccu</literal> package was removed. This is due to SixXS
+      <link xlink:href="https://www.sixxs.net/main/">sunsetting</link> its IPv6 tunnel.
+    </para>
+  </listitem>
+  <listitem>
+    <para>
       Top-level <literal>idea</literal> package collection was renamed.
       All JetBrains IDEs are now at <literal>jetbrains</literal>.
     </para>
@@ -89,6 +95,24 @@ rmdir /var/lib/ipfs/.ipfs
       The <literal>postgres</literal> default <literal>dataDir</literal> has changed from <literal>/var/db/postgres</literal> to <literal>/var/lib/postgresql/$psqlSchema</literal> where $psqlSchema is 9.6 for example.
     </para>
   </listitem>
+  <listitem>
+    <para>
+      The <literal>caddy</literal> service was previously using an extra
+      <literal>.caddy</literal> in the data directory specified with the
+      <literal>dataDir</literal> option. The contents of the
+      <literal>.caddy</literal> directory are now expected to be in the
+      <literal>dataDir</literal>.
+    </para>
+  </listitem>
+  <listitem>
+    <para>
+      The <literal>ssh-agent</literal> user service is not started by default
+      anymore. Use <literal>programs.ssh.startAgent</literal> to enable it if
+      needed. There is also a new <literal>programs.gnupg.agent</literal>
+      module that creates a <literal>gpg-agent</literal> user service. It can
+      also serve as an SSH agent if <literal>enableSSHSupport</literal> is set.
+    </para>
+  </listitem>
 </itemizedlist>
 
 
diff --git a/nixos/lib/test-driver/Machine.pm b/nixos/lib/test-driver/Machine.pm
index 6be119bbf331..cd375352c4ca 100644
--- a/nixos/lib/test-driver/Machine.pm
+++ b/nixos/lib/test-driver/Machine.pm
@@ -219,8 +219,8 @@ sub waitForMonitorPrompt {
 sub retry {
     my ($coderef) = @_;
     my $n;
-    for ($n = 0; $n < 900; $n++) {
-        return if &$coderef;
+    for ($n = 899; $n >=0; $n--) {
+        return if &$coderef($n);
         sleep 1;
     }
     die "action timed out after $n seconds";
@@ -518,6 +518,12 @@ sub waitUntilTTYMatches {
 
     $self->nest("waiting for $regexp to appear on tty $tty", sub {
         retry sub {
+            my ($retries_remaining) = @_;
+            if ($retries_remaining == 0) {
+                $self->log("Last chance to match /$regexp/ on TTY$tty, which currently contains:");
+                $self->log($self->getTTYText($tty));
+            }
+
             return 1 if $self->getTTYText($tty) =~ /$regexp/;
         }
     });
@@ -566,6 +572,12 @@ sub waitForText {
     my ($self, $regexp) = @_;
     $self->nest("waiting for $regexp to appear on the screen", sub {
         retry sub {
+            my ($retries_remaining) = @_;
+            if ($retries_remaining == 0) {
+                $self->log("Last chance to match /$regexp/ on the screen, which currently contains:");
+                $self->log($self->getScreenText);
+            }
+
             return 1 if $self->getScreenText =~ /$regexp/;
         }
     });
@@ -600,6 +612,13 @@ sub waitForWindow {
     $self->nest("waiting for a window to appear", sub {
         retry sub {
             my @names = $self->getWindowNames;
+
+            my ($retries_remaining) = @_;
+            if ($retries_remaining == 0) {
+                $self->log("Last chance to match /$regexp/ on the window list, which currently contains:");
+                $self->log(join(", ", @names));
+            }
+
             foreach my $n (@names) {
                 return 1 if $n =~ /$regexp/;
             }
diff --git a/nixos/modules/installer/tools/nix-fallback-paths.nix b/nixos/modules/installer/tools/nix-fallback-paths.nix
index 833782477199..80241cd3ebec 100644
--- a/nixos/modules/installer/tools/nix-fallback-paths.nix
+++ b/nixos/modules/installer/tools/nix-fallback-paths.nix
@@ -1,5 +1,5 @@
 {
-  x86_64-linux = "/nix/store/71im965h634iy99zsmlncw6qhx5jcclx-nix-1.11.9";
-  i686-linux = "/nix/store/cgvavixkayc36l6kl92i8mxr6k0p2yhy-nix-1.11.9";
-  x86_64-darwin = "/nix/store/w1c96v5yxvdmq4nvqlxjvg6kp7xa2lag-nix-1.11.9";
+  x86_64-linux = "/nix/store/crqd5wmrqipl4n1fcm5kkc1zg4sj80js-nix-1.11.11";
+  i686-linux = "/nix/store/wsjn14xp5ja509d4dxb1c78zhirw0b5x-nix-1.11.11";
+  x86_64-darwin = "/nix/store/zqkqnhk85g2shxlpb04y72h1i3db3gpl-nix-1.11.11";
 }
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index d7459e3fe91c..22059bb7fbbb 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -295,6 +295,7 @@
       aria2 = 277;
       clickhouse = 278;
       rslsync = 279;
+      minio = 280;
 
       # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
 
@@ -559,6 +560,7 @@
       aria2 = 277;
       clickhouse = 278;
       rslsync = 279;
+      minio = 280;
 
       # When adding a gid, make sure it doesn't match an existing
       # uid. Users and groups with the same name should have equal
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 13924f7e507e..5d9b062f2048 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -99,6 +99,7 @@
   ./programs/spacefm.nix
   ./programs/ssh.nix
   ./programs/ssmtp.nix
+  ./programs/thefuck.nix
   ./programs/tmux.nix
   ./programs/venus.nix
   ./programs/vim.nix
@@ -251,6 +252,7 @@
   ./services/mail/exim.nix
   ./services/mail/freepops.nix
   ./services/mail/mail.nix
+  ./services/mail/mailhog.nix
   ./services/mail/mlmmj.nix
   ./services/mail/offlineimap.nix
   ./services/mail/opendkim.nix
@@ -388,7 +390,6 @@
   ./services/network-filesystems/u9fs.nix
   ./services/network-filesystems/yandex-disk.nix
   ./services/network-filesystems/xtreemfs.nix
-  ./services/networking/aiccu.nix
   ./services/networking/amuled.nix
   ./services/networking/asterisk.nix
   ./services/networking/atftpd.nix
@@ -587,6 +588,7 @@
   ./services/web-servers/lighttpd/default.nix
   ./services/web-servers/lighttpd/gitweb.nix
   ./services/web-servers/lighttpd/inginious.nix
+  ./services/web-servers/minio.nix
   ./services/web-servers/nginx/default.nix
   ./services/web-servers/phpfpm/default.nix
   ./services/web-servers/shellinabox.nix
diff --git a/nixos/modules/profiles/hardened.nix b/nixos/modules/profiles/hardened.nix
index 8bde2e4f4984..0a0838431da7 100644
--- a/nixos/modules/profiles/hardened.nix
+++ b/nixos/modules/profiles/hardened.nix
@@ -55,7 +55,7 @@ with lib;
   # same privileges as it would have inside it.  This is particularly
   # bad in the common case of running as root within the namespace.
   #
-  # Setting the number of allowed userns to 0 effectively disables
+  # Setting the number of allowed user namespaces to 0 effectively disables
   # the feature at runtime.  Attempting to create a user namespace
   # with unshare will then fail with "no space left on device".
   boot.kernel.sysctl."user.max_user_namespaces" = mkDefault 0;
diff --git a/nixos/modules/programs/gnupg.nix b/nixos/modules/programs/gnupg.nix
index c5277f40d260..68adee94f79e 100644
--- a/nixos/modules/programs/gnupg.nix
+++ b/nixos/modules/programs/gnupg.nix
@@ -21,13 +21,37 @@ in
 
     agent.enableSSHSupport = mkOption {
       type = types.bool;
-      default = true;
+      default = false;
       description = ''
         Enable SSH agent support in GnuPG agent. Also sets SSH_AUTH_SOCK
         environment variable correctly. This will disable socket-activation
         and thus always start a GnuPG agent per user session.
       '';
     };
+
+    agent.enableExtraSocket = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable extra socket for GnuPG agent.
+      '';
+    };
+
+    agent.enableBrowserSocket = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable browser socket for GnuPG agent.
+      '';
+    };
+
+    dirmngr.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enables GnuPG network certificate management daemon with socket-activation for every user session.
+      '';
+    };
   };
 
   config = mkIf cfg.agent.enable {
@@ -38,15 +62,72 @@ in
           ("${pkgs.gnupg}/bin/gpg-agent --supervised "
             + optionalString cfg.agent.enableSSHSupport "--enable-ssh-support")
         ];
+        ExecReload = "${pkgs.gnupg}/bin/gpgconf --reload gpg-agent";
       };
     };
 
     systemd.user.sockets.gpg-agent = {
       wantedBy = [ "sockets.target" ];
+      listenStreams = [ "%t/gnupg/S.gpg-agent" ];
+      socketConfig = {
+        FileDescriptorName = "std";
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
     };
 
     systemd.user.sockets.gpg-agent-ssh = mkIf cfg.agent.enableSSHSupport {
       wantedBy = [ "sockets.target" ];
+      listenStreams = [ "%t/gnupg/S.gpg-agent.ssh" ];
+      socketConfig = {
+        FileDescriptorName = "ssh";
+        Service = "gpg-agent.service";
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
+    };
+
+    systemd.user.sockets.gpg-agent-extra = mkIf cfg.agent.enableExtraSocket {
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "%t/gnupg/S.gpg-agent.extra" ];
+      socketConfig = {
+        FileDescriptorName = "extra";
+        Service = "gpg-agent.service";
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
+    };
+
+    systemd.user.sockets.gpg-agent-browser = mkIf cfg.agent.enableBrowserSocket {
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "%t/gnupg/S.gpg-agent.browser" ];
+      socketConfig = {
+        FileDescriptorName = "browser";
+        Service = "gpg-agent.service";
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
+    };
+
+    systemd.user.services.dirmngr = {
+      requires = [ "dirmngr.socket" ];
+      after = [ "dirmngr.socket" ];
+      unitConfig = {
+        RefuseManualStart = "true";
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.gnupg}/bin/dirmngr --supervised";
+        ExecReload = "${pkgs.gnupg}/bin/gpgconf --reload dirmngr";
+      };
+    };
+
+    systemd.user.sockets.dirmngr = {
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "%t/gnupg/S.dirmngr" ];
+      socketConfig = {
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
     };
 
     systemd.packages = [ pkgs.gnupg ];
diff --git a/nixos/modules/programs/ssh.nix b/nixos/modules/programs/ssh.nix
index 4faef2c609bc..e0fbba897fa4 100644
--- a/nixos/modules/programs/ssh.nix
+++ b/nixos/modules/programs/ssh.nix
@@ -74,7 +74,7 @@ in
 
       startAgent = mkOption {
         type = types.bool;
-        default = true;
+        default = false;
         description = ''
           Whether to start the OpenSSH agent when you log in.  The OpenSSH agent
           remembers private keys for you so that you don't have to type in
diff --git a/nixos/modules/programs/thefuck.nix b/nixos/modules/programs/thefuck.nix
new file mode 100644
index 000000000000..433a0ca95fef
--- /dev/null
+++ b/nixos/modules/programs/thefuck.nix
@@ -0,0 +1,31 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.thefuck;
+in
+  {
+    options = {
+      programs.thefuck = {
+        enable = mkEnableOption "thefuck";
+
+        alias = mkOption {
+          default = "fuck";
+          type = types.string;
+
+          description = ''
+            `thefuck` needs an alias to be configured.
+            The default value is `fuck`, but you can use anything else as well.
+          '';
+        };
+      };
+    };
+
+    config = mkIf cfg.enable {
+      environment.systemPackages = with pkgs; [ thefuck ];
+      environment.shellInit = ''
+        eval $(${pkgs.thefuck}/bin/thefuck --alias ${cfg.alias})
+      '';
+    };
+  }
diff --git a/nixos/modules/programs/zsh/zsh.nix b/nixos/modules/programs/zsh/zsh.nix
index acb3e987aee6..b276bf9bb73c 100644
--- a/nixos/modules/programs/zsh/zsh.nix
+++ b/nixos/modules/programs/zsh/zsh.nix
@@ -117,7 +117,7 @@ in
 
         # Tell zsh how to find installed completions
         for p in ''${(z)NIX_PROFILES}; do
-          fpath+=($p/share/zsh/site-functions $p/share/zsh/$ZSH_VERSION/functions)
+          fpath+=($p/share/zsh/site-functions $p/share/zsh/$ZSH_VERSION/functions $p/share/zsh/vendor-completions)
         done
 
         ${if cfg.enableCompletion then "autoload -U compinit && compinit" else ""}
diff --git a/nixos/modules/security/wrappers/default.nix b/nixos/modules/security/wrappers/default.nix
index c051b7d49e3f..a6dc8faaae98 100644
--- a/nixos/modules/security/wrappers/default.nix
+++ b/nixos/modules/security/wrappers/default.nix
@@ -80,8 +80,8 @@ let
                     group = "root";
                   } // s)
           else if 
-             (s ? "setuid"  && s.setuid  == true) ||
-             (s ? "setguid" && s.setguid == true) ||
+             (s ? "setuid" && s.setuid) ||
+             (s ? "setgid" && s.setgid) ||
              (s ? "permissions")
           then mkSetuidProgram s
           else mkSetuidProgram
@@ -171,7 +171,7 @@ in
 
     ###### setcap activation script
     system.activationScripts.wrappers =
-      lib.stringAfter [ "users" ]
+      lib.stringAfter [ "specialfs" "users" ]
         ''
           # Look in the system path and in the default profile for
           # programs to be wrapped.
diff --git a/nixos/modules/services/cluster/kubernetes.nix b/nixos/modules/services/cluster/kubernetes.nix
index f58306ab63ed..68917af5094c 100644
--- a/nixos/modules/services/cluster/kubernetes.nix
+++ b/nixos/modules/services/cluster/kubernetes.nix
@@ -40,7 +40,7 @@ let
   });
 
   policyFile = pkgs.writeText "kube-policy"
-    concatStringsSep "\n" (map (builtins.toJSON cfg.apiserver.authorizationPolicy));
+    (concatStringsSep "\n" (map builtins.toJSON cfg.apiserver.authorizationPolicy));
 
   cniConfig = pkgs.buildEnv {
     name = "kubernetes-cni-config";
diff --git a/nixos/modules/services/continuous-integration/hydra/default.nix b/nixos/modules/services/continuous-integration/hydra/default.nix
index c515622d11a0..fcc0f58637c4 100644
--- a/nixos/modules/services/continuous-integration/hydra/default.nix
+++ b/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -308,6 +308,7 @@ in
         requires = [ "hydra-init.service" ];
         after = [ "hydra-init.service" ];
         environment = serverEnv;
+        restartTriggers = [ hydraConf ];
         serviceConfig =
           { ExecStart =
               "@${cfg.package}/bin/hydra-server hydra-server -f -h '${cfg.listenHost}' "
@@ -324,6 +325,7 @@ in
         requires = [ "hydra-init.service" ];
         after = [ "hydra-init.service" "network.target" ];
         path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
+        restartTriggers = [ hydraConf ];
         environment = env // {
           PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
           IN_SYSTEMD = "1"; # to get log severity levels
@@ -344,7 +346,8 @@ in
       { wantedBy = [ "multi-user.target" ];
         requires = [ "hydra-init.service" ];
         after = [ "hydra-init.service" "network.target" ];
-        path = [ cfg.package pkgs.nettools ];
+        path = with pkgs; [ cfg.package nettools jq ];
+        restartTriggers = [ hydraConf ];
         environment = env;
         serviceConfig =
           { ExecStart = "@${cfg.package}/bin/hydra-evaluator hydra-evaluator";
diff --git a/nixos/modules/services/continuous-integration/jenkins/default.nix b/nixos/modules/services/continuous-integration/jenkins/default.nix
index 11adf74b9ed5..c14aa4167231 100644
--- a/nixos/modules/services/continuous-integration/jenkins/default.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/default.nix
@@ -125,6 +125,15 @@ in {
           Additional command line arguments to pass to Jenkins.
         '';
       };
+
+      extraJavaOptions = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "-Xmx80m" ];
+        description = ''
+          Additional command line arguments to pass to the Java run time (as opposed to Jenkins).
+        '';
+      };
     };
   };
 
@@ -185,7 +194,7 @@ in {
         '';
 
       script = ''
-        ${pkgs.jdk}/bin/java -jar ${pkgs.jenkins}/webapps/jenkins.war --httpListenAddress=${cfg.listenAddress} \
+        ${pkgs.jdk}/bin/java ${concatStringsSep " " cfg.extraJavaOptions} -jar ${pkgs.jenkins}/webapps/jenkins.war --httpListenAddress=${cfg.listenAddress} \
                                                   --httpPort=${toString cfg.port} \
                                                   --prefix=${cfg.prefix} \
                                                   ${concatStringsSep " " cfg.extraOptions}
diff --git a/nixos/modules/services/databases/rethinkdb.nix b/nixos/modules/services/databases/rethinkdb.nix
new file mode 100644
index 000000000000..cd8c386b08db
--- /dev/null
+++ b/nixos/modules/services/databases/rethinkdb.nix
@@ -0,0 +1,110 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rethinkdb;
+  rethinkdb = cfg.package;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.rethinkdb = {
+
+      enable = mkOption {
+        default = false;
+        description = "Whether to enable the RethinkDB server.";
+      };
+
+      #package = mkOption {
+      #  default = pkgs.rethinkdb;
+      #  description = "Which RethinkDB derivation to use.";
+      #};
+
+      user = mkOption {
+        default = "rethinkdb";
+        description = "User account under which RethinkDB runs.";
+      };
+
+      group = mkOption {
+        default = "rethinkdb";
+        description = "Group which rethinkdb user belongs to.";
+      };
+
+      dbpath = mkOption {
+        default = "/var/db/rethinkdb";
+        description = "Location where RethinkDB stores its data, 1 data directory per instance.";
+      };
+
+      pidpath = mkOption {
+        default = "/var/run/rethinkdb";
+        description = "Location where each instance's pid file is located.";
+      };
+
+      #cfgpath = mkOption {
+      #  default = "/etc/rethinkdb/instances.d";
+      #  description = "Location where RethinkDB stores it config files, 1 config file per instance.";
+      #};
+
+      # TODO: currently not used by our implementation.
+      #instances = mkOption {
+      #  type = types.attrsOf types.str;
+      #  default = {};
+      #  description = "List of named RethinkDB instances in our cluster.";
+      #};
+
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf config.services.rethinkdb.enable {
+
+    environment.systemPackages = [ rethinkdb ];
+
+    systemd.services.rethinkdb = {
+      description = "RethinkDB server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        # TODO: abstract away 'default', which is a per-instance directory name
+        #       allowing end user of this nix module to provide multiple instances,
+        #       and associated directory per instance
+        ExecStart = "${rethinkdb}/bin/rethinkdb -d ${cfg.dbpath}/default";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        User = cfg.user;
+        Group = cfg.group;
+        PIDFile = "${cfg.pidpath}/default.pid";
+        PermissionsStartOnly = true;
+      };
+
+      preStart = ''
+        if ! test -e ${cfg.dbpath}; then
+            install -d -m0755 -o ${cfg.user} -g ${cfg.group} ${cfg.dbpath}
+            install -d -m0755 -o ${cfg.user} -g ${cfg.group} ${cfg.dbpath}/default
+            chown -R ${cfg.user}:${cfg.group} ${cfg.dbpath}
+        fi
+        if ! test -e "${cfg.pidpath}/default.pid"; then
+            install -D -o ${cfg.user} -g ${cfg.group} /dev/null "${cfg.pidpath}/default.pid"
+        fi
+      '';
+    };
+
+    users.extraUsers.rethinkdb = mkIf (cfg.user == "rethinkdb")
+      { name = "rethinkdb";
+        description = "RethinkDB server user";
+      };
+
+    users.extraGroups = optionalAttrs (cfg.group == "rethinkdb") (singleton
+      { name = "rethinkdb";
+      });
+
+  };
+
+}
diff --git a/nixos/modules/services/logging/logstash.nix b/nixos/modules/services/logging/logstash.nix
index c9477b9e3ab0..b4abd2cd7e5e 100644
--- a/nixos/modules/services/logging/logstash.nix
+++ b/nixos/modules/services/logging/logstash.nix
@@ -4,17 +4,46 @@ with lib;
 
 let
   cfg = config.services.logstash;
+  atLeast54 = versionAtLeast (builtins.parseDrvName cfg.package.name).version "5.4";
   pluginPath = lib.concatStringsSep ":" cfg.plugins;
   havePluginPath = lib.length cfg.plugins > 0;
   ops = lib.optionalString;
-  verbosityFlag = {
-    debug = "--debug";
-    info  = "--verbose";
-    warn  = ""; # intentionally empty
-    error = "--quiet";
-    fatal = "--silent";
-  }."${cfg.logLevel}";
-
+  verbosityFlag =
+    if atLeast54
+    then "--log.level " + cfg.logLevel
+    else {
+      debug = "--debug";
+      info  = "--verbose";
+      warn  = ""; # intentionally empty
+      error = "--quiet";
+      fatal = "--silent";
+    }."${cfg.logLevel}";
+
+  pluginsPath =
+    if atLeast54
+    then "--path.plugins ${pluginPath}"
+    else "--pluginpath ${pluginPath}";
+
+  logstashConf = pkgs.writeText "logstash.conf" ''
+    input {
+      ${cfg.inputConfig}
+    }
+
+    filter {
+      ${cfg.filterConfig}
+    }
+
+    output {
+      ${cfg.outputConfig}
+    }
+  '';
+
+  logstashSettingsYml = pkgs.writeText "logstash.yml" cfg.extraSettings;
+
+  logstashSettingsDir = pkgs.runCommand "logstash-settings" {inherit logstashSettingsYml;} ''
+    mkdir -p $out
+    ln -s $logstashSettingsYml $out/logstash.yml
+  '';
 in
 
 {
@@ -45,6 +74,15 @@ in
         description = "The paths to find other logstash plugins in.";
       };
 
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/logstash";
+        description = ''
+          A path to directory writable by logstash that it uses to store data.
+          Plugins will also have access to this path.
+        '';
+      };
+
       logLevel = mkOption {
         type = types.enum [ "debug" "info" "warn" "error" "fatal" ];
         default = "warn";
@@ -116,6 +154,19 @@ in
         '';
       };
 
+      extraSettings = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Extra Logstash settings in YAML format.";
+        example = ''
+          pipeline:
+            batch:
+              size: 125
+              delay: 5
+        '';
+      };
+
+
     };
   };
 
@@ -123,31 +174,34 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
+    assertions = [
+      { assertion = atLeast54 -> !cfg.enableWeb;
+        message = ''
+          The logstash web interface is only available for versions older than 5.4.
+          So either set services.logstash.enableWeb = false,
+          or set services.logstash.package to an older logstash.
+        '';
+      }
+    ];
+
     systemd.services.logstash = with pkgs; {
       description = "Logstash Daemon";
       wantedBy = [ "multi-user.target" ];
       environment = { JAVA_HOME = jre; };
       path = [ pkgs.bash ];
       serviceConfig = {
-        ExecStart =
-          "${cfg.package}/bin/logstash agent " +
-          "-w ${toString cfg.filterWorkers} " +
-          ops havePluginPath "--pluginpath ${pluginPath} " +
-          "${verbosityFlag} " +
-          "-f ${writeText "logstash.conf" ''
-            input {
-              ${cfg.inputConfig}
-            }
-
-            filter {
-              ${cfg.filterConfig}
-            }
-
-            output {
-              ${cfg.outputConfig}
-            }
-          ''} " +
-          ops cfg.enableWeb "-- web -a ${cfg.listenAddress} -p ${cfg.port}";
+        ExecStartPre = ''${pkgs.coreutils}/bin/mkdir -p "${cfg.dataDir}" ; ${pkgs.coreutils}/bin/chmod 700 "${cfg.dataDir}"'';
+        ExecStart = concatStringsSep " " (filter (s: stringLength s != 0) [
+          "${cfg.package}/bin/logstash"
+          (ops (!atLeast54) "agent")
+          "-w ${toString cfg.filterWorkers}"
+          (ops havePluginPath pluginsPath)
+          "${verbosityFlag}"
+          "-f ${logstashConf}"
+          (ops atLeast54 "--path.settings ${logstashSettingsDir}")
+          (ops atLeast54 "--path.data ${cfg.dataDir}")
+          (ops cfg.enableWeb "-- web -a ${cfg.listenAddress} -p ${cfg.port}")
+        ]);
       };
     };
   };
diff --git a/nixos/modules/services/mail/mailhog.nix b/nixos/modules/services/mail/mailhog.nix
new file mode 100644
index 000000000000..206fb50d31a2
--- /dev/null
+++ b/nixos/modules/services/mail/mailhog.nix
@@ -0,0 +1,43 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mailhog;
+in {
+  ###### interface
+
+  options = {
+
+    services.mailhog = {
+      enable = mkEnableOption "MailHog";
+      user = mkOption {
+        type = types.str;
+        default = "mailhog";
+        description = "User account under which mailhog runs.";
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.extraUsers.mailhog = {
+      name = cfg.user;
+      description = "MailHog service user";
+    };
+
+    systemd.services.mailhog = {
+      description = "MailHog service";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.mailhog}/bin/MailHog";
+        User = cfg.user;
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/misc/errbot.nix b/nixos/modules/services/misc/errbot.nix
index 427cb7c546d0..cb2fa6776240 100644
--- a/nixos/modules/services/misc/errbot.nix
+++ b/nixos/modules/services/misc/errbot.nix
@@ -84,7 +84,7 @@ in {
       dataDir = if !isNull instanceCfg.dataDir then instanceCfg.dataDir else
         "/var/lib/errbot/${name}";
     in {
-      after = [ "network.target" ];
+      after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
         mkdir -p ${dataDir}
diff --git a/nixos/modules/services/misc/plex.nix b/nixos/modules/services/misc/plex.nix
index ecd9a6f52da2..e37b486375bd 100644
--- a/nixos/modules/services/misc/plex.nix
+++ b/nixos/modules/services/misc/plex.nix
@@ -82,7 +82,7 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
-        test -d "${cfg.dataDir}" || {
+        test -d "${cfg.dataDir}/Plex Media Server" || {
           echo "Creating initial Plex data directory in \"${cfg.dataDir}\"."
           mkdir -p "${cfg.dataDir}/Plex Media Server"
           chown -R ${cfg.user}:${cfg.group} "${cfg.dataDir}"
diff --git a/nixos/modules/services/monitoring/prometheus/default.nix b/nixos/modules/services/monitoring/prometheus/default.nix
index b9435c02b1de..c12b5f35dea3 100644
--- a/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixos/modules/services/monitoring/prometheus/default.nix
@@ -66,15 +66,6 @@ let
           How frequently to evaluate rules by default.
         '';
       };
-
-      labels = mkOption {
-        type = types.attrsOf types.str;
-        default = {};
-        description = ''
-          The labels to add to any timeseries that this Prometheus instance
-          scrapes.
-        '';
-      };
     };
   };
 
diff --git a/nixos/modules/services/networking/aiccu.nix b/nixos/modules/services/networking/aiccu.nix
deleted file mode 100644
index ac755270951b..000000000000
--- a/nixos/modules/services/networking/aiccu.nix
+++ /dev/null
@@ -1,185 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-
-  cfg = config.services.aiccu;
-  notNull = a: ! isNull a;
-  configFile = pkgs.writeText "aiccu.conf" ''
-    ${if notNull cfg.username then "username " + cfg.username else ""}
-    ${if notNull cfg.password then "password " + cfg.password else ""}
-    protocol ${cfg.protocol}
-    server ${cfg.server}
-    ipv6_interface ${cfg.interfaceName}
-    verbose ${boolToString cfg.verbose}
-    daemonize true
-    automatic ${boolToString cfg.automatic}
-    requiretls ${boolToString cfg.requireTLS}
-    pidfile ${cfg.pidFile}
-    defaultroute ${boolToString cfg.defaultRoute}
-    ${if notNull cfg.setupScript then cfg.setupScript else ""}
-    makebeats ${boolToString cfg.makeHeartBeats}
-    noconfigure ${boolToString cfg.noConfigure}
-    behindnat ${boolToString cfg.behindNAT}
-    ${if cfg.localIPv4Override then "local_ipv4_override" else ""}
-  '';
-
-in {
-
-  options = {
-
-    services.aiccu = {
-
-      enable = mkOption {
-        type = types.bool;
-        default = false;
-        description = "Enable aiccu IPv6 over IPv4 SiXXs tunnel";
-      };
-
-      username = mkOption {
-        type = with types; nullOr str;
-        default = null;
-        example = "FAB5-SIXXS";
-        description = "Login credential";
-      };
-
-      password = mkOption {
-        type = with types; nullOr str;
-        default = null;
-        example = "TmAkRbBEr0";
-        description = "Login credential";
-      };
-
-      protocol = mkOption {
-        type = types.str;
-        default = "tic";
-        example = "tic|tsp|l2tp";
-        description = "Protocol to use for setting up the tunnel";
-      };
-
-      server = mkOption {
-        type = types.str;
-        default = "tic.sixxs.net";
-        example = "enabled.ipv6server.net";
-        description = "Server to use for setting up the tunnel";
-      };
-
-      interfaceName = mkOption {
-        type = types.str;
-        default = "aiccu";
-        example = "sixxs";
-        description = ''
-          The name of the interface that will be used as a tunnel interface.
-          On *BSD the ipv6_interface should be set to gifX (eg gif0) for proto-41 tunnels
-          or tunX (eg tun0) for AYIYA tunnels.
-        '';
-      };
-
-      tunnelID = mkOption {
-        type = with types; nullOr str;
-        default = null;
-        example = "T12345";
-        description = "The tunnel id to use, only required when there are multiple tunnels in the list";
-      };
-
-      verbose = mkOption {
-        type = types.bool;
-        default = false;
-        description = "Be verbose?";
-      };
-
-      automatic = mkOption {
-        type = types.bool;
-        default = true;
-        description = "Automatic Login and Tunnel activation";
-      };
-
-      requireTLS = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          When set to true, if TLS is not supported on the server
-          the TIC transaction will fail.
-          When set to false, it will try a starttls, when that is
-          not supported it will continue.
-          In any case if AICCU is build with TLS support it will
-          try to do a 'starttls' to the TIC server to see if that
-          is supported.
-        '';
-      };
-
-      pidFile = mkOption {
-        type = types.path;
-        default = "/run/aiccu.pid";
-        example = "/var/lib/aiccu/aiccu.pid";
-        description = "Location of PID File";
-      };
-
-      defaultRoute = mkOption {
-        type = types.bool;
-        default = true;
-        description = "Add a default route";
-      };
-
-      setupScript = mkOption {
-        type = with types; nullOr path;
-        default = null;
-        example = "/var/lib/aiccu/fix-subnets.sh";
-        description = "Script to run after setting up the interfaces";
-      };
-
-      makeHeartBeats = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          In general you don't want to turn this off
-          Of course only applies to AYIYA and heartbeat tunnels not to static ones
-        '';
-      };
-
-      noConfigure = mkOption {
-        type = types.bool;
-        default = false;
-        description = "Don't configure anything";
-      };
-
-      behindNAT = mkOption {
-        type = types.bool;
-        default = false;
-        description = "Notify the user that a NAT-kind network is detected";
-      };
-
-      localIPv4Override = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Overrides the IPv4 parameter received from TIC
-          This allows one to configure a NAT into "DMZ" mode and then
-          forwarding the proto-41 packets to an internal host.
-
-          This is only needed for static proto-41 tunnels!
-          AYIYA and heartbeat tunnels don't require this.
-        '';
-      };
-
-    };
-  };
-
-  config = mkIf cfg.enable {
-
-    systemd.services.aiccu = {
-      description = "Automatic IPv6 Connectivity Client Utility";
-      after = [ "network.target" ];
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        ExecStart = "${pkgs.aiccu}/bin/aiccu start ${configFile}";
-        ExecStop = "${pkgs.aiccu}/bin/aiccu stop";
-        Type = "forking";
-        PIDFile = cfg.pidFile;
-        Restart = "no"; # aiccu startup errors are serious, do not pound the tic server or be banned.
-      };
-    };
-
-  };
-}
diff --git a/nixos/modules/services/networking/bind.nix b/nixos/modules/services/networking/bind.nix
index ca375c935e83..763283dfe7a2 100644
--- a/nixos/modules/services/networking/bind.nix
+++ b/nixos/modules/services/networking/bind.nix
@@ -10,6 +10,11 @@ let
 
   confFile = pkgs.writeText "named.conf"
     ''
+      include "/etc/bind/rndc.key";
+      controls {
+        inet 127.0.0.1 allow {localhost;} keys {"rndc-key";};
+      };
+
       acl cachenetworks { ${concatMapStrings (entry: " ${entry}; ") cfg.cacheNetworks} };
       acl badnetworks { ${concatMapStrings (entry: " ${entry}; ") cfg.blockedNetworks} };
 
@@ -167,11 +172,21 @@ in
       wantedBy = [ "multi-user.target" ];
 
       preStart = ''
+        mkdir -m 0755 -p /etc/bind
+        if ! [ -f "/etc/bind/rndc.key" ]; then
+          ${pkgs.bind.out}/sbin/rndc-confgen -r /dev/urandom -c /etc/bind/rndc.key -u ${bindUser} -a -A hmac-sha256 2>/dev/null
+        fi
+
         ${pkgs.coreutils}/bin/mkdir -p /var/run/named
         chown ${bindUser} /var/run/named
       '';
 
-      script = "${pkgs.bind.out}/sbin/named -u ${bindUser} ${optionalString cfg.ipv4Only "-4"} -c ${cfg.configFile} -f";
+      serviceConfig = {
+        ExecStart  = "${pkgs.bind.out}/sbin/named -u ${bindUser} ${optionalString cfg.ipv4Only "-4"} -c ${cfg.configFile} -f";
+        ExecReload = "${pkgs.bind.out}/sbin/rndc -k '/etc/bind/rndc.key' reload";
+        ExecStop   = "${pkgs.bind.out}/sbin/rndc -k '/etc/bind/rndc.key' stop";
+      };
+
       unitConfig.Documentation = "man:named(8)";
     };
   };
diff --git a/nixos/modules/services/networking/cntlm.nix b/nixos/modules/services/networking/cntlm.nix
index 890ff5084078..3978a1969ce9 100644
--- a/nixos/modules/services/networking/cntlm.nix
+++ b/nixos/modules/services/networking/cntlm.nix
@@ -5,110 +5,122 @@ with lib;
 let
 
   cfg = config.services.cntlm;
-  uid = config.ids.uids.cntlm;
+
+  configFile = if cfg.configText != "" then
+    pkgs.writeText "cntlm.conf" ''
+      ${cfg.configText}
+    ''
+    else
+    pkgs.writeText "cntlm.conf" ''
+      # Cntlm Authentication Proxy Configuration
+      Username ${cfg.username}
+      Domain ${cfg.domain}
+      Password ${cfg.password}
+      ${optionalString (cfg.netbios_hostname != "") "Workstation ${cfg.netbios_hostname}"}
+      ${concatMapStrings (entry: "Proxy ${entry}\n") cfg.proxy}
+      ${optionalString (cfg.noproxy != []) "NoProxy ${concatStringsSep ", " cfg.noproxy}"}
+
+      ${concatMapStrings (port: ''
+        Listen ${toString port}
+      '') cfg.port}
+
+      ${cfg.extraConfig}
+    '';
 
 in
 
 {
 
-  options = {
+  options.services.cntlm = {
 
-    services.cntlm = {
+    enable = mkOption {
+      default = false;
+      description = ''
+        Whether to enable cntlm, which starts a local proxy.
+      '';
+    };
 
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to enable the cntlm, which start a local proxy.
-        '';
-      };
+    username = mkOption {
+      description = ''
+        Proxy account name, without the possibility to include domain name ('at' sign is interpreted literally).
+      '';
+    };
 
-      username = mkOption {
-        description = ''
-          Proxy account name, without the possibility to include domain name ('at' sign is interpreted literally).
-        '';
-      };
+    domain = mkOption {
+      description = ''Proxy account domain/workgroup name.'';
+    };
 
-      domain = mkOption {
-        description = ''Proxy account domain/workgroup name.'';
-      };
+    password = mkOption {
+      default = "/etc/cntlm.password";
+      type = types.str;
+      description = ''Proxy account password. Note: use chmod 0600 on /etc/cntlm.password for security.'';
+    };
 
-      password = mkOption {
-        default = "/etc/cntlm.password";
-        type = types.str;
-        description = ''Proxy account password. Note: use chmod 0600 on /etc/cntlm.password for security.'';
-      };
+    netbios_hostname = mkOption {
+      type = types.str;
+      default = "";
+      description = ''
+        The hostname of your machine.
+      '';
+    };
 
-      netbios_hostname = mkOption {
-        type = types.str;
-        description = ''
-          The hostname of your machine.
-        '';
-      };
+    proxy = mkOption {
+      description = ''
+        A list of NTLM/NTLMv2 authenticating HTTP proxies.
 
-      proxy = mkOption {
-        description = ''
-          A list of NTLM/NTLMv2 authenticating HTTP proxies.
+        Parent proxy, which requires authentication. The same as proxy on the command-line, can be used more than  once  to  specify  unlimited
+        number  of  proxies.  Should  one proxy fail, cntlm automatically moves on to the next one. The connect request fails only if the whole
+        list of proxies is scanned and (for each request) and found to be invalid. Command-line takes precedence over the configuration file.
+      '';
+      example = [ "proxy.example.com:81" ];
+    };
 
-          Parent proxy, which requires authentication. The same as proxy on the command-line, can be used more than  once  to  specify  unlimited
-          number  of  proxies.  Should  one proxy fail, cntlm automatically moves on to the next one. The connect request fails only if the whole
-          list of proxies is scanned and (for each request) and found to be invalid. Command-line takes precedence over the configuration file.
-        '';
-      };
+    noproxy = mkOption {
+      description = ''
+        A list of domains where the proxy is skipped.
+      '';
+      default = [];
+      example = [ "*.example.com" "example.com" ];
+    };
 
-      port = mkOption {
-        default = [3128];
-        description = "Specifies on which ports the cntlm daemon listens.";
-      };
+    port = mkOption {
+      default = [3128];
+      description = "Specifies on which ports the cntlm daemon listens.";
+    };
 
-     extraConfig = mkOption {
-        type = types.lines;
-        default = "";
-        description = "Verbatim contents of <filename>cntlm.conf</filename>.";
-     };
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = "Additional config appended to the end of the generated <filename>cntlm.conf</filename>.";
+    };
 
+    configText = mkOption {
+       type = types.lines;
+       default = "";
+       description = "Verbatim contents of <filename>cntlm.conf</filename>.";
     };
 
   };
 
-
   ###### implementation
 
-  config = mkIf config.services.cntlm.enable {
+  config = mkIf cfg.enable {
     systemd.services.cntlm = {
       description = "CNTLM is an NTLM / NTLM Session Response / NTLMv2 authenticating HTTP proxy";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        Type = "forking";
         User = "cntlm";
         ExecStart = ''
-          ${pkgs.cntlm}/bin/cntlm -U cntlm \
-            -c ${pkgs.writeText "cntlm_config" cfg.extraConfig}
+          ${pkgs.cntlm}/bin/cntlm -U cntlm -c ${configFile} -v -f
         '';
-      };  
+      };
     };
-   
-    services.cntlm.netbios_hostname = mkDefault config.networking.hostName;
-  
-    users.extraUsers.cntlm =  { 
+
+    users.extraUsers.cntlm = {
       name = "cntlm";
       description = "cntlm system-wide daemon";
-      home = "/var/empty";
+      isSystemUser = true;
     };
-
-    services.cntlm.extraConfig =
-      ''
-        # Cntlm Authentication Proxy Configuration
-        Username        ${cfg.username}
-        Domain          ${cfg.domain}
-        Password        ${cfg.password}
-        Workstation     ${cfg.netbios_hostname}
-        ${concatMapStrings (entry: "Proxy ${entry}\n") cfg.proxy}
-    
-        ${concatMapStrings (port: ''
-          Listen ${toString port}
-        '') cfg.port}
-      '';      
   };
-  
 }
diff --git a/nixos/modules/services/networking/hostapd.nix b/nixos/modules/services/networking/hostapd.nix
index fd4545e88e2d..bf2d0916fa36 100644
--- a/nixos/modules/services/networking/hostapd.nix
+++ b/nixos/modules/services/networking/hostapd.nix
@@ -164,7 +164,7 @@ in
         path = [ pkgs.hostapd ];
         wantedBy = [ "network.target" ];
 
-        after = [ "${cfg.interface}-cfg.service" "nat.service" "bind.service" "dhcpd.service"];
+        after = [ "${cfg.interface}-cfg.service" "nat.service" "bind.service" "dhcpd.service" "sys-subsystem-net-devices-${cfg.interface}.device" ];
 
         serviceConfig =
           { ExecStart = "${pkgs.hostapd}/bin/hostapd ${configFile}";
diff --git a/nixos/modules/services/networking/nsd.nix b/nixos/modules/services/networking/nsd.nix
index 0f01b9d4af04..c8b8ed547ebb 100644
--- a/nixos/modules/services/networking/nsd.nix
+++ b/nixos/modules/services/networking/nsd.nix
@@ -811,6 +811,7 @@ in
 
       serviceConfig = {
         ExecStart = "${nsdPkg}/sbin/nsd -d -c ${nsdEnv}/nsd.conf";
+        StandardError = "null";
         PIDFile = pidFile;
         Restart = "always";
         RestartSec = "4s";
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index bebf4e145ca8..0f58536b4b73 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -21,6 +21,8 @@ let
           daemon reads in addition to the the user's authorized_keys file.
           You can combine the <literal>keys</literal> and
           <literal>keyFiles</literal> options.
+          Warning: If you are using <literal>NixOps</literal> then don't use this 
+          option since it will replace the key required for deployment via ssh.
         '';
       };
 
diff --git a/nixos/modules/services/networking/toxvpn.nix b/nixos/modules/services/networking/toxvpn.nix
index 911836fdee42..5e13402d7645 100644
--- a/nixos/modules/services/networking/toxvpn.nix
+++ b/nixos/modules/services/networking/toxvpn.nix
@@ -18,6 +18,13 @@ with lib;
         default     = 33445;
         description = "udp port for toxcore, port-forward to help with connectivity if you run many nodes behind one NAT";
       };
+
+      auto_add_peers = mkOption {
+        type        = types.listOf types.string;
+        default     = [];
+        example     = ''[ "toxid1" "toxid2" ]'';
+        description = "peers to automatically connect to on startup";
+      };
     };
   };
 
@@ -33,8 +40,13 @@ with lib;
         chown toxvpn /run/toxvpn
       '';
 
+      path = [ pkgs.toxvpn ];
+
+      script = ''
+        exec toxvpn -i ${config.services.toxvpn.localip} -l /run/toxvpn/control -u toxvpn -p ${toString config.services.toxvpn.port} ${lib.concatMapStringsSep " " (x: "-a ${x}") config.services.toxvpn.auto_add_peers}
+      '';
+
       serviceConfig = {
-        ExecStart = "${pkgs.toxvpn}/bin/toxvpn -i ${config.services.toxvpn.localip} -l /run/toxvpn/control -u toxvpn -p ${toString config.services.toxvpn.port}";
         KillMode  = "process";
         Restart   = "on-success";
         Type      = "notify";
@@ -43,6 +55,8 @@ with lib;
       restartIfChanged = false; # Likely to be used for remote admin
     };
 
+    environment.systemPackages = [ pkgs.toxvpn ];
+
     users.extraUsers = {
       toxvpn = {
         uid        = config.ids.uids.toxvpn;
diff --git a/nixos/modules/services/search/elasticsearch.nix b/nixos/modules/services/search/elasticsearch.nix
index c76c86b0cadc..2ea22a945940 100644
--- a/nixos/modules/services/search/elasticsearch.nix
+++ b/nixos/modules/services/search/elasticsearch.nix
@@ -188,11 +188,6 @@ in {
         ln -sfT ${cfg.package}/modules ${cfg.dataDir}/modules
         if [ "$(id -u)" = 0 ]; then chown -R elasticsearch ${cfg.dataDir}; fi
       '';
-      postStart = mkBefore ''
-        until ${pkgs.curl.bin}/bin/curl -s -o /dev/null ${cfg.listenAddress}:${toString cfg.port}; do
-          sleep 1
-        done
-      '';
     };
 
     environment.systemPackages = [ cfg.package ];
diff --git a/nixos/modules/services/search/kibana.nix b/nixos/modules/services/search/kibana.nix
index d377a6feeb8e..9d7d2d799189 100644
--- a/nixos/modules/services/search/kibana.nix
+++ b/nixos/modules/services/search/kibana.nix
@@ -5,7 +5,11 @@ with lib;
 let
   cfg = config.services.kibana;
 
-  cfgFile = pkgs.writeText "kibana.json" (builtins.toJSON (
+  atLeast54 = versionAtLeast (builtins.parseDrvName cfg.package.name).version "5.4";
+
+  cfgFile = if atLeast54 then cfgFile5 else cfgFile4;
+
+  cfgFile4 = pkgs.writeText "kibana.json" (builtins.toJSON (
     (filterAttrsRecursive (n: v: v != null) ({
       host = cfg.listenAddress;
       port = cfg.port;
@@ -36,6 +40,27 @@ let
       ];
     } // cfg.extraConf)
   )));
+
+  cfgFile5 = pkgs.writeText "kibana.json" (builtins.toJSON (
+    (filterAttrsRecursive (n: v: v != null) ({
+      server.host = cfg.listenAddress;
+      server.port = cfg.port;
+      server.ssl.certificate = cfg.cert;
+      server.ssl.key = cfg.key;
+
+      kibana.index = cfg.index;
+      kibana.defaultAppId = cfg.defaultAppId;
+
+      elasticsearch.url = cfg.elasticsearch.url;
+      elasticsearch.username = cfg.elasticsearch.username;
+      elasticsearch.password = cfg.elasticsearch.password;
+
+      elasticsearch.ssl.certificate = cfg.elasticsearch.cert;
+      elasticsearch.ssl.key = cfg.elasticsearch.key;
+      elasticsearch.ssl.certificateAuthorities = cfg.elasticsearch.certificateAuthorities;
+    } // cfg.extraConf)
+  )));
+
 in {
   options.services.kibana = {
     enable = mkEnableOption "enable kibana service";
@@ -96,11 +121,29 @@ in {
       };
 
       ca = mkOption {
-        description = "CA file to auth against elasticsearch.";
+        description = ''
+          CA file to auth against elasticsearch.
+
+          It's recommended to use the <option>certificateAuthorities</option> option
+          when using kibana-5.4 or newer.
+        '';
         default = null;
         type = types.nullOr types.path;
       };
 
+      certificateAuthorities = mkOption {
+        description = ''
+          CA files to auth against elasticsearch.
+
+          Please use the <option>ca</option> option when using kibana &lt; 5.4
+          because those old versions don't support setting multiple CA's.
+
+          This defaults to the singleton list [ca] when the <option>ca</option> option is defined.
+        '';
+        default = if isNull cfg.elasticsearch.ca then [] else [ca];
+        type = types.listOf types.path;
+      };
+
       cert = mkOption {
         description = "Certificate file to auth against elasticsearch.";
         default = null;
@@ -118,6 +161,7 @@ in {
       description = "Kibana package to use";
       default = pkgs.kibana;
       defaultText = "pkgs.kibana";
+      example = "pkgs.kibana5";
       type = types.package;
     };
 
diff --git a/nixos/modules/services/web-servers/apache-httpd/default.nix b/nixos/modules/services/web-servers/apache-httpd/default.nix
index ed77e0844769..1c3c7835d961 100644
--- a/nixos/modules/services/web-servers/apache-httpd/default.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/default.nix
@@ -16,7 +16,7 @@ let
 
   phpMajorVersion = head (splitString "." php.version);
 
-  mod_perl = pkgs.mod_perl.override { apacheHttpd = httpd; };
+  mod_perl = pkgs.apacheHttpdPackages.mod_perl.override { apacheHttpd = httpd; };
 
   defaultListen = cfg: if cfg.enableSSL
     then [{ip = "*"; port = 443;}]
diff --git a/nixos/modules/services/web-servers/caddy.nix b/nixos/modules/services/web-servers/caddy.nix
index eec285f6bc44..ee32a1c86d4d 100644
--- a/nixos/modules/services/web-servers/caddy.nix
+++ b/nixos/modules/services/web-servers/caddy.nix
@@ -36,7 +36,11 @@ in
     dataDir = mkOption {
       default = "/var/lib/caddy";
       type = types.path;
-      description = "The data directory, for storing certificates.";
+      description = ''
+        The data directory, for storing certificates. Before 17.09, this
+        would create a .caddy directory. With 17.09 the contents of the
+        .caddy directory are in the specified data directory instead.
+      '';
     };
 
     package = mkOption {
@@ -50,17 +54,32 @@ in
   config = mkIf cfg.enable {
     systemd.services.caddy = {
       description = "Caddy web server";
-      after = [ "network.target" ];
+      after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
+      environment = mkIf (versionAtLeast config.system.stateVersion "17.09")
+        { CADDYPATH = cfg.dataDir; };
       serviceConfig = {
-        ExecStart = ''${cfg.package.bin}/bin/caddy -conf=${configFile} \
-          -ca=${cfg.ca} -email=${cfg.email} ${optionalString cfg.agree "-agree"}
+        ExecStart = ''
+          ${cfg.package.bin}/bin/caddy -root=/var/tmp -conf=${configFile} \
+            -ca=${cfg.ca} -email=${cfg.email} ${optionalString cfg.agree "-agree"}
         '';
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
         Type = "simple";
         User = "caddy";
         Group = "caddy";
+        Restart = "on-failure";
+        StartLimitInterval = 86400;
+        StartLimitBurst = 5;
         AmbientCapabilities = "cap_net_bind_service";
-        LimitNOFILE = 8192;
+        CapabilityBoundingSet = "cap_net_bind_service";
+        NoNewPrivileges = true;
+        LimitNPROC = 64;
+        LimitNOFILE = 1048576;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHome = true;
+        ProtectSystem = "full";
+        ReadWriteDirectories = cfg.dataDir;
       };
     };
 
diff --git a/nixos/modules/services/web-servers/minio.nix b/nixos/modules/services/web-servers/minio.nix
new file mode 100644
index 000000000000..1893edf3a776
--- /dev/null
+++ b/nixos/modules/services/web-servers/minio.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.minio;
+in
+{
+  meta.maintainers = [ maintainers.bachp ];
+
+  options.services.minio = {
+    enable = mkEnableOption "Minio Object Storage";
+
+    listenAddress = mkOption {
+      default = ":9000";
+      type = types.str;
+      description = "Listen on a specific IP address and port.";
+    };
+
+    dataDir = mkOption {
+      default = "/var/lib/minio/data";
+      type = types.path;
+      description = "The data directory, for storing the objects.";
+    };
+
+    configDir = mkOption {
+      default = "/var/lib/minio/config";
+      type = types.path;
+      description = "The config directory, for the access keys and other settings.";
+    };
+
+    package = mkOption {
+      default = pkgs.minio;
+      defaultText = "pkgs.minio";
+      type = types.package;
+      description = "Minio package to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.minio = {
+      description = "Minio Object Storage";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        # Make sure directories exist with correct owner
+        mkdir -p ${cfg.configDir}
+        chown -R minio:minio ${cfg.configDir}
+        mkdir -p ${cfg.dataDir}
+        chown minio:minio ${cfg.dataDir}
+      '';
+      serviceConfig = {
+        PermissionsStartOnly = true;
+        ExecStart = "${cfg.package}/bin/minio server --address ${cfg.listenAddress} --config-dir=${cfg.configDir} ${cfg.dataDir}";
+        Type = "simple";
+        User = "minio";
+        Group = "minio";
+        LimitNOFILE = 65536;
+      };
+    };
+
+    users.extraUsers.minio = {
+      group = "minio";
+      uid = config.ids.uids.minio;
+    };
+
+    users.extraGroups.minio.gid = config.ids.uids.minio;
+  };
+}
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index 5f99c2bcf1f1..c043884f8e0b 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -35,10 +35,10 @@ let
      chmod -R a+w $out/share/gsettings-schemas/nixos-gsettings-overrides
      cat - > $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas/nixos-defaults.gschema.override <<- EOF
        [org.gnome.desktop.background]
-       picture-uri='${pkgs.nixos-artwork}/share/artwork/gnome/Gnome_Dark.png'
+       picture-uri='${pkgs.nixos-artwork.wallpapers.gnome-dark}/share/artwork/gnome/Gnome_Dark.png'
 
        [org.gnome.desktop.screensaver]
-       picture-uri='${pkgs.nixos-artwork}/share/artwork/gnome/Gnome_Dark.png'
+       picture-uri='${pkgs.nixos-artwork.wallpapers.gnome-dark}/share/artwork/gnome/Gnome_Dark.png'
 
        ${cfg.extraGSettingsOverrides}
      EOF
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index 2216104be31a..f099117f4777 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -7,7 +7,7 @@ let
   xcfg = config.services.xserver;
   cfg = xcfg.desktopManager.plasma5;
 
-  inherit (pkgs) kdeWrapper kdeApplications plasma5 libsForQt5 qt5 xorg;
+  inherit (pkgs) kdeApplications plasma5 libsForQt5 qt5 xorg;
 
 in
 
@@ -30,24 +30,12 @@ in
         '';
       };
 
-      extraPackages = mkOption {
-        type = types.listOf types.package;
-        default = [];
-        description = ''
-          KDE packages that need to be installed system-wide.
-        '';
-      };
-
     };
 
   };
 
 
   config = mkMerge [
-    (mkIf (cfg.extraPackages != []) {
-      environment.systemPackages = [ (kdeWrapper cfg.extraPackages) ];
-    })
-
     (mkIf (xcfg.enable && cfg.enable) {
       services.xserver.desktopManager.session = singleton {
         name = "plasma5";
@@ -64,8 +52,8 @@ in
       };
 
       security.wrappers = {
-        kcheckpass.source = "${plasma5.plasma-workspace.out}/lib/libexec/kcheckpass";
-        "start_kdeinit".source = "${pkgs.kinit.out}/lib/libexec/kf5/start_kdeinit";
+        kcheckpass.source = "${lib.getBin plasma5.plasma-workspace}/lib/libexec/kcheckpass";
+        "start_kdeinit".source = "${lib.getBin pkgs.kinit}/lib/libexec/kf5/start_kdeinit";
       };
 
       environment.systemPackages = with pkgs; with qt5; with libsForQt5; with plasma5; with kdeApplications;
@@ -139,10 +127,14 @@ in
           plasma-workspace
           plasma-workspace-wallpapers
 
+          dolphin
           dolphin-plugins
           ffmpegthumbs
           kdegraphics-thumbnailers
+          khelpcenter
           kio-extras
+          konsole
+          oxygen
           print-manager
 
           breeze-icons
@@ -163,16 +155,6 @@ in
         ++ lib.optional config.services.colord.enable colord-kde
         ++ lib.optionals config.services.samba.enable [ kdenetwork-filesharing pkgs.samba ];
 
-      services.xserver.desktopManager.plasma5.extraPackages =
-        with kdeApplications; with plasma5;
-        [
-          khelpcenter
-          oxygen
-
-          dolphin
-          konsole
-        ];
-
       environment.pathsToLink = [ "/share" ];
 
       environment.etc = singleton {
@@ -183,7 +165,6 @@ in
       environment.variables = {
         # Enable GTK applications to load SVG icons
         GDK_PIXBUF_MODULE_FILE = "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache";
-        QT_PLUGIN_PATH = "/run/current-system/sw/lib/qt5/plugins";
       };
 
       fonts.fonts = with pkgs; [ noto-fonts hack-font ];
@@ -209,7 +190,6 @@ in
 
       services.xserver.displayManager.sddm = {
         theme = "breeze";
-        package = pkgs.sddmPlasma5;
       };
 
       security.pam.services.kde = { allowNullPassword = true; };
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 256bfb9ce3f4..1733f2fd39b2 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -111,7 +111,7 @@ in
 
       background = mkOption {
         type = types.str;
-        default = "${pkgs.nixos-artwork}/share/artwork/gnome/Gnome_Dark.png";
+        default = "${pkgs.nixos-artwork.wallpapers.gnome-dark}/share/artwork/gnome/Gnome_Dark.png";
         description = ''
           The background image or color to use.
         '';
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index 2eb7ddcb1ec0..e6cc02e4d491 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -9,7 +9,7 @@ let
   cfg = dmcfg.sddm;
   xEnv = config.systemd.services."display-manager".environment;
 
-  sddm = cfg.package;
+  inherit (pkgs) sddm;
 
   xserverWrapper = pkgs.writeScript "xserver-wrapper" ''
     #!/bin/sh
@@ -37,8 +37,8 @@ let
 
     [Theme]
     Current=${cfg.theme}
-    ThemeDir=${sddm}/share/sddm/themes
-    FacesDir=${sddm}/share/sddm/faces
+    ThemeDir=/run/current-system/sw/share/sddm/themes
+    FacesDir=/run/current-system/sw/share/sddm/faces
 
     [Users]
     MaximumUid=${toString config.ids.uids.nixbld}
@@ -105,15 +105,6 @@ in
         '';
       };
 
-      package = mkOption {
-        type = types.package;
-        default = pkgs.sddm;
-        description = ''
-          The SDDM package to install.
-          The default package can be overridden to provide extra themes.
-        '';
-      };
-
       autoNumlock = mkOption {
         type = types.bool;
         default = false;
@@ -205,7 +196,15 @@ in
     services.xserver.displayManager.job = {
       logsXsession = true;
 
-      execCmd = "exec ${sddm}/bin/sddm";
+      environment = {
+        # Load themes from system environment
+        QT_PLUGIN_PATH = "/run/current-system/sw/" + pkgs.qt5.qtbase.qtPluginPrefix;
+        QML2_IMPORT_PATH = "/run/current-system/sw/" + pkgs.qt5.qtbase.qtQmlPrefix;
+
+        XDG_DATA_DIRS = "/run/current-system/sw/share";
+      };
+
+      execCmd = "exec /run/current-system/sw/bin/sddm";
     };
 
     security.pam.services = {
@@ -254,7 +253,8 @@ in
 
     users.extraGroups.sddm.gid = config.ids.gids.sddm;
 
-    services.dbus.packages = [ sddm.unwrapped ];
+    environment.systemPackages = [ sddm ];
+    services.dbus.packages = [ sddm ];
 
     # To enable user switching, allow sddm to allocate TTYs/displays dynamically.
     services.xserver.tty = null;
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 09fcdd0b72ba..01e1659b30bb 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -648,34 +648,25 @@ in
 
     services.xserver.xkbDir = mkDefault "${pkgs.xkeyboard_config}/etc/X11/xkb";
 
-    system.extraDependencies = [
-      (pkgs.runCommand "xkb-layouts-exist" {
-            layouts=cfg.layout;
-        } ''
-        missing=()
-        while read -d , layout
-        do
-          [[ -f "${cfg.xkbDir}/symbols/$layout" ]] || missing+=($layout)
-        done <<< "$layouts,"
-        if [[ ''${#missing[@]} -eq 0 ]]
-        then
-          touch $out
-          exit 0
-        fi
-
-        cat >&2 <<EOF
-
-        Some of the selected keyboard layouts do not exist:
-
-          ''${missing[@]}
-
-        Set services.xserver.layout to the name of an existing keyboard
-        layout (check ${cfg.xkbDir}/symbols for options).
-
-        EOF
-        exit -1
-      '')
-    ];
+    system.extraDependencies = singleton (pkgs.runCommand "xkb-layouts-exist" {
+      inherit (cfg) layout xkbDir;
+    } ''
+      sed -n -e ':i /^! \(layout\|variant\) *$/ {
+        :l; n; /^!/bi; s/^ *\([^ ]\+\).*/\1/p; tl
+      }' "$xkbDir/rules/base.lst" | grep -qxF "$layout" && exec touch "$out"
+
+      cat >&2 <<-EOF
+
+      The selected keyboard layout definition does not exist:
+
+        $layout
+
+      Set \`services.xserver.layout' to the name of an existing keyboard
+      layout (check $xkbDir/rules/base.lst for options).
+
+      EOF
+      exit 1
+    '');
 
     services.xserver.config =
       ''
diff --git a/nixos/modules/system/boot/loader/grub/grub.nix b/nixos/modules/system/boot/loader/grub/grub.nix
index 1681439a7287..9056121fa7d1 100644
--- a/nixos/modules/system/boot/loader/grub/grub.nix
+++ b/nixos/modules/system/boot/loader/grub/grub.nix
@@ -517,7 +517,7 @@ in
           sha256 = "14kqdx2lfqvh40h6fjjzqgff1mwk74dmbjvmqphi6azzra7z8d59";
         }
         # GRUB 1.97 doesn't support gzipped XPMs.
-        else "${pkgs.nixos-artwork}/share/artwork/gnome/Gnome_Dark.png");
+        else "${pkgs.nixos-artwork.wallpapers.gnome-dark}/share/artwork/gnome/Gnome_Dark.png");
     }
 
     (mkIf cfg.enable {
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
index cb2a17f18f48..779005c0df52 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
@@ -11,35 +11,42 @@ import errno
 import warnings
 import ctypes
 libc = ctypes.CDLL("libc.so.6")
+import re
 
 def copy_if_not_exists(source, dest):
     if not os.path.exists(dest):
         shutil.copyfile(source, dest)
 
-def system_dir(generation):
-    return "/nix/var/nix/profiles/system-%d-link" % (generation)
+def system_dir(profile, generation):
+    if profile:
+        return "/nix/var/nix/profiles/system-profiles/%s-%d-link" % (profile, generation)
+    else:
+        return "/nix/var/nix/profiles/system-%d-link" % (generation)
 
-BOOT_ENTRY = """title NixOS
+BOOT_ENTRY = """title NixOS{profile}
 version Generation {generation}
 linux {kernel}
 initrd {initrd}
 options {kernel_params}
 """
 
-def write_loader_conf(generation):
+def write_loader_conf(profile, generation):
     with open("@efiSysMountPoint@/loader/loader.conf.tmp", 'w') as f:
         if "@timeout@" != "":
             f.write("timeout @timeout@\n")
-        f.write("default nixos-generation-%d\n" % generation)
+        if profile:
+            f.write("default nixos-%s-generation-%d\n" % (profile, generation))
+        else:
+            f.write("default nixos-generation-%d\n" % (generation))
         if not @editor@:
             f.write("editor 0");
     os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf")
 
-def profile_path(generation, name):
-    return os.readlink("%s/%s" % (system_dir(generation), name))
+def profile_path(profile, generation, name):
+    return os.readlink("%s/%s" % (system_dir(profile, generation), name))
 
-def copy_from_profile(generation, name, dry_run=False):
-    store_file_path = profile_path(generation, name)
+def copy_from_profile(profile, generation, name, dry_run=False):
+    store_file_path = profile_path(profile, generation, name)
     suffix = os.path.basename(store_file_path)
     store_dir = os.path.basename(os.path.dirname(store_file_path))
     efi_file_path = "/efi/nixos/%s-%s.efi" % (store_dir, suffix)
@@ -47,22 +54,26 @@ def copy_from_profile(generation, name, dry_run=False):
         copy_if_not_exists(store_file_path, "@efiSysMountPoint@%s" % (efi_file_path))
     return efi_file_path
 
-def write_entry(generation, machine_id):
-    kernel = copy_from_profile(generation, "kernel")
-    initrd = copy_from_profile(generation, "initrd")
+def write_entry(profile, generation, machine_id):
+    kernel = copy_from_profile(profile, generation, "kernel")
+    initrd = copy_from_profile(profile, generation, "initrd")
     try:
-        append_initrd_secrets = profile_path(generation, "append-initrd-secrets")
+        append_initrd_secrets = profile_path(profile, generation, "append-initrd-secrets")
         subprocess.check_call([append_initrd_secrets, "@efiSysMountPoint@%s" % (initrd)])
     except FileNotFoundError:
         pass
-    entry_file = "@efiSysMountPoint@/loader/entries/nixos-generation-%d.conf" % (generation)
-    generation_dir = os.readlink(system_dir(generation))
+    if profile:
+        entry_file = "@efiSysMountPoint@/loader/entries/nixos-%s-generation-%d.conf" % (profile, generation)
+    else:
+        entry_file = "@efiSysMountPoint@/loader/entries/nixos-generation-%d.conf" % (generation)
+    generation_dir = os.readlink(system_dir(profile, generation))
     tmp_path = "%s.tmp" % (entry_file)
     kernel_params = "systemConfig=%s init=%s/init " % (generation_dir, generation_dir)
     with open("%s/kernel-params" % (generation_dir)) as params_file:
         kernel_params = kernel_params + params_file.read()
     with open(tmp_path, 'w') as f:
-        f.write(BOOT_ENTRY.format(generation=generation,
+        f.write(BOOT_ENTRY.format(profile=" [" + profile + "]" if profile else "",
+                    generation=generation,
                     kernel=kernel,
                     initrd=initrd,
                     kernel_params=kernel_params))
@@ -77,29 +88,33 @@ def mkdir_p(path):
         if e.errno != errno.EEXIST or not os.path.isdir(path):
             raise
 
-def get_generations(profile):
+def get_generations(profile=None):
     gen_list = subprocess.check_output([
         "@nix@/bin/nix-env",
         "--list-generations",
         "-p",
-        "/nix/var/nix/profiles/%s" % (profile),
+        "/nix/var/nix/profiles/%s" % ("system-profiles/" + profile if profile else "system"),
         "--option", "build-users-group", ""],
         universal_newlines=True)
     gen_lines = gen_list.split('\n')
     gen_lines.pop()
-    return [ int(line.split()[0]) for line in gen_lines ]
+    return [ (profile, int(line.split()[0])) for line in gen_lines ]
 
 def remove_old_entries(gens):
-    slice_start = len("@efiSysMountPoint@/loader/entries/nixos-generation-")
-    slice_end = -1 * len(".conf")
+    rex_profile = re.compile("^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$")
+    rex_generation = re.compile("^@efiSysMountPoint@/loader/entries/nixos.*-generation-(.*)\.conf$")
     known_paths = []
     for gen in gens:
-        known_paths.append(copy_from_profile(gen, "kernel", True))
-        known_paths.append(copy_from_profile(gen, "initrd", True))
-    for path in glob.iglob("@efiSysMountPoint@/loader/entries/nixos-generation-[1-9]*.conf"):
+        known_paths.append(copy_from_profile(*gen, "kernel", True))
+        known_paths.append(copy_from_profile(*gen, "initrd", True))
+    for path in glob.iglob("@efiSysMountPoint@/loader/entries/nixos*-generation-[1-9]*.conf"):
         try:
-            gen = int(path[slice_start:slice_end])
-            if not gen in gens:
+            if rex_profile.match(path):
+                prof = rex_profile.sub(r"\1", path)
+            else:
+                prof = "system"
+            gen = int(rex_generation.sub(r"\1", path))
+            if not (prof, gen) in gens:
                 os.unlink(path)
         except ValueError:
             pass
@@ -107,6 +122,14 @@ def remove_old_entries(gens):
         if not path in known_paths:
             os.unlink(path)
 
+def get_profiles():
+    if os.path.isdir("/nix/var/nix/profiles/system-profiles/"):
+        return [x
+            for x in os.listdir("/nix/var/nix/profiles/system-profiles/")
+            if not x.endswith("-link")]
+    else:
+        return []
+
 def main():
     parser = argparse.ArgumentParser(description='Update NixOS-related systemd-boot files')
     parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help='The default NixOS config to boot')
@@ -141,12 +164,14 @@ def main():
     mkdir_p("@efiSysMountPoint@/efi/nixos")
     mkdir_p("@efiSysMountPoint@/loader/entries")
 
-    gens = get_generations("system")
+    gens = get_generations()
+    for profile in get_profiles():
+        gens += get_generations(profile)
     remove_old_entries(gens)
     for gen in gens:
-        write_entry(gen, machine_id)
-        if os.readlink(system_dir(gen)) == args.default_config:
-            write_loader_conf(gen)
+        write_entry(*gen, machine_id)
+        if os.readlink(system_dir(*gen)) == args.default_config:
+            write_loader_conf(*gen)
 
     # Since fat32 provides little recovery facilities after a crash,
     # it can leave the system in an unbootable state, when a crash/outage
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index e9a3dca6418a..8ce7b2d2cf36 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -1110,7 +1110,7 @@ in
             '';
 
             # Udev script to execute for a new WLAN interface. The script configures the new WLAN interface.
-            newInterfaceScript = new: pkgs.writeScript "udev-run-script-wlan-interfaces-${new._iName}.sh" ''
+            newInterfaceScript = device: new: pkgs.writeScript "udev-run-script-wlan-interfaces-${new._iName}.sh" ''
               #!${pkgs.stdenv.shell}
               # Configure the new interface
               ${pkgs.iw}/bin/iw dev ${new._iName} set type ${new.type}
@@ -1132,7 +1132,7 @@ in
             # It is important to have that rule first as overwriting the NAME attribute also prevents the
             # next rules from matching.
             ${flip (concatMapStringsSep "\n") (wlanListDeviceFirst device wlanDeviceInterfaces."${device}") (interface:
-            ''ACTION=="add", SUBSYSTEM=="net", ENV{DEVTYPE}=="wlan", ENV{INTERFACE}=="${interface._iName}", ${systemdAttrs interface._iName}, RUN+="${newInterfaceScript interface}"'')}
+            ''ACTION=="add", SUBSYSTEM=="net", ENV{DEVTYPE}=="wlan", ENV{INTERFACE}=="${interface._iName}", ${systemdAttrs interface._iName}, RUN+="${newInterfaceScript device interface}"'')}
 
             # Add the required, new WLAN interfaces to the default WLAN interface with the
             # persistent, default name as assigned by udev.
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index 4217f5940ec6..eca2d281342f 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -72,6 +72,12 @@ in rec {
         (all nixos.tests.ecryptfs)
         (all nixos.tests.ipv6)
         (all nixos.tests.i3wm)
+        (all nixos.tests.keymap.azerty)
+        (all nixos.tests.keymap.colemak)
+        (all nixos.tests.keymap.dvorak)
+        (all nixos.tests.keymap.dvp)
+        (all nixos.tests.keymap.neo)
+        (all nixos.tests.keymap.qwertz)
         (all nixos.tests.plasma5)
         #(all nixos.tests.lightdm)
         (all nixos.tests.login)
diff --git a/nixos/tests/elk.nix b/nixos/tests/elk.nix
new file mode 100644
index 000000000000..65ff1cac070b
--- /dev/null
+++ b/nixos/tests/elk.nix
@@ -0,0 +1,95 @@
+# Test the ELK stack: Elasticsearch, Logstash and Kibana.
+
+import ./make-test.nix ({ pkgs, ...} :
+let
+  esUrl = "http://localhost:9200";
+in {
+  name = "ELK";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ eelco chaoflow offline basvandijk ];
+  };
+
+  nodes = {
+    one =
+      { config, pkgs, ... }: {
+          # Not giving the machine at least 2060MB results in elasticsearch failing with the following error:
+          #
+          #   OpenJDK 64-Bit Server VM warning:
+          #     INFO: os::commit_memory(0x0000000085330000, 2060255232, 0)
+          #     failed; error='Cannot allocate memory' (errno=12)
+          #
+          #   There is insufficient memory for the Java Runtime Environment to continue.
+          #   Native memory allocation (mmap) failed to map 2060255232 bytes for committing reserved memory.
+          #
+          # When setting this to 2500 I got "Kernel panic - not syncing: Out of
+          # memory: compulsory panic_on_oom is enabled" so let's give it even a
+          # bit more room:
+          virtualisation.memorySize = 3000;
+
+          # For querying JSON objects returned from elasticsearch and kibana.
+          environment.systemPackages = [ pkgs.jq ];
+
+          services = {
+            logstash = {
+              enable = true;
+              package = pkgs.logstash5;
+              inputConfig = ''
+                exec { command => "echo -n flowers" interval => 1 type => "test" }
+                exec { command => "echo -n dragons" interval => 1 type => "test" }
+              '';
+              filterConfig = ''
+                if [message] =~ /dragons/ {
+                  drop {}
+                }
+              '';
+              outputConfig = ''
+                file {
+                  path => "/tmp/logstash.out"
+                  codec => line { format => "%{message}" }
+                }
+                elasticsearch {
+                  hosts => [ "${esUrl}" ]
+                }
+              '';
+            };
+
+            elasticsearch = {
+              enable = true;
+              package = pkgs.elasticsearch5;
+            };
+
+            kibana = {
+              enable = true;
+              package = pkgs.kibana5;
+              elasticsearch.url = esUrl;
+            };
+          };
+        };
+    };
+
+  testScript = ''
+    startAll;
+
+    $one->waitForUnit("elasticsearch.service");
+
+    # Continue as long as the status is not "red". The status is probably
+    # "yellow" instead of "green" because we are using a single elasticsearch
+    # node which elasticsearch considers risky.
+    #
+    # TODO: extend this test with multiple elasticsearch nodes and see if the status turns "green".
+    $one->waitUntilSucceeds("curl --silent --show-error '${esUrl}/_cluster/health' | jq .status | grep -v red");
+
+    # Perform some simple logstash tests.
+    $one->waitForUnit("logstash.service");
+    $one->waitUntilSucceeds("cat /tmp/logstash.out | grep flowers");
+    $one->waitUntilSucceeds("cat /tmp/logstash.out | grep -v dragons");
+
+    # See if kibana is healthy.
+    $one->waitForUnit("kibana.service");
+    $one->waitUntilSucceeds("curl --silent --show-error 'http://localhost:5601/api/status' | jq .status.overall.state | grep green");
+
+    # See if logstash messages arrive in elasticsearch.
+    $one->waitUntilSucceeds("curl --silent --show-error '${esUrl}/_search' -H 'Content-Type: application/json' -d '{\"query\" : { \"match\" : { \"message\" : \"flowers\"}}}' | jq .hits.total | grep -v 0");
+    $one->waitUntilSucceeds("curl --silent --show-error '${esUrl}/_search' -H 'Content-Type: application/json' -d '{\"query\" : { \"match\" : { \"message\" : \"dragons\"}}}' | jq .hits.total | grep 0");
+  '';
+})
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index 6dce6f407cda..85d31334d6bf 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -221,7 +221,7 @@ let
                 docbook5_xsl
                 unionfs-fuse
                 ntp
-                nixos-artwork
+                nixos-artwork.wallpapers.gnome-dark
                 perlPackages.XMLLibXML
                 perlPackages.ListCompare
 
diff --git a/nixos/tests/logstash.nix b/nixos/tests/logstash.nix
deleted file mode 100644
index 01f6a0358b2e..000000000000
--- a/nixos/tests/logstash.nix
+++ /dev/null
@@ -1,41 +0,0 @@
-# This test runs logstash and checks if messages flows and
-# elasticsearch is started.
-
-import ./make-test.nix ({ pkgs, ...} : {
-  name = "logstash";
-  meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow offline ];
-  };
-
-  nodes = {
-    one =
-      { config, pkgs, ... }:
-        {
-          services = {
-            logstash = {
-              enable = true;
-              inputConfig = ''
-                exec { command => "echo flowers" interval => 1 type => "test" }
-                exec { command => "echo dragons" interval => 1 type => "test" }
-              '';
-              filterConfig = ''
-                if [message] =~ /dragons/ {
-                  drop {}
-                }
-              '';
-              outputConfig = ''
-                stdout { codec => rubydebug }
-              '';
-            };
-          };
-        };
-    };
-
-  testScript = ''
-    startAll;
-
-    $one->waitForUnit("logstash.service");
-    $one->waitUntilSucceeds("journalctl -n 20 _SYSTEMD_UNIT=logstash.service | grep flowers");
-    $one->fail("journalctl -n 20 _SYSTEMD_UNIT=logstash.service | grep dragons");
-  '';
-})
diff --git a/nixos/tests/minio.nix b/nixos/tests/minio.nix
new file mode 100644
index 000000000000..462a3bc4768c
--- /dev/null
+++ b/nixos/tests/minio.nix
@@ -0,0 +1,19 @@
+import ./make-test.nix ({ pkgs, ...} : {
+  name = "minio";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ bachp ];
+  };
+
+  machine = { config, pkgs, ... }: {
+    services.minio.enable = true;
+  };
+
+  testScript =
+    ''
+      startAll;
+      $machine->waitForUnit("minio.service");
+      $machine->waitForOpenPort(9000);
+      $machine->succeed("curl --fail http://localhost:9000/minio/index.html");
+      $machine->shutdown;
+    '';
+})
diff --git a/nixos/tests/sddm.nix b/nixos/tests/sddm.nix
index 82be9bc1d727..1ce2b8157842 100644
--- a/nixos/tests/sddm.nix
+++ b/nixos/tests/sddm.nix
@@ -24,7 +24,7 @@ let
         user = nodes.machine.config.users.extraUsers.alice;
       in ''
         startAll;
-        $machine->waitForText(qr/ALICE/);
+        $machine->waitForText(qr/select your user/i);
         $machine->screenshot("sddm");
         $machine->sendChars("${user.password}\n");
         $machine->waitForFile("/home/alice/.Xauthority");