about summary refs log tree commit diff
path: root/nixos/modules
diff options
context:
space:
mode:
authorVladimír Čunát <vcunat@gmail.com>2015-12-31 09:47:26 +0100
committerVladimír Čunát <vcunat@gmail.com>2015-12-31 09:53:02 +0100
commitf9f6f41bff2213e199bded515e9b66d1e5c4d7dd (patch)
tree29c5a75228e31f305f42c5b761709a186e406776 /nixos/modules
parentbbcf127c7c9029cba43493d7d25a9d1c65d59152 (diff)
parent468f698f609e123bb0ffae67181d07ac99eb2204 (diff)
downloadnixlib-f9f6f41bff2213e199bded515e9b66d1e5c4d7dd.tar
nixlib-f9f6f41bff2213e199bded515e9b66d1e5c4d7dd.tar.gz
nixlib-f9f6f41bff2213e199bded515e9b66d1e5c4d7dd.tar.bz2
nixlib-f9f6f41bff2213e199bded515e9b66d1e5c4d7dd.tar.lz
nixlib-f9f6f41bff2213e199bded515e9b66d1e5c4d7dd.tar.xz
nixlib-f9f6f41bff2213e199bded515e9b66d1e5c4d7dd.tar.zst
nixlib-f9f6f41bff2213e199bded515e9b66d1e5c4d7dd.zip
Merge branch 'master' into closure-size
TODO: there was more significant refactoring of qtbase and plasma 5.5
on master, and I'm deferring pointing to correct outputs to later.
Diffstat (limited to 'nixos/modules')
-rw-r--r--nixos/modules/config/power-management.nix1
-rw-r--r--nixos/modules/config/system-path.nix2
-rw-r--r--nixos/modules/installer/cd-dvd/iso-image.nix16
-rw-r--r--nixos/modules/installer/tools/auto-upgrade.nix13
-rw-r--r--nixos/modules/misc/ids.nix2
-rw-r--r--nixos/modules/misc/nixos.xml2
-rw-r--r--nixos/modules/module-list.nix2
-rw-r--r--nixos/modules/rename.nix2
-rw-r--r--nixos/modules/security/acme.nix202
-rw-r--r--nixos/modules/security/acme.xml69
-rw-r--r--nixos/modules/services/audio/mopidy.nix17
-rw-r--r--nixos/modules/services/cluster/fleet.nix4
-rw-r--r--nixos/modules/services/computing/slurm/slurm.nix56
-rw-r--r--nixos/modules/services/continuous-integration/jenkins/default.nix35
-rw-r--r--nixos/modules/services/continuous-integration/jenkins/job-builder.nix2
-rw-r--r--nixos/modules/services/hardware/udisks2.nix2
-rw-r--r--nixos/modules/services/mail/dovecot.nix262
-rw-r--r--nixos/modules/services/networking/avahi-daemon.nix88
-rw-r--r--nixos/modules/services/networking/hostapd.nix47
-rw-r--r--nixos/modules/services/networking/miniupnpd.nix35
-rw-r--r--nixos/modules/services/networking/murmur.nix15
-rw-r--r--nixos/modules/services/networking/networkmanager.nix2
-rw-r--r--nixos/modules/services/networking/shairport-sync.nix80
-rw-r--r--nixos/modules/services/printing/cupsd.nix2
-rw-r--r--nixos/modules/services/security/clamav.nix135
-rw-r--r--nixos/modules/services/x11/desktop-managers/gnome3.nix3
-rw-r--r--nixos/modules/services/x11/desktop-managers/kde5.nix111
-rw-r--r--nixos/modules/services/x11/display-managers/default.nix2
-rw-r--r--nixos/modules/services/x11/display-managers/lightdm.nix15
-rw-r--r--nixos/modules/services/x11/display-managers/sddm.nix47
-rw-r--r--nixos/modules/services/x11/redshift.nix17
-rw-r--r--nixos/modules/services/x11/xserver.nix35
-rw-r--r--nixos/modules/system/boot/loader/grub/grub.nix62
-rw-r--r--nixos/modules/tasks/filesystems/zfs.nix4
-rw-r--r--nixos/modules/tasks/network-interfaces-scripted.nix4
-rw-r--r--nixos/modules/virtualisation/amazon-image.nix2
-rw-r--r--nixos/modules/virtualisation/amazon-init.nix1
-rw-r--r--nixos/modules/virtualisation/azure-agent.nix6
-rw-r--r--nixos/modules/virtualisation/docker.nix47
-rw-r--r--nixos/modules/virtualisation/qemu-vm.nix18
40 files changed, 1086 insertions, 381 deletions
diff --git a/nixos/modules/config/power-management.nix b/nixos/modules/config/power-management.nix
index dedc8a3f6793..fbd7867a0953 100644
--- a/nixos/modules/config/power-management.nix
+++ b/nixos/modules/config/power-management.nix
@@ -71,7 +71,6 @@ in
 
     # FIXME: Implement powersave governor for sandy bridge or later Intel CPUs
     powerManagement.cpuFreqGovernor = mkDefault "ondemand";
-    powerManagement.scsiLinkPolicy = mkDefault "min_power";
 
     systemd.targets.post-resume = {
       description = "Post-Resume Actions";
diff --git a/nixos/modules/config/system-path.nix b/nixos/modules/config/system-path.nix
index 6b4cc9ebb7d8..3df7d7cdac4f 100644
--- a/nixos/modules/config/system-path.nix
+++ b/nixos/modules/config/system-path.nix
@@ -134,7 +134,7 @@ in
       # !!! Hacky, should modularise.
       postBuild =
         ''
-          if [ -x $out/bin/update-mime-database -a -w $out/share/mime/packages ]; then
+          if [ -x $out/bin/update-mime-database -a -w $out/share/mime ]; then
               XDG_DATA_DIRS=$out/share $out/bin/update-mime-database -V $out/share/mime > /dev/null
           fi
 
diff --git a/nixos/modules/installer/cd-dvd/iso-image.nix b/nixos/modules/installer/cd-dvd/iso-image.nix
index fa9cc6fa20b9..d3353ee7d64d 100644
--- a/nixos/modules/installer/cd-dvd/iso-image.nix
+++ b/nixos/modules/installer/cd-dvd/iso-image.nix
@@ -43,6 +43,13 @@ let
     LINUX /boot/bzImage
     APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
     INITRD /boot/initrd
+
+    # A variant to boot with 'nomodeset'
+    LABEL boot-nomodeset
+    MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (with nomodeset)
+    LINUX /boot/bzImage
+    APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
+    INITRD /boot/initrd
   '';
 
   isolinuxMemtest86Entry = ''
@@ -59,10 +66,18 @@ let
     mkdir -p $out/EFI/boot
     cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi
     mkdir -p $out/loader/entries
+
     echo "title NixOS Live CD" > $out/loader/entries/nixos-livecd.conf
     echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf
     echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf
     echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf
+
+    # A variant to boot with 'nomodeset'
+    echo "title NixOS Live CD (with nomodeset)" > $out/loader/entries/nixos-livecd-nomodeset.conf
+    echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd-nomodeset.conf
+    echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd-nomodeset.conf
+    echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset" >> $out/loader/entries/nixos-livecd-nomodeset.conf
+
     echo "default nixos-livecd" > $out/loader/loader.conf
     echo "timeout ${builtins.toString config.boot.loader.gummiboot.timeout}" >> $out/loader/loader.conf
   '';
@@ -230,7 +245,6 @@ in
     boot.kernelParams =
       [ "root=LABEL=${config.isoImage.volumeID}"
         "boot.shell_on_fail"
-        "nomodeset"
       ];
 
     fileSystems."/" =
diff --git a/nixos/modules/installer/tools/auto-upgrade.nix b/nixos/modules/installer/tools/auto-upgrade.nix
index 1fa9282b909e..ca51de0fb8c7 100644
--- a/nixos/modules/installer/tools/auto-upgrade.nix
+++ b/nixos/modules/installer/tools/auto-upgrade.nix
@@ -42,6 +42,17 @@ let cfg = config.system.autoUpgrade; in
         '';
       };
 
+      dates = mkOption {
+        default = "04:40";
+        type = types.str;
+        description = ''
+          Specification (in the format described by
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry>) of the time at
+          which the update will occur.
+        '';
+      };
+
     };
 
   };
@@ -73,7 +84,7 @@ let cfg = config.system.autoUpgrade; in
         ${config.system.build.nixos-rebuild}/bin/nixos-rebuild switch ${toString cfg.flags}
       '';
 
-      startAt = mkIf cfg.enable "04:40";
+      startAt = optionalString cfg.enable cfg.dates;
     };
 
   };
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index 6ff95605d4b2..7a7ed2f4408c 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -238,6 +238,7 @@
       heapster = 214;
       bepasty = 215;
       pumpio = 216;
+      nm-openvpn = 217;
 
       # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
 
@@ -453,6 +454,7 @@
       calibre-server = 213;
       bepasty = 215;
       pumpio = 216;
+      nm-openvpn = 217;
 
       # When adding a gid, make sure it doesn't match an existing
       # uid. Users and groups with the same name should have equal
diff --git a/nixos/modules/misc/nixos.xml b/nixos/modules/misc/nixos.xml
index f8d3b4bc6e33..064bdd80b3c9 100644
--- a/nixos/modules/misc/nixos.xml
+++ b/nixos/modules/misc/nixos.xml
@@ -35,7 +35,7 @@ nixos.path = ./nixpkgs-16-03/nixos;
 
 <para>Another option is to fetch a specific version of NixOS, with either
 the <literal>fetchTarball</literal> builtin, or the
-<literal>pkgs.fetchFromGithub</literal> function and use the result as an
+<literal>pkgs.fetchFromGitHub</literal> function and use the result as an
 input.
 
 <programlisting>
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 963daf721ad3..5c1cde98d3dc 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -80,6 +80,7 @@
   ./programs/xfs_quota.nix
   ./programs/zsh/zsh.nix
   ./rename.nix
+  ./security/acme.nix
   ./security/apparmor.nix
   ./security/apparmor-suid.nix
   ./security/ca.nix
@@ -343,6 +344,7 @@
   ./services/networking/searx.nix
   ./services/networking/seeks.nix
   ./services/networking/skydns.nix
+  ./services/networking/shairport-sync.nix
   ./services/networking/shout.nix
   ./services/networking/softether.nix
   ./services/networking/spiped.nix
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 28ac1c3e888a..2a3d89e9f6f2 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -75,6 +75,8 @@ with lib;
     # DNSCrypt-proxy
     (mkRenamedOptionModule [ "services" "dnscrypt-proxy" "port" ] [ "services" "dnscrypt-proxy" "localPort" ])
 
+    (mkRenamedOptionModule [ "services" "hostapd" "extraCfg" ] [ "services" "hostapd" "extraConfig" ])
+
     # Options that are obsolete and have no replacement.
     (mkRemovedOptionModule [ "boot" "initrd" "luks" "enable" ])
     (mkRemovedOptionModule [ "programs" "bash" "enable" ])
diff --git a/nixos/modules/security/acme.nix b/nixos/modules/security/acme.nix
new file mode 100644
index 000000000000..2de57dd68cba
--- /dev/null
+++ b/nixos/modules/security/acme.nix
@@ -0,0 +1,202 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.security.acme;
+
+  certOpts = { ... }: {
+    options = {
+      webroot = mkOption {
+        type = types.str;
+        description = ''
+          Where the webroot of the HTTP vhost is located.
+          <filename>.well-known/acme-challenge/</filename> directory
+          will be created automatically if it doesn't exist.
+          <literal>http://example.org/.well-known/acme-challenge/</literal> must also
+          be available (notice unencrypted HTTP).
+        '';
+      };
+
+      email = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Contact email address for the CA to be able to reach you.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "root";
+        description = "User running the ACME client.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "root";
+        description = "Group running the ACME client.";
+      };
+
+      postRun = mkOption {
+        type = types.lines;
+        default = "";
+        example = "systemctl reload nginx.service";
+        description = ''
+          Commands to run after certificates are re-issued. Typically
+          the web server and other servers using certificates need to
+          be reloaded.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.listOf (types.enum [
+          "cert.der" "cert.pem" "chain.der" "chain.pem" "external_pem.sh"
+          "fullchain.der" "fullchain.pem" "key.der" "key.pem" "account_key.json"
+        ]);
+        default = [ "fullchain.pem" "key.pem" "account_key.json" ];
+        description = ''
+          Plugins to enable. With default settings simp_le will
+          store public certificate bundle in <filename>fullchain.pem</filename>
+          and private key in <filename>key.pem</filename> in its state directory.
+        '';
+      };
+
+      extraDomains = mkOption {
+        type = types.attrsOf (types.nullOr types.str);
+        default = {};
+        example = {
+          "example.org" = "/srv/http/nginx";
+          "mydomain.org" = null;
+        };
+        description = ''
+          Extra domain names for which certificates are to be issued, with their
+          own server roots if needed.
+        '';
+      };
+    };
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    security.acme = {
+      directory = mkOption {
+        default = "/var/lib/acme";
+        type = types.str;
+        description = ''
+          Directory where certs and other state will be stored by default.
+        '';
+      };
+
+      validMin = mkOption {
+        type = types.int;
+        default = 30 * 24 * 3600;
+        description = "Minimum remaining validity before renewal in seconds.";
+      };
+
+      renewInterval = mkOption {
+        type = types.str;
+        default = "weekly";
+        description = ''
+          Systemd calendar expression when to check for renewal. See
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry>.
+        '';
+      };
+
+      certs = mkOption {
+        default = { };
+        type = types.loaOf types.optionSet;
+        description = ''
+          Attribute set of certificates to get signed and renewed.
+        '';
+        options = [ certOpts ];
+        example = {
+          "example.com" = {
+            webroot = "/var/www/challenges/";
+            email = "foo@example.com";
+            extraDomains = { "www.example.com" = null; "foo.example.com" = "/var/www/foo/"; };
+          };
+          "bar.example.com" = {
+            webroot = "/var/www/challenges/";
+            email = "bar@example.com";
+          };
+        };
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkMerge [
+    (mkIf (cfg.certs != { }) {
+
+      systemd.services = flip mapAttrs' cfg.certs (cert: data:
+        let
+          cpath = "${cfg.directory}/${cert}";
+          cmdline = [ "-v" "-d" cert "--default_root" data.webroot "--valid_min" cfg.validMin ]
+                    ++ optionals (data.email != null) [ "--email" data.email ]
+                    ++ concatMap (p: [ "-f" p ]) data.plugins
+                    ++ concatLists (mapAttrsToList (name: root: [ "-d" (if root == null then name else "${name}:${root}")]) data.extraDomains);
+
+        in nameValuePair
+        ("acme-${cert}")
+        ({
+          description = "ACME cert renewal for ${cert} using simp_le";
+          after = [ "network.target" ];
+          serviceConfig = {
+            Type = "oneshot";
+            SuccessExitStatus = [ "0" "1" ];
+            PermissionsStartOnly = true;
+            User = data.user;
+            Group = data.group;
+            PrivateTmp = true;
+          };
+          path = [ pkgs.simp_le ];
+          preStart = ''
+            mkdir -p '${cfg.directory}'
+            if [ ! -d '${cpath}' ]; then
+              mkdir -m 700 '${cpath}'
+              chown '${data.user}:${data.group}' '${cpath}'
+            fi
+          '';
+          script = ''
+            cd '${cpath}'
+            set +e
+            simp_le ${concatMapStringsSep " " (arg: escapeShellArg (toString arg)) cmdline}
+            EXITCODE=$?
+            set -e
+            echo "$EXITCODE" > /tmp/lastExitCode
+            exit "$EXITCODE"
+          '';
+          postStop = ''
+            if [ -e /tmp/lastExitCode ] && [ "$(cat /tmp/lastExitCode)" = "0" ]; then
+              echo "Executing postRun hook..."
+              ${data.postRun}
+            fi
+          '';
+        })
+      );
+
+      systemd.timers = flip mapAttrs' cfg.certs (cert: data: nameValuePair
+        ("acme-${cert}")
+        ({
+          description = "timer for ACME cert renewal of ${cert}";
+          wantedBy = [ "timers.target" ];
+          timerConfig = {
+            OnCalendar = cfg.renewInterval;
+            Unit = "acme-${cert}.service";
+          };
+        })
+      );
+    })
+
+    { meta.maintainers = with lib.maintainers; [ abbradar fpletz globin ];
+      meta.doc = ./acme.xml;
+    }
+  ];
+
+}
diff --git a/nixos/modules/security/acme.xml b/nixos/modules/security/acme.xml
new file mode 100644
index 000000000000..e32fa72c9393
--- /dev/null
+++ b/nixos/modules/security/acme.xml
@@ -0,0 +1,69 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="module-security-acme">
+
+<title>SSL/TLS Certificates with ACME</title>
+
+<para>NixOS supports automatic domain validation &amp; certificate
+retrieval and renewal using the ACME protocol. This is currently only
+implemented by and for Let's Encrypt. The alternative ACME client
+<literal>simp_le</literal> is used under the hood.</para>
+
+<section><title>Prerequisites</title>
+
+<para>You need to have a running HTTP server for verification. The server must
+have a webroot defined that can serve
+<filename>.well-known/acme-challenge</filename>. This directory must be
+writeable by the user that will run the ACME client.</para>
+
+<para>For instance, this generic snippet could be used for Nginx:
+
+<programlisting>
+http {
+  server {
+    server_name _;
+    listen 80;
+    listen [::]:80;
+
+    location /.well-known/acme-challenge {
+      root /var/www/challenges;
+    }
+
+    location / {
+      return 301 https://$host$request_uri;
+    }
+  }
+}
+</programlisting>
+</para>
+
+</section>
+
+<section><title>Configuring</title>
+
+<para>To enable ACME certificate retrieval &amp; renewal for a certificate for
+<literal>foo.example.com</literal>, add the following in your
+<filename>configuration.nix</filename>:
+
+<programlisting>
+security.acme.certs."foo.example.com" = {
+  webroot = "/var/www/challenges";
+  email = "foo@example.com";
+};
+</programlisting>
+</para>
+
+<para>The private key <filename>key.pem</filename> and certificate
+<filename>fullchain.pem</filename> will be put into
+<filename>/var/lib/acme/foo.example.com</filename>. The target directory can
+be configured with the option <literal>security.acme.directory</literal>.
+</para>
+
+<para>Refer to <xref linkend="ch-options" /> for all available configuration
+options for the <literal>security.acme</literal> module.</para>
+
+</section>
+
+</chapter>
diff --git a/nixos/modules/services/audio/mopidy.nix b/nixos/modules/services/audio/mopidy.nix
index a7a7e8ae688b..9981b065f28d 100644
--- a/nixos/modules/services/audio/mopidy.nix
+++ b/nixos/modules/services/audio/mopidy.nix
@@ -11,17 +11,8 @@ let
 
   mopidyConf = writeText "mopidy.conf" cfg.configuration;
 
-  mopidyLauncher = stdenv.mkDerivation {
-    name = "mopidy-launcher";
-    phases = [ "installPhase" ];
-    buildInputs = [ makeWrapper python ];
-    installPhase = ''
-      mkdir -p $out/bin
-      ln -s ${mopidy}/bin/mopidy $out/bin/mopidy
-      wrapProgram $out/bin/mopidy \
-        --prefix PYTHONPATH : \
-        "${concatStringsSep ":" (map (p: "$(toPythonPath ${p})") cfg.extensionPackages)}"
-    '';
+  mopidyEnv = python.buildEnv.override {
+    extraLibs = [ mopidy ] ++ cfg.extensionPackages;
   };
 
 in {
@@ -86,7 +77,7 @@ in {
       description = "mopidy music player daemon";
       preStart = "mkdir -p ${cfg.dataDir} && chown -R mopidy:mopidy  ${cfg.dataDir}";
       serviceConfig = {
-        ExecStart = "${mopidyLauncher}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)}";
+        ExecStart = "${mopidyEnv}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)}";
         User = "mopidy";
         PermissionsStartOnly = true;
       };
@@ -96,7 +87,7 @@ in {
       description = "mopidy local files scanner";
       preStart = "mkdir -p ${cfg.dataDir} && chown -R mopidy:mopidy  ${cfg.dataDir}";
       serviceConfig = {
-        ExecStart = "${mopidyLauncher}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)} local scan";
+        ExecStart = "${mopidyEnv}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)} local scan";
         User = "mopidy";
         PermissionsStartOnly = true;
         Type = "oneshot";
diff --git a/nixos/modules/services/cluster/fleet.nix b/nixos/modules/services/cluster/fleet.nix
index 04d95fbf186b..78d4ea93c491 100644
--- a/nixos/modules/services/cluster/fleet.nix
+++ b/nixos/modules/services/cluster/fleet.nix
@@ -90,7 +90,7 @@ in {
 
     extraConfig = mkOption {
       type = types.attrsOf types.str;
-      apply = mapAttrs' (n: v: nameValuePair ("ETCD_" + n) v);
+      apply = mapAttrs' (n: v: nameValuePair ("FLEET_" + n) v);
       default = {};
       example = literalExample ''
         {
@@ -120,7 +120,7 @@ in {
         FLEET_PUBLIC_IP = cfg.publicIp;
         FLEET_ETCD_CAFILE = cfg.etcdCafile;
         FLEET_ETCD_KEYFILE = cfg.etcdKeyfile;
-        FEELT_ETCD_CERTFILE = cfg.etcdCertfile;
+        FLEET_ETCD_CERTFILE = cfg.etcdCertfile;
         FLEET_METADATA = cfg.metadata;
       } // cfg.extraConfig;
       serviceConfig = {
diff --git a/nixos/modules/services/computing/slurm/slurm.nix b/nixos/modules/services/computing/slurm/slurm.nix
index 019d7fbb16cd..cf00d8946557 100644
--- a/nixos/modules/services/computing/slurm/slurm.nix
+++ b/nixos/modules/services/computing/slurm/slurm.nix
@@ -34,6 +34,15 @@ in
 
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.slurm-llnl;
+        example = literalExample "pkgs.slurm-llnl-full";
+        description = ''
+          The packge to use for slurm binaries.
+        '';
+      };
+
       controlMachine = mkOption {
         type = types.nullOr types.str;
         default = null;
@@ -91,38 +100,69 @@ in
 
   ###### implementation
 
-  config = mkIf (cfg.client.enable || cfg.server.enable) {
+  config =
+    let
+      wrappedSlurm = pkgs.stdenv.mkDerivation {
+        name = "wrappedSlurm";
+
+        propagatedBuildInputs = [ cfg.package configFile ];
+
+        builder = pkgs.writeText "builder.sh" ''
+          source $stdenv/setup
+          mkdir -p $out/bin
+          find  ${cfg.package}/bin -type f -executable | while read EXE
+          do
+            exename="$(basename $EXE)"
+            wrappername="$out/bin/$exename"
+            cat > "$wrappername" <<EOT
+          #!/bin/sh
+          if [ -z "$SLURM_CONF" ]
+          then
+            SLURM_CONF="${configFile}" "$EXE" "\$@"
+          else
+            "$EXE" "\$0"
+          fi
+          EOT
+            chmod +x "$wrappername"
+          done
+        '';
+      };
 
-    environment.systemPackages = [ pkgs.slurm-llnl ];
+  in mkIf (cfg.client.enable || cfg.server.enable) {
+
+    environment.systemPackages = [ wrappedSlurm ];
 
     systemd.services.slurmd = mkIf (cfg.client.enable) {
-      path = with pkgs; [ slurm-llnl coreutils ];
+      path = with pkgs; [ wrappedSlurm coreutils ];
 
       wantedBy = [ "multi-user.target" ];
       after = [ "systemd-tmpfiles-clean.service" ];
 
       serviceConfig = {
         Type = "forking";
-        ExecStart = "${pkgs.slurm-llnl}/bin/slurmd -f ${configFile}";
+        ExecStart = "${wrappedSlurm}/bin/slurmd";
         PIDFile = "/run/slurmd.pid";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
       };
+
+      preStart = ''
+        mkdir -p /var/spool
+      '';
     };
 
     systemd.services.slurmctld = mkIf (cfg.server.enable) {
-      path = with pkgs; [ slurm-llnl munge coreutils ];
+      path = with pkgs; [ wrappedSlurm munge coreutils ];
       
       wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" "auditd.service" "munged.service" "slurmdbd.service" ];
+      after = [ "network.target" "munged.service" ];
       requires = [ "munged.service" ];
 
       serviceConfig = {
         Type = "forking";
-        ExecStart = "${pkgs.slurm-llnl}/bin/slurmctld";
+        ExecStart = "${wrappedSlurm}/bin/slurmctld";
         PIDFile = "/run/slurmctld.pid";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
       };
-      environment = { SLURM_CONF = "${configFile}"; };
     };
 
   };
diff --git a/nixos/modules/services/continuous-integration/jenkins/default.nix b/nixos/modules/services/continuous-integration/jenkins/default.nix
index 0568b1af7d5c..ac64e8616647 100644
--- a/nixos/modules/services/continuous-integration/jenkins/default.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/default.nix
@@ -48,11 +48,33 @@ in {
         '';
       };
 
+      listenAddress = mkOption {
+        default = "0.0.0.0";
+        example = "localhost";
+        type = types.str;
+        description = ''
+          Specifies the bind address on which the jenkins HTTP interface listens.
+          The default is the wildcard address.
+        '';
+      };
+
       port = mkOption {
         default = 8080;
         type = types.int;
         description = ''
-          Specifies port number on which the jenkins HTTP interface listens. The default is 8080.
+          Specifies port number on which the jenkins HTTP interface listens.
+          The default is 8080.
+        '';
+      };
+
+      prefix = mkOption {
+        default = "";
+        example = "/jenkins";
+        type = types.str;
+        description = ''
+          Specifies a urlPrefix to use with jenkins.
+          If the example /jenkins is given, the jenkins server will be
+          accessible using localhost:8080/jenkins.
         '';
       };
 
@@ -80,7 +102,7 @@ in {
       extraOptions = mkOption {
         type = types.listOf types.str;
         default = [ ];
-        example = [ "--debug=9" "--httpListenAddress=localhost" ];
+        example = [ "--debug=9" ];
         description = ''
           Additional command line arguments to pass to Jenkins.
         '';
@@ -134,15 +156,18 @@ in {
       '';
 
       script = ''
-        ${pkgs.jdk}/bin/java -jar ${pkgs.jenkins} --httpPort=${toString cfg.port} ${concatStringsSep " " cfg.extraOptions}
+        ${pkgs.jdk}/bin/java -jar ${pkgs.jenkins} --httpListenAddress=${cfg.listenAddress} \
+                                                  --httpPort=${toString cfg.port} \
+                                                  --prefix=${cfg.prefix} \
+                                                  ${concatStringsSep " " cfg.extraOptions}
       '';
 
       postStart = ''
-        until ${pkgs.curl.bin}/bin/curl -s -L localhost:${toString cfg.port} ; do
+        until ${pkgs.curl.bin}/bin/curl -s -L ${cfg.listenAddress}:${toString cfg.port}${cfg.prefix} ; do
           sleep 10
         done
         while true ; do
-          index=`${pkgs.curl.bin}/bin/curl -s -L localhost:${toString cfg.port}`
+          index=`${pkgs.curl.bin}/bin/curl -s -L ${cfg.listenAddress}:${toString cfg.port}${cfg.prefix}`
           if [[ !("$index" =~ 'Please wait while Jenkins is restarting' ||
                   "$index" =~ 'Please wait while Jenkins is getting ready to work') ]]; then
             exit 0
diff --git a/nixos/modules/services/continuous-integration/jenkins/job-builder.nix b/nixos/modules/services/continuous-integration/jenkins/job-builder.nix
index ec15a6a3d706..702d452279f8 100644
--- a/nixos/modules/services/continuous-integration/jenkins/job-builder.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/job-builder.nix
@@ -144,7 +144,7 @@ in {
             done
 
             echo "Asking Jenkins to reload config"
-            curl --silent -X POST http://localhost:${toString jenkinsCfg.port}/reload
+            curl --silent -X POST http://${jenkinsCfg.listenAddress}:${toString jenkinsCfg.port}${jenkinsCfg.prefix}/reload
           '';
       serviceConfig = {
         User = jenkinsCfg.user;
diff --git a/nixos/modules/services/hardware/udisks2.nix b/nixos/modules/services/hardware/udisks2.nix
index fd6d8886348e..ad5dc8e8a49b 100644
--- a/nixos/modules/services/hardware/udisks2.nix
+++ b/nixos/modules/services/hardware/udisks2.nix
@@ -39,7 +39,7 @@ with lib;
         mkdir -m 0755 -p /var/lib/udisks2
       '';
 
-    #services.udev.packages = [ pkgs.udisks2 ];
+    services.udev.packages = [ pkgs.udisks2 ];
     
     systemd.services.udisks2 = {
       description = "Udisks2 service";
diff --git a/nixos/modules/services/mail/dovecot.nix b/nixos/modules/services/mail/dovecot.nix
index fca0d2a7f616..7ca4faae5d4f 100644
--- a/nixos/modules/services/mail/dovecot.nix
+++ b/nixos/modules/services/mail/dovecot.nix
@@ -3,137 +3,178 @@
 with lib;
 
 let
-
   cfg = config.services.dovecot2;
+  dovecotPkg = cfg.package;
 
-  dovecotConf =
-    ''
-      base_dir = /var/run/dovecot2/
+  baseDir = "/run/dovecot2";
+  stateDir = "/var/lib/dovecot";
 
-      protocols = ${optionalString cfg.enableImap "imap"} ${optionalString cfg.enablePop3 "pop3"} ${optionalString cfg.enableLmtp "lmtp"}
+  protocols = concatStrings [
+    (optionalString cfg.enableImap "imap")
+    (optionalString cfg.enablePop3 "pop3")
+    (optionalString cfg.enableLmtp "lmtp")
+  ];
+
+  dovecotConf = concatStrings [
     ''
-    + (if cfg.sslServerCert!="" then
+      base_dir = ${baseDir}
+      protocols = ${protocols}
     ''
+
+    (if isNull cfg.sslServerCert then ''
+      ssl = no
+      disable_plaintext_auth = no
+    '' else ''
       ssl_cert = <${cfg.sslServerCert}
       ssl_key = <${cfg.sslServerKey}
-      ssl_ca = <${cfg.sslCACert}
+      ${optionalString (!(isNull cfg.sslCACert)) ("ssl_ca = <" + cfg.sslCACert)}
       disable_plaintext_auth = yes
-    '' else ''
-      ssl = no
-      disable_plaintext_auth = no
     '')
 
-    + ''
+    ''
       default_internal_user = ${cfg.user}
 
       mail_location = ${cfg.mailLocation}
 
       maildir_copy_with_hardlinks = yes
+      pop3_uidl_format = %08Xv%08Xu
 
       auth_mechanisms = plain login
+
       service auth {
         user = root
       }
+    ''
+
+    (optionalString cfg.enablePAM ''
       userdb {
         driver = passwd
       }
+
       passdb {
         driver = pam
         args = ${optionalString cfg.showPAMFailure "failure_show_msg=yes"} dovecot2
       }
+    '')
 
-      pop3_uidl_format = %08Xv%08Xu
-    '' + cfg.extraConfig;
+    cfg.extraConfig
+  ];
 
-in
+  modulesDir = pkgs.symlinkJoin "dovecot-modules"
+    (map (module: "${module}/lib/dovecot") cfg.modules);
 
+in
 {
 
-  ###### interface
-
-  options = {
+  options.services.dovecot2 = {
+    enable = mkEnableOption "Dovecot 2.x POP3/IMAP server";
 
-    services.dovecot2 = {
+    enablePop3 = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Start the POP3 listener (when Dovecot is enabled).";
+    };
 
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable the Dovecot 2.x POP3/IMAP server.";
-      };
+    enableImap = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Start the IMAP listener (when Dovecot is enabled).";
+    };
 
-      enablePop3 = mkOption {
-        default = true;
-        description = "Start the POP3 listener (when Dovecot is enabled).";
-      };
+    enableLmtp = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Start the LMTP listener (when Dovecot is enabled).";
+    };
 
-      enableImap = mkOption {
-        default = true;
-        description = "Start the IMAP listener (when Dovecot is enabled).";
-      };
+    package = mkOption {
+      type = types.package;
+      default = pkgs.dovecot22;
+      description = "Dovecot package to use.";
+    };
 
-      enableLmtp = mkOption {
-        default = false;
-        description = "Start the LMTP listener (when Dovecot is enabled).";
-      };
+    user = mkOption {
+      type = types.str;
+      default = "dovecot2";
+      description = "Dovecot user name.";
+    };
 
-      user = mkOption {
-        default = "dovecot2";
-        description = "Dovecot user name.";
-      };
+    group = mkOption {
+      type = types.str;
+      default = "dovecot2";
+      description = "Dovecot group name.";
+    };
 
-      group = mkOption {
-        default = "dovecot2";
-        description = "Dovecot group name.";
-      };
+    extraConfig = mkOption {
+      type = types.str;
+      default = "";
+      example = "mail_debug = yes";
+      description = "Additional entries to put verbatim into Dovecot's config file.";
+    };
 
-      extraConfig = mkOption {
-        default = "";
-        example = "mail_debug = yes";
-        description = "Additional entries to put verbatim into Dovecot's config file.";
-      };
+    configFile = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Config file used for the whole dovecot configuration.";
+      apply = v: if v != null then v else pkgs.writeText "dovecot.conf" dovecotConf;
+    };
 
-      configFile = mkOption {
-        default = null;
-        description = "Config file used for the whole dovecot configuration.";
-        apply = v: if v != null then v else pkgs.writeText "dovecot.conf" dovecotConf;
-      };
+    mailLocation = mkOption {
+      type = types.str;
+      default = "maildir:/var/spool/mail/%u"; /* Same as inbox, as postfix */
+      example = "maildir:~/mail:INBOX=/var/spool/mail/%u";
+      description = ''
+        Location that dovecot will use for mail folders. Dovecot mail_location option.
+      '';
+    };
 
-      mailLocation = mkOption {
-        default = "maildir:/var/spool/mail/%u"; /* Same as inbox, as postfix */
-        example = "maildir:~/mail:INBOX=/var/spool/mail/%u";
-        description = ''
-          Location that dovecot will use for mail folders. Dovecot mail_location option.
-        '';
-      };
+    modules = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = [ pkgs.dovecot_pigeonhole ];
+      description = ''
+        Symlinks the contents of lib/dovecot of every given package into
+        /var/lib/dovecot/modules. This will make the given modules available
+        if a dovecot package with the module_dir patch applied (like
+        pkgs.dovecot22, the default) is being used.
+      '';
+    };
 
-      sslServerCert = mkOption {
-        default = "";
-        description = "Server certificate";
-      };
+    sslCACert = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Path to the server's CA certificate key.";
+    };
 
-      sslCACert = mkOption {
-        default = "";
-        description = "CA certificate used by the server certificate.";
-      };
+    sslServerCert = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Path to the server's public key.";
+    };
 
-      sslServerKey = mkOption {
-        default = "";
-        description = "Server key.";
-      };
+    sslServerKey = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Path to the server's private key.";
+    };
 
-      showPAMFailure = mkOption {
-        default = false;
-        description = "Show the PAM failure message on authentication error (useful for OTPW).";
-      };
+    enablePAM = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Wether to create a own Dovecot PAM service and configure PAM user logins.";
     };
 
+    showPAMFailure = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Show the PAM failure message on authentication error (useful for OTPW).";
+    };
   };
 
 
-  ###### implementation
-
-  config = mkIf config.services.dovecot2.enable {
+  config = mkIf cfg.enable {
 
-    security.pam.services.dovecot2 = {};
+    security.pam.services.dovecot2 = mkIf cfg.enablePAM {};
 
     users.extraUsers = [
       { name = cfg.user;
@@ -148,36 +189,47 @@ in
       }
     ];
 
-    users.extraGroups = singleton
-      { name = cfg.group;
-        gid = config.ids.gids.dovecot2;
-      };
-
-    systemd.services.dovecot2 =
-      { description = "Dovecot IMAP/POP3 server";
-
-        after = [ "network.target" ];
-        wantedBy = [ "multi-user.target" ];
-
-        preStart =
-          ''
-            ${pkgs.coreutils}/bin/mkdir -p /var/run/dovecot2 /var/run/dovecot2/login
-            ${pkgs.coreutils}/bin/chown -R ${cfg.user}:${cfg.group} /var/run/dovecot2
-          '';
-
-        serviceConfig = {
-          ExecStart = "${pkgs.dovecot}/sbin/dovecot -F -c ${cfg.configFile}";
-          Restart = "on-failure";
-          RestartSec = "1s";
-          StartLimitInterval = "1min";
-        };
+    users.extraGroups = singleton {
+      name = cfg.group;
+      gid = config.ids.gids.dovecot2;
+    };
 
+    systemd.services.dovecot2 = {
+      description = "Dovecot IMAP/POP3 server";
+
+      after = [ "keys.target" "network.target" ];
+      wants = [ "keys.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        mkdir -p "${baseDir}/login"
+        chown -R ${cfg.user}:${cfg.group} "${baseDir}"
+        rm -f "${stateDir}/modules"
+        ln -s "${modulesDir}" "${stateDir}/modules"
+      '';
+
+      serviceConfig = {
+        ExecStart = "${dovecotPkg}/sbin/dovecot -F -c ${cfg.configFile}";
+        Restart = "on-failure";
+        RestartSec = "1s";
+        StartLimitInterval = "1min";
       };
+    };
 
-    environment.systemPackages = [ pkgs.dovecot ];
+    environment.systemPackages = [ dovecotPkg ];
 
-    assertions = [{ assertion = cfg.enablePop3 || cfg.enableImap;
-                    message = "dovecot needs at least one of the IMAP or POP3 listeners enabled";}];
+    assertions = [
+      { assertion = cfg.enablePop3 || cfg.enableImap;
+        message = "dovecot needs at least one of the IMAP or POP3 listeners enabled";
+      }
+      { assertion = isNull cfg.sslServerCert == isNull cfg.sslServerKey
+          && (!(isNull cfg.sslCACert) -> !(isNull cfg.sslServerCert || isNull cfg.sslServerKey));
+        message = "dovecot needs both sslServerCert and sslServerKey defined for working crypto";
+      }
+      { assertion = cfg.showPAMFailure -> cfg.enablePAM;
+        message = "dovecot is configured with showPAMFailure while enablePAM is disabled";
+      }
+    ];
 
   };
 
diff --git a/nixos/modules/services/networking/avahi-daemon.nix b/nixos/modules/services/networking/avahi-daemon.nix
index 284b2b84e6c7..8b178ee93980 100644
--- a/nixos/modules/services/networking/avahi-daemon.nix
+++ b/nixos/modules/services/networking/avahi-daemon.nix
@@ -1,5 +1,5 @@
 # Avahi daemon.
-{ config, lib, pkgs, ... }:
+{ config, lib, utils, pkgs, ... }:
 
 with lib;
 
@@ -7,7 +7,9 @@ let
 
   cfg = config.services.avahi;
 
-  inherit (pkgs) avahi;
+  # We must escape interfaces due to the systemd interpretation
+  subsystemDevice = interface:
+    "sys-subsystem-net-devices-${utils.escapeSystemdPath interface}.device";
 
   avahiDaemonConf = with cfg; pkgs.writeText "avahi-daemon.conf" ''
     [server]
@@ -21,12 +23,18 @@ let
     browse-domains=${concatStringsSep ", " browseDomains}
     use-ipv4=${if ipv4 then "yes" else "no"}
     use-ipv6=${if ipv6 then "yes" else "no"}
+    ${optionalString (interfaces!=null) "allow-interfaces=${concatStringsSep "," interfaces}"}
 
     [wide-area]
     enable-wide-area=${if wideArea then "yes" else "no"}
 
     [publish]
-    disable-publishing=${if publishing then "no" else "yes"}
+    disable-publishing=${if publish.enable then "no" else "yes"}
+    disable-user-service-publishing=${if publish.userServices then "no" else "yes"}
+    publish-addresses=${if publish.userServices || publish.addresses then "yes" else "no"}
+    publish-hinfo=${if publish.hinfo then "yes" else "no"}
+    publish-workstation=${if publish.workstation then "yes" else "no"}
+    publish-domain=${if publish.domain then "yes" else "no"}
   '';
 
 in
@@ -74,14 +82,55 @@ in
         description = ''Whether to use IPv6'';
       };
 
+      interfaces = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        description = ''
+          List of network interfaces that should be used by the <command>avahi-daemon</command>.
+          Other interfaces will be ignored. If <literal>null</literal> all local interfaces
+          except loopback and point-to-point will be used.
+        '';
+      };
+
       wideArea = mkOption {
         default = true;
         description = ''Whether to enable wide-area service discovery.'';
       };
 
-      publishing = mkOption {
-        default = true;
-        description = ''Whether to allow publishing.'';
+      publish = {
+        enable = mkOption {
+          default = false;
+          description = ''Whether to allow publishing in general.'';
+        };
+
+        userServices = mkOption {
+          default = false;
+          description = ''Whether to publish user services. Will set <literal>addresses=true</literal>.'';
+        };
+
+        addresses = mkOption {
+          default = false;
+          description = ''Whether to register mDNS address records for all local IP addresses.'';
+        };
+
+        hinfo = mkOption {
+          default = false;
+          description = ''
+            Whether to register an mDNS HINFO record which contains information about the
+            local operating system and CPU.
+          '';
+        };
+
+        workstation = mkOption {
+          default = false;
+          description = ''Whether to register a service of type "_workstation._tcp" on the local LAN.'';
+        };
+
+        domain = mkOption {
+          default = false;
+          description = ''Whether to announce the locally used domain name for browsing by other hosts.'';
+        };
+
       };
 
       nssmdns = mkOption {
@@ -118,29 +167,36 @@ in
 
     system.nssModules = optional cfg.nssmdns pkgs.nssmdns;
 
-    environment.systemPackages = [ avahi ];
+    environment.systemPackages = [ pkgs.avahi ];
 
-    jobs.avahi_daemon =
-      { name = "avahi-daemon";
+    systemd.services.avahi-daemon =
+      let
+        deps = optionals (cfg.interfaces!=null) (map subsystemDevice cfg.interfaces);
+      in
+      { description = "Avahi daemon";
+        wantedBy = [ "ip-up.target" ];
+        bindsTo = deps;
+        after = deps;
+        before = [ "ip-up.target" ];
+        # Receive restart event after resume
+        partOf = [ "post-resume.target" ];
 
-        startOn = "ip-up";
+        path = [ pkgs.coreutils pkgs.avahi ];
+
+        preStart = "mkdir -p /var/run/avahi-daemon";
 
         script =
           ''
-            export PATH="${avahi}/bin:${avahi}/sbin:$PATH"
-
             # Make NSS modules visible so that `avahi_nss_support ()' can
             # return a sensible value.
             export LD_LIBRARY_PATH="${config.system.nssModules.path}"
 
-            mkdir -p /var/run/avahi-daemon
-
-            exec ${avahi}/sbin/avahi-daemon --syslog -f "${avahiDaemonConf}"
+            exec ${pkgs.avahi}/sbin/avahi-daemon --syslog -f "${avahiDaemonConf}"
           '';
       };
 
     services.dbus.enable = true;
-    services.dbus.packages = [avahi];
+    services.dbus.packages = [ pkgs.avahi ];
 
     # Enabling Avahi without exposing it in the firewall doesn't make
     # sense.
diff --git a/nixos/modules/services/networking/hostapd.nix b/nixos/modules/services/networking/hostapd.nix
index 5a6ca139ddad..287964aab072 100644
--- a/nixos/modules/services/networking/hostapd.nix
+++ b/nixos/modules/services/networking/hostapd.nix
@@ -2,21 +2,17 @@
 
 # TODO:
 #
-# asserts 
+# asserts
 #   ensure that the nl80211 module is loaded/compiled in the kernel
-#   hwMode must be a/b/g
-#   channel must be between 1 and 13 (maybe)
 #   wpa_supplicant and hostapd on the same wireless interface doesn't make any sense
-#   perhaps an assertion that there is a dhcp server and a dns server on the IP address serviced by the hostapd?
 
 with lib;
 
 let
 
   cfg = config.services.hostapd;
-  
-  configFile = pkgs.writeText "hostapd.conf"  
-    ''
+
+  configFile = pkgs.writeText "hostapd.conf" ''
     interface=${cfg.interface}
     driver=${cfg.driver}
     ssid=${cfg.ssid}
@@ -37,8 +33,8 @@ let
       wpa_passphrase=${cfg.wpaPassphrase}
       '' else ""}
 
-    ${cfg.extraCfg}
-    '' ;
+    ${cfg.extraConfig}
+  '' ;
 
 in
 
@@ -65,9 +61,9 @@ in
 
       interface = mkOption {
         default = "";
-        example = "wlan0";
+        example = "wlp2s0";
         description = ''
-          The interfaces <command>hostapd</command> will use. 
+          The interfaces <command>hostapd</command> will use.
         '';
       };
 
@@ -89,8 +85,7 @@ in
       };
 
       hwMode = mkOption {
-        default = "b";
-        example = "g";
+        default = "g";
         type = types.string;
         description = ''
           Operation mode.
@@ -98,17 +93,16 @@ in
         '';
       };
 
-      channel = mkOption { 
+      channel = mkOption {
         default = 7;
         example = 11;
         type = types.int;
-        description = 
-          ''
+        description = ''
           Channel number (IEEE 802.11)
           Please note that some drivers do not use this value from
           <command>hostapd</command> and the channel will need to be configured
           separately with <command>iwconfig</command>.
-          '';
+        '';
       };
 
       group = mkOption {
@@ -131,16 +125,15 @@ in
         default = "my_sekret";
         example = "any_64_char_string";
         type = types.string;
-        description = 
-          ''
+        description = ''
           WPA-PSK (pre-shared-key) passphrase. Clients will need this
           passphrase to associate with this access point.
           Warning: This passphrase will get put into a world-readable file in
           the Nix store!
-          '';
+        '';
       };
 
-      extraCfg = mkOption {
+      extraConfig = mkOption {
         default = "";
         example = ''
           auth_algo=0
@@ -158,17 +151,25 @@ in
 
   config = mkIf cfg.enable {
 
+    assertions = [
+      { assertion = (cfg.hwMode == "a" || cfg.hwMode == "b" || cfg.hwMode == "g");
+        message = "hwMode must be a/b/g";
+      }
+      { assertion = (cfg.channel >= 1 && cfg.channel <= 13);
+        message = "channel must be between 1 and 13";
+      }];
+
     environment.systemPackages =  [ pkgs.hostapd ];
 
     systemd.services.hostapd =
       { description = "hostapd wireless AP";
 
-        path = [ pkgs.hostapd ]; 
+        path = [ pkgs.hostapd ];
         wantedBy = [ "network.target" ];
 
         after = [ "${cfg.interface}-cfg.service" "nat.service" "bind.service" "dhcpd.service"];
 
-        serviceConfig = 
+        serviceConfig =
           { ExecStart = "${pkgs.hostapd}/bin/hostapd ${configFile}";
             Restart = "always";
           };
diff --git a/nixos/modules/services/networking/miniupnpd.nix b/nixos/modules/services/networking/miniupnpd.nix
index e654eb80b177..19400edb68f9 100644
--- a/nixos/modules/services/networking/miniupnpd.nix
+++ b/nixos/modules/services/networking/miniupnpd.nix
@@ -30,7 +30,7 @@ in
 
       internalIPs = mkOption {
         type = types.listOf types.str;
-        example = [ "192.168.1.0/24" ];
+        example = [ "192.168.1.1/24" "enp1s0" ];
         description = ''
           The IP address ranges to listen on.
         '';
@@ -57,13 +57,42 @@ in
   };
 
   config = mkIf cfg.enable {
+    # from miniupnpd/netfilter/iptables_init.sh
+    networking.firewall.extraCommands = ''
+      iptables -t nat -N MINIUPNPD
+      iptables -t nat -A PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
+      iptables -t mangle -N MINIUPNPD
+      iptables -t mangle -A PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
+      iptables -t filter -N MINIUPNPD
+      iptables -t filter -A FORWARD -i ${cfg.externalInterface} ! -o ${cfg.externalInterface} -j MINIUPNPD
+      iptables -t nat -N MINIUPNPD-PCP-PEER
+      iptables -t nat -A POSTROUTING -o ${cfg.externalInterface} -j MINIUPNPD-PCP-PEER
+    '';
+
+    # from miniupnpd/netfilter/iptables_removeall.sh
+    networking.firewall.extraStopCommands = ''
+      iptables -t nat -F MINIUPNPD
+      iptables -t nat -D PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
+      iptables -t nat -X MINIUPNPD
+      iptables -t mangle -F MINIUPNPD
+      iptables -t mangle -D PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
+      iptables -t mangle -X MINIUPNPD
+      iptables -t filter -F MINIUPNPD
+      iptables -t filter -D FORWARD -i ${cfg.externalInterface} ! -o ${cfg.externalInterface} -j MINIUPNPD
+      iptables -t filter -X MINIUPNPD
+      iptables -t nat -F MINIUPNPD-PCP-PEER
+      iptables -t nat -D POSTROUTING -o ${cfg.externalInterface} -j MINIUPNPD-PCP-PEER
+      iptables -t nat -X MINIUPNPD-PCP-PEER
+    '';
+
     systemd.services.miniupnpd = {
       description = "MiniUPnP daemon";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.miniupnpd ];
       serviceConfig = {
-        ExecStart = "${pkgs.miniupnpd}/bin/miniupnpd -d -f ${configFile}";
+        ExecStart = "${pkgs.miniupnpd}/bin/miniupnpd -f ${configFile}";
+        PIDFile = "/var/run/miniupnpd.pid";
+        Type = "forking";
       };
     };
   };
diff --git a/nixos/modules/services/networking/murmur.nix b/nixos/modules/services/networking/murmur.nix
index 4f91a4947479..1cc19a2c9e09 100644
--- a/nixos/modules/services/networking/murmur.nix
+++ b/nixos/modules/services/networking/murmur.nix
@@ -39,6 +39,9 @@ let
     certrequired=${if cfg.clientCertRequired then "true" else "false"}
     ${if cfg.sslCert == "" then "" else "sslCert="+cfg.sslCert}
     ${if cfg.sslKey  == "" then "" else "sslKey="+cfg.sslKey}
+    ${if cfg.sslCa   == "" then "" else "sslCA="+cfg.sslCa}
+    
+    ${cfg.extraConfig}
   '';
 in
 {
@@ -219,6 +222,18 @@ in
         default = "";
         description = "Path to your SSL key.";
       };
+
+      sslCa = mkOption {
+        type = types.str;
+        default = "";
+        description = "Path to your SSL CA certificate.";
+      };
+
+      extraConfig = mkOption {
+        type = types.str;
+        default = "";
+        description = "Extra configuration to put into mumur.ini.";
+      };
     };
   };
 
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index 7df194fa419b..8ab4cfcc114a 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -223,9 +223,11 @@ in {
     }
     {
       name = "nm-openvpn";
+      gid = config.ids.gids.nm-openvpn;
     }];
     users.extraUsers = [{
       name = "nm-openvpn";
+      uid = config.ids.uids.nm-openvpn;
     }];
 
     systemd.packages = cfg.packages;
diff --git a/nixos/modules/services/networking/shairport-sync.nix b/nixos/modules/services/networking/shairport-sync.nix
new file mode 100644
index 000000000000..a523e66d09b9
--- /dev/null
+++ b/nixos/modules/services/networking/shairport-sync.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.shairport-sync;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.shairport-sync = {
+
+      enable = mkOption {
+        default = false;
+        description = ''
+          Enable the shairport-sync daemon.
+
+          Running with a local system-wide or remote pulseaudio server
+          is recommended.
+        '';
+      };
+
+      arguments = mkOption {
+        default = "-v -o pulse";
+        description = ''
+          Arguments to pass to the daemon. Defaults to a local pulseaudio
+          server.
+        '';
+      };
+
+      user = mkOption {
+        default = "shairport";
+        description = ''
+          User account name under which to run shairport-sync. The account
+          will be created.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.shairport-sync.enable {
+
+    services.avahi.enable = true;
+
+    users.extraUsers = singleton
+      { name = cfg.user;
+        description = "Shairport user";
+        isSystemUser = true;
+        createHome = true;
+        home = "/var/lib/shairport-sync";
+        extraGroups = [ "audio" ] ++ optional config.hardware.pulseaudio.enable "pulse";
+      };
+
+    systemd.services.shairport-sync =
+      {
+        description = "shairport-sync";
+        after = [ "network.target" "avahi-daemon.service" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = cfg.user;
+          ExecStart = "${pkgs.shairport-sync}/bin/shairport-sync ${cfg.arguments}";
+        };
+      };
+
+    environment.systemPackages = [ pkgs.shairport-sync ];
+
+  };
+
+}
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index 69c76cf97cfd..0fe25b66da08 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -247,6 +247,8 @@ in
 
         wantedBy = [ "multi-user.target" ];
         wants = [ "cups.service" "avahi-daemon.service" ];
+        bindsTo = [ "cups.service" "avahi-daemon.service" ];
+        partOf = [ "cups.service" "avahi-daemon.service" ];
         after = [ "cups.service" "avahi-daemon.service" ];
 
         path = [ cups ];
diff --git a/nixos/modules/services/security/clamav.nix b/nixos/modules/services/security/clamav.nix
index a4d54301fc17..548aee29b266 100644
--- a/nixos/modules/services/security/clamav.nix
+++ b/nixos/modules/services/security/clamav.nix
@@ -3,78 +3,115 @@ with lib;
 let
   clamavUser = "clamav";
   stateDir = "/var/lib/clamav";
+  runDir = "/var/run/clamav";
+  logDir = "/var/log/clamav";
   clamavGroup = clamavUser;
   cfg = config.services.clamav;
+  clamdConfigFile = pkgs.writeText "clamd.conf" ''
+    DatabaseDirectory ${stateDir}
+    LocalSocket ${runDir}/clamd.ctl
+    LogFile ${logDir}/clamav.log
+    PidFile ${runDir}/clamd.pid
+    User clamav
+
+    ${cfg.daemon.extraConfig}
+  '';
 in
 {
-  ###### interface
-
   options = {
-
     services.clamav = {
+      daemon = {
+        enable = mkEnableOption "clamd daemon";
+
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          description = ''
+            Extra configuration for clamd. Contents will be added verbatim to the
+            configuration file.
+          '';
+        };
+      };
       updater = {
-	enable = mkOption {
-	  default = false;
-	  description = ''
-	    Whether to enable automatic ClamAV virus definitions database updates.
-	  '';
-	};
+        enable = mkEnableOption "freshclam updater";
 
-	frequency = mkOption {
-	  default = 12;
-	  description = ''
-	    Number of database checks per day.
-	  '';
-	};
+        frequency = mkOption {
+          default = 12;
+          description = ''
+            Number of database checks per day.
+          '';
+        };
 
-	config = mkOption {
-	  default = "";
-	  description = ''
-	    Extra configuration for freshclam. Contents will be added verbatim to the
-	    configuration file.
-	  '';
-	};
+        config = mkOption {
+          default = "";
+          description = ''
+            Extra configuration for freshclam. Contents will be added verbatim to the
+            configuration file.
+          '';
+        };
       };
     };
   };
 
-  ###### implementation
-
-  config = mkIf cfg.updater.enable {
+  config = mkIf cfg.updater.enable or cfg.daemon.enable {
     environment.systemPackages = [ pkgs.clamav ];
-    users.extraUsers = singleton
-      { name = clamavUser;
-        uid = config.ids.uids.clamav;
-        description = "ClamAV daemon user";
-        home = stateDir;
-      };
+    users.extraUsers = singleton {
+      name = clamavUser;
+      uid = config.ids.uids.clamav;
+      description = "ClamAV daemon user";
+      home = stateDir;
+    };
 
-    users.extraGroups = singleton
-      { name = clamavGroup;
-        gid = config.ids.gids.clamav;
-      };
+    users.extraGroups = singleton {
+      name = clamavGroup;
+      gid = config.ids.gids.clamav;
+    };
 
-    services.clamav.updater.config = ''
+    services.clamav.updater.config = mkIf cfg.updater.enable ''
       DatabaseDirectory ${stateDir}
       Foreground yes
       Checks ${toString cfg.updater.frequency}
       DatabaseMirror database.clamav.net
     '';
 
-    jobs = {
-      clamav_updater = {
-	name = "clamav-updater";
-          startOn = "started network-interfaces";
-          stopOn = "stopping network-interfaces";
-
-          preStart = ''
-            mkdir -m 0755 -p ${stateDir}
-            chown ${clamavUser}:${clamavGroup} ${stateDir}
-          '';
-          exec = "${pkgs.clamav}/bin/freshclam --daemon --config-file=${pkgs.writeText "freshclam.conf" cfg.updater.config}";
-      }; 
+    systemd.services.clamd = mkIf cfg.daemon.enable {
+      description = "ClamAV daemon (clamd)";
+      path = [ pkgs.clamav ];
+      after = [ "network.target" "freshclam.service" ];
+      requires = [ "freshclam.service" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        mkdir -m 0755 -p ${logDir}
+        mkdir -m 0755 -p ${runDir}
+        chown ${clamavUser}:${clamavGroup} ${logDir}
+        chown ${clamavUser}:${clamavGroup} ${runDir}
+      '';
+      serviceConfig = {
+        ExecStart = "${pkgs.clamav}/bin/clamd --config-file=${clamdConfigFile}";
+        Type = "forking";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "on-failure";
+        RestartSec = "10s";
+        StartLimitInterval = "1min";
+      };
     };
 
+    systemd.services.freshclam = mkIf cfg.updater.enable {
+      description = "ClamAV updater (freshclam)";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.clamav ];
+      preStart = ''
+        mkdir -m 0755 -p ${stateDir}
+        chown ${clamavUser}:${clamavGroup} ${stateDir}
+      '';
+      serviceConfig = {
+        ExecStart = "${pkgs.clamav}/bin/freshclam --daemon --config-file=${pkgs.writeText "freshclam.conf" cfg.updater.config}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "on-failure";
+        RestartSec = "10s";
+        StartLimitInterval = "1min";
+      };
+    };
   };
-
 }
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index e32d6669b046..b9c97a54fd2c 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -140,9 +140,6 @@ in {
           # Update user dirs as described in http://freedesktop.org/wiki/Software/xdg-user-dirs/
           ${pkgs.xdg-user-dirs}/bin/xdg-user-dirs-update
 
-          # Find the mouse
-          export XCURSOR_PATH=~/.icons:${config.system.path}/share/icons
-
           ${gnome3.gnome_session}/bin/gnome-session&
           waitPID=$!
         '';
diff --git a/nixos/modules/services/x11/desktop-managers/kde5.nix b/nixos/modules/services/x11/desktop-managers/kde5.nix
index 0f3cb5735e75..2aeb4f67d771 100644
--- a/nixos/modules/services/x11/desktop-managers/kde5.nix
+++ b/nixos/modules/services/x11/desktop-managers/kde5.nix
@@ -8,9 +8,7 @@ let
   cfg = xcfg.desktopManager.kde5;
   xorg = pkgs.xorg;
 
-  kf5 = pkgs.kf5_stable;
-  plasma5 = pkgs.plasma5_stable;
-  kdeApps = pkgs.kdeApps_stable;
+  kde5 = pkgs.kde5;
 
 in
 
@@ -57,12 +55,12 @@ in
     services.xserver.desktopManager.session = singleton {
       name = "kde5";
       bgSupport = true;
-      start = ''exec ${plasma5.plasma-workspace}/bin/startkde;'';
+      start = ''exec ${kde5.plasma-workspace}/bin/startkde;'';
     };
 
     security.setuidOwners = singleton {
       program = "kcheckpass";
-      source = "${plasma5.plasma-workspace}/lib/libexec/kcheckpass";
+      source = "${kde5.plasma-workspace}/lib/libexec/kcheckpass";
       owner = "root";
       group = "root";
       setuid = true;
@@ -72,61 +70,61 @@ in
       [
         pkgs.qt4 # qtconfig is the only way to set Qt 4 theme
 
-        kf5.frameworkintegration
-        kf5.kinit
-
-        plasma5.breeze
-        plasma5.kde-cli-tools
-        plasma5.kdeplasma-addons
-        plasma5.kgamma5
-        plasma5.khelpcenter
-        plasma5.khotkeys
-        plasma5.kinfocenter
-        plasma5.kmenuedit
-        plasma5.kscreen
-        plasma5.ksysguard
-        plasma5.kwayland
-        plasma5.kwin
-        plasma5.kwrited
-        plasma5.milou
-        plasma5.oxygen
-        plasma5.polkit-kde-agent
-        plasma5.systemsettings
-
-        plasma5.plasma-desktop
-        plasma5.plasma-workspace
-        plasma5.plasma-workspace-wallpapers
-
-        kdeApps.ark
-        kdeApps.dolphin
-        kdeApps.dolphin-plugins
-        kdeApps.ffmpegthumbs
-        kdeApps.gwenview
-        kdeApps.kate
-        kdeApps.kdegraphics-thumbnailers
-        kdeApps.konsole
-        kdeApps.okular
-        kdeApps.print-manager
+        kde5.frameworkintegration
+        kde5.kinit
+
+        kde5.breeze
+        kde5.kde-cli-tools
+        kde5.kdeplasma-addons
+        kde5.kgamma5
+        kde5.khelpcenter
+        kde5.khotkeys
+        kde5.kinfocenter
+        kde5.kmenuedit
+        kde5.kscreen
+        kde5.ksysguard
+        kde5.kwayland
+        kde5.kwin
+        kde5.kwrited
+        kde5.milou
+        kde5.oxygen
+        kde5.polkit-kde-agent
+        kde5.systemsettings
+
+        kde5.plasma-desktop
+        kde5.plasma-workspace
+        kde5.plasma-workspace-wallpapers
+
+        kde5.ark
+        kde5.dolphin
+        kde5.dolphin-plugins
+        kde5.ffmpegthumbs
+        kde5.gwenview
+        kde5.kate
+        kde5.kdegraphics-thumbnailers
+        kde5.konsole
+        kde5.okular
+        kde5.print-manager
 
         # Oxygen icons moved to KDE Frameworks 5.16 and later.
-        (kdeApps.oxygen-icons or kf5.oxygen-icons5)
+        (kde5.oxygen-icons or kde5.oxygen-icons5)
         pkgs.hicolor_icon_theme
 
-        plasma5.kde-gtk-config
+        kde5.kde-gtk-config
       ]
 
       # Plasma 5.5 and later has a Breeze GTK theme.
       # If it is not available, Orion is very similar to Breeze.
-      ++ lib.optional (!(lib.hasAttr "breeze-gtk" plasma5)) pkgs.orion
+      ++ lib.optional (!(lib.hasAttr "breeze-gtk" kde5)) pkgs.orion
 
       # Install Breeze icons if available
-      ++ lib.optional (lib.hasAttr "breeze-icons" kf5) kf5.breeze-icons
+      ++ lib.optional (lib.hasAttr "breeze-icons" kde5) kde5.breeze-icons
 
       # Optional hardware support features
-      ++ lib.optional config.hardware.bluetooth.enable plasma5.bluedevil
-      ++ lib.optional config.networking.networkmanager.enable plasma5.plasma-nm
-      ++ lib.optional config.hardware.pulseaudio.enable plasma5.plasma-pa
-      ++ lib.optional config.powerManagement.enable plasma5.powerdevil
+      ++ lib.optional config.hardware.bluetooth.enable kde5.bluedevil
+      ++ lib.optional config.networking.networkmanager.enable kde5.plasma-nm
+      ++ lib.optional config.hardware.pulseaudio.enable kde5.plasma-pa
+      ++ lib.optional config.powerManagement.enable kde5.powerdevil
 
       ++ lib.optionals cfg.phonon.gstreamer.enable
         [
@@ -137,7 +135,7 @@ in
           pkgs.gst_all.gstPluginsUgly
           pkgs.gst_all.gstPluginsBad
           pkgs.gst_all.gstFfmpeg # for mp3 playback
-          pkgs.phonon_qt5_backend_gstreamer
+          pkgs.qt55.phonon-backend-gstreamer
           pkgs.gst_all_1.gstreamer
           pkgs.gst_all_1.gst-plugins-base
           pkgs.gst_all_1.gst-plugins-good
@@ -149,7 +147,7 @@ in
       ++ lib.optionals cfg.phonon.vlc.enable
         [
           pkgs.phonon_qt5_backend_vlc
-          pkgs.phonon_backend_vlc
+          pkgs.qt55.phonon-backend-vlc
         ];
 
     environment.pathsToLink = [ "/share" ];
@@ -166,9 +164,14 @@ in
         GST_PLUGIN_SYSTEM_PATH_1_0 = [ "/lib/gstreamer-1.0" ];
       };
 
-    fonts.fonts = [ (plasma5.oxygen-fonts or pkgs.noto-fonts) ];
+    # Enable GTK applications to load SVG icons
+    environment.variables = mkIf (lib.hasAttr "breeze-icons" kde5) {
+      GDK_PIXBUF_MODULE_FILE = "${pkgs.librsvg}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache";
+    };
+
+    fonts.fonts = [ (kde5.oxygen-fonts or pkgs.noto-fonts) ];
 
-    programs.ssh.askPassword = "${plasma5.ksshaskpass}/bin/ksshaskpass";
+    programs.ssh.askPassword = "${kde5.ksshaskpass}/bin/ksshaskpass";
 
     # Enable helpful DBus services.
     services.udisks2.enable = true;
@@ -180,8 +183,8 @@ in
     services.xserver.displayManager.sddm = {
       theme = "breeze";
       themes = [
-        plasma5.plasma-workspace
-        (kdeApps.oxygen-icons or kf5.oxygen-icons5)
+        kde5.plasma-workspace
+        (kde5.oxygen-icons or kde5.oxygen-icons5)
       ];
     };
 
diff --git a/nixos/modules/services/x11/display-managers/default.nix b/nixos/modules/services/x11/display-managers/default.nix
index 4f6d490bd91d..3aa09193a66e 100644
--- a/nixos/modules/services/x11/display-managers/default.nix
+++ b/nixos/modules/services/x11/display-managers/default.nix
@@ -37,7 +37,7 @@ let
   # file provided by services.xserver.displayManager.session.script
   xsession = wm: dm: pkgs.writeScript "xsession"
     ''
-      #! /bin/sh
+      #! ${pkgs.bash}/bin/bash
 
       . /etc/profile
       cd "$HOME"
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index c8ccf43029dc..ded694d90d50 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -13,9 +13,16 @@ let
   # lightdm runs with clearenv(), but we need a few things in the enviornment for X to startup
   xserverWrapper = writeScript "xserver-wrapper"
     ''
-      #! /bin/sh
+      #! ${pkgs.bash}/bin/bash
       ${concatMapStrings (n: "export ${n}=\"${getAttr n xEnv}\"\n") (attrNames xEnv)}
-      exec ${dmcfg.xserverBin} ${dmcfg.xserverArgs}
+
+      display=$(echo "$@" | xargs -n 1 | grep -P ^:\\d\$ | head -n 1 | sed s/^://)
+      if [ -z "$display" ]
+      then additionalArgs=":0 -logfile /var/log/X.0.log"
+      else additionalArgs="-logfile /var/log/X.$display.log"
+      fi
+
+      exec ${dmcfg.xserverBin} ${dmcfg.xserverArgs} $additionalArgs "$@"
     '';
 
   usersConf = writeText "users.conf"
@@ -39,7 +46,6 @@ let
       greeter-session = ${cfg.greeter.name}
       ${cfg.extraSeatDefaults}
     '';
-
 in
 {
   # Note: the order in which lightdm greeter modules are imported
@@ -98,7 +104,6 @@ in
   };
 
   config = mkIf cfg.enable {
-
     services.xserver.displayManager.slim.enable = false;
 
     services.xserver.displayManager.job = {
@@ -149,5 +154,7 @@ in
 
     services.xserver.displayManager.lightdm.background = mkDefault "${pkgs.nixos-artwork}/share/artwork/gnome/Gnome_Dark.png";
 
+    services.xserver.tty     = null; # We might start multiple X servers, so let the ttys increment themselves.
+    services.xserver.display = null; # We specify our own display (and logfile) in xserver-wrapper up there
   };
 }
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index ae947a5d2d4b..6b344822977f 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -17,6 +17,16 @@ let
     exec ${dmcfg.xserverBin} ${dmcfg.xserverArgs} "$@"
   '';
 
+  Xsetup = pkgs.writeScript "Xsetup" ''
+    #!/bin/sh
+    ${cfg.setupScript}
+  '';
+
+  Xstop = pkgs.writeScript "Xstop" ''
+    #!/bin/sh
+    ${cfg.stopScript}
+  '';
+
   cfgFile = pkgs.writeText "sddm.conf" ''
     [General]
     HaltCommand=${pkgs.systemd}/bin/systemctl poweroff
@@ -39,6 +49,8 @@ let
     SessionCommand=${dmcfg.session.script}
     SessionDir=${dmcfg.session.desktops}
     XauthPath=${pkgs.xorg.xauth}/bin/xauth
+    DisplayCommand=${Xsetup}
+    DisplayStopCommand=${Xstop}
 
     ${optionalString cfg.autoLogin.enable ''
     [Autologin]
@@ -98,6 +110,27 @@ in
         '';
       };
 
+      setupScript = mkOption {
+        type = types.str;
+        default = "";
+        example = ''
+          # workaround for using NVIDIA Optimus without Bumblebee
+          xrandr --setprovideroutputsource modesetting NVIDIA-0
+          xrandr --auto
+        '';
+        description = ''
+          A script to execute when starting the display server.
+        '';
+      };
+
+      stopScript = mkOption {
+        type = types.str;
+        default = "";
+        description = ''
+          A script to execute when stopping the display server.
+        '';
+      };
+
       autoLogin = mkOption {
         default = {};
         description = ''
@@ -105,7 +138,7 @@ in
         '';
 
         type = types.submodule {
-	  options = {
+          options = {
             enable = mkOption {
               type = types.bool;
               default = false;
@@ -130,7 +163,7 @@ in
                 will work only the first time.
               '';
             };
-	  };
+          };
         };
       };
 
@@ -142,14 +175,16 @@ in
 
     assertions = [
       { assertion = cfg.autoLogin.enable -> cfg.autoLogin.user != null;
-        message = "SDDM auto-login requires services.xserver.displayManager.sddm.autoLogin.user to be set";
+        message = ''
+          SDDM auto-login requires services.xserver.displayManager.sddm.autoLogin.user to be set
+        '';
       }
       { assertion = cfg.autoLogin.enable -> elem defaultSessionName dmcfg.session.names;
         message = ''
           SDDM auto-login requires that services.xserver.desktopManager.default and
-	  services.xserver.windowMananger.default are set to valid values. The current
-	  default session: ${defaultSessionName} is not valid.
-	'';
+          services.xserver.windowManager.default are set to valid values. The current
+          default session: ${defaultSessionName} is not valid.
+        '';
       }
     ];
 
diff --git a/nixos/modules/services/x11/redshift.nix b/nixos/modules/services/x11/redshift.nix
index d40373ec2e55..6614be261e50 100644
--- a/nixos/modules/services/x11/redshift.nix
+++ b/nixos/modules/services/x11/redshift.nix
@@ -98,13 +98,16 @@ in {
       requires = [ "display-manager.service" ];
       after = [ "display-manager.service" ];
       wantedBy = [ "graphical.target" ];
-      serviceConfig.ExecStart = ''
-        ${cfg.package}/bin/redshift \
-          -l ${cfg.latitude}:${cfg.longitude} \
-          -t ${toString cfg.temperature.day}:${toString cfg.temperature.night} \
-          -b ${toString cfg.brightness.day}:${toString cfg.brightness.night} \
-          ${lib.strings.concatStringsSep " " cfg.extraOptions}
-      '';
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/redshift \
+            -l ${cfg.latitude}:${cfg.longitude} \
+            -t ${toString cfg.temperature.day}:${toString cfg.temperature.night} \
+            -b ${toString cfg.brightness.day}:${toString cfg.brightness.night} \
+            ${lib.strings.concatStringsSep " " cfg.extraOptions}
+        '';
+        RestartSec = 3;
+      };
       environment = { DISPLAY = ":0"; };
       serviceConfig.Restart = "always";
     };
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 3860b50f2497..aec6fb0cc045 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -280,6 +280,13 @@ in
         '';
       };
 
+      xkbDir = mkOption {
+        type = types.path;
+        description = ''
+          Path used for -xkbdir xserver parameter.
+        '';
+      };
+
       config = mkOption {
         type = types.lines;
         description = ''
@@ -381,13 +388,13 @@ in
       };
 
       tty = mkOption {
-        type = types.int;
+        type = types.nullOr types.int;
         default = 7;
         description = "Virtual console for the X server.";
       };
 
       display = mkOption {
-        type = types.int;
+        type = types.nullOr types.int;
         default = 0;
         description = "Display number for the X server.";
       };
@@ -409,6 +416,16 @@ in
           if possible.
         '';
       };
+
+      enableCtrlAltBackspace = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable the Ctrl+Alt+Backspace key combination to
+          forcefully kill X. This can lead to data loss and is disabled
+          by default (the X server's DontZap option is left on).
+        '';
+      };
     };
 
   };
@@ -452,7 +469,7 @@ in
             target = "X11/xorg.conf";
           }
           # -xkbdir command line option does not seems to be passed to xkbcomp.
-          { source = "${pkgs.xkeyboard_config}/etc/X11/xkb";
+          { source = "${cfg.xkbDir}";
             target = "X11/xkb";
           }
         ]);
@@ -517,11 +534,12 @@ in
     services.xserver.displayManager.xserverArgs =
       [ "-ac"
         "-terminate"
-        "-logfile" "/var/log/X.${toString cfg.display}.log"
         "-config ${configFile}"
-        ":${toString cfg.display}" "vt${toString cfg.tty}"
-        "-xkbdir" "${pkgs.xkeyboard_config}/etc/X11/xkb"
-      ] ++ optional (!cfg.enableTCP) "-nolisten tcp";
+        "-xkbdir" "${cfg.xkbDir}"
+      ] ++ optional (cfg.display != null) ":${toString cfg.display}"
+        ++ optional (cfg.tty     != null) "vt${toString cfg.tty}"
+        ++ optionals (cfg.display != null) [ "-logfile" "/var/log/X.${toString cfg.display}.log" ]
+        ++ optional (!cfg.enableTCP) "-nolisten tcp";
 
     services.xserver.modules =
       concatLists (catAttrs "modules" cfg.drivers) ++
@@ -529,10 +547,13 @@ in
         xorg.xf86inputevdev
       ];
 
+    services.xserver.xkbDir = mkDefault "${pkgs.xkeyboard_config}/etc/X11/xkb";
+
     services.xserver.config =
       ''
         Section "ServerFlags"
           Option "AllowMouseOpenFail" "on"
+          Option "DontZap" "${if cfg.enableCtrlAltBackspace then "off" else "on"}"
           ${cfg.serverFlagsSection}
         EndSection
 
diff --git a/nixos/modules/system/boot/loader/grub/grub.nix b/nixos/modules/system/boot/loader/grub/grub.nix
index 87dbbd7cd51f..47605e3685ca 100644
--- a/nixos/modules/system/boot/loader/grub/grub.nix
+++ b/nixos/modules/system/boot/loader/grub/grub.nix
@@ -10,8 +10,11 @@ let
 
   realGrub = if cfg.version == 1 then pkgs.grub
     else if cfg.zfsSupport then pkgs.grub2.override { zfsSupport = true; }
-    else if cfg.enableTrustedBoot then pkgs.trustedGrub
-           else pkgs.grub2;
+    else if cfg.trustedBoot.enable
+         then if cfg.trustedBoot.isHPLaptop
+              then pkgs.trustedGrub-for-HP
+              else pkgs.trustedGrub
+         else pkgs.grub2;
 
   grub =
     # Don't include GRUB if we're only generating a GRUB menu (e.g.,
@@ -369,24 +372,37 @@ in
         '';
       };
 
-      enableTrustedBoot = mkOption {
-        default = false;
-        type = types.bool;
-        description = ''
-          Enable trusted boot. GRUB will measure all critical components during
-          the boot process to offer TCG (TPM) support.
-        '';
-      };
+      trustedBoot = {
+
+        enable = mkOption {
+          default = false;
+          type = types.bool;
+          description = ''
+            Enable trusted boot. GRUB will measure all critical components during
+            the boot process to offer TCG (TPM) support.
+          '';
+        };
+
+        systemHasTPM = mkOption {
+          default = "";
+          example = "YES_TPM_is_activated";
+          type = types.string;
+          description = ''
+            Assertion that the target system has an activated TPM. It is a safety
+            check before allowing the activation of 'trustedBoot.enable'. TrustedBoot
+            WILL FAIL TO BOOT YOUR SYSTEM if no TPM is available.
+          '';
+        };
+
+        isHPLaptop = mkOption {
+          default = false;
+          type = types.bool;
+          description = ''
+            Use a special version of TrustedGRUB that is needed by some HP laptops
+            and works only for the HP laptops.
+          '';
+        };
 
-      systemHasTPM = mkOption {
-        default = "";
-        example = "YES_TPM_is_activated";
-        type = types.string;
-        description = ''
-          Assertion that the target system has an activated TPM. It is a safety
-          check before allowing the activation of 'enableTrustedBoot'. TrustedBoot
-          WILL FAIL TO BOOT YOUR SYSTEM if no TPM is available.
-        '';
       };
 
     };
@@ -452,19 +468,19 @@ in
           message = "You cannot have duplicated devices in mirroredBoots";
         }
         {
-          assertion = !cfg.enableTrustedBoot || cfg.version == 2;
+          assertion = !cfg.trustedBoot.enable || cfg.version == 2;
           message = "Trusted GRUB is only available for GRUB 2";
         }
         {
-          assertion = !cfg.efiSupport || !cfg.enableTrustedBoot;
+          assertion = !cfg.efiSupport || !cfg.trustedBoot.enable;
           message = "Trusted GRUB does not have EFI support";
         }
         {
-          assertion = !cfg.zfsSupport || !cfg.enableTrustedBoot;
+          assertion = !cfg.zfsSupport || !cfg.trustedBoot.enable;
           message = "Trusted GRUB does not have ZFS support";
         }
         {
-          assertion = !cfg.enableTrustedBoot || cfg.systemHasTPM == "YES_TPM_is_activated";
+          assertion = !cfg.trustedBoot.enable || cfg.trustedBoot.systemHasTPM == "YES_TPM_is_activated";
           message = "Trusted GRUB can break the system! Confirm that the system has an activated TPM by setting 'systemHasTPM'.";
         }
       ] ++ flip concatMap cfg.mirroredBoots (args: [
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index 675bd3d232a6..dedd3f5ca451 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -214,7 +214,7 @@ in
             done
             ''] ++ (map (pool: ''
             echo "importing root ZFS pool \"${pool}\"..."
-            zpool import -N $ZFS_FORCE "${pool}"
+            zpool import -d /dev/disk/by-id -N $ZFS_FORCE "${pool}"
         '') rootPools));
       };
 
@@ -255,7 +255,7 @@ in
             };
             script = ''
               zpool_cmd="${zfsUserPkg}/sbin/zpool"
-              ("$zpool_cmd" list "${pool}" >/dev/null) || "$zpool_cmd" import -N ${optionalString cfgZfs.forceImportAll "-f"} "${pool}"
+              ("$zpool_cmd" list "${pool}" >/dev/null) || "$zpool_cmd" import -d /dev/disk/by-id -N ${optionalString cfgZfs.forceImportAll "-f"} "${pool}"
             '';
           };
       in listToAttrs (map createImportService dataPools) // {
diff --git a/nixos/modules/tasks/network-interfaces-scripted.nix b/nixos/modules/tasks/network-interfaces-scripted.nix
index 80b7f718580e..f07e7baeb119 100644
--- a/nixos/modules/tasks/network-interfaces-scripted.nix
+++ b/nixos/modules/tasks/network-interfaces-scripted.nix
@@ -83,13 +83,13 @@ in
                   # FIXME: get rid of "|| true" (necessary to make it idempotent).
                   ip route add default via "${cfg.defaultGateway}" ${
                     optionalString (cfg.defaultGatewayWindowSize != null)
-                      "window ${cfg.defaultGatewayWindowSize}"} || true
+                      "window ${toString cfg.defaultGatewayWindowSize}"} || true
                 ''}
                 ${optionalString (cfg.defaultGateway6 != null && cfg.defaultGateway6 != "") ''
                   # FIXME: get rid of "|| true" (necessary to make it idempotent).
                   ip -6 route add ::/0 via "${cfg.defaultGateway6}" ${
                     optionalString (cfg.defaultGatewayWindowSize != null)
-                      "window ${cfg.defaultGatewayWindowSize}"} || true
+                      "window ${toString cfg.defaultGatewayWindowSize}"} || true
                 ''}
               '';
           };
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index bf2364a0459d..7ccc9df740e5 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -11,7 +11,7 @@ with lib;
 let cfg = config.ec2; in
 
 {
-  imports = [ ../profiles/headless.nix ./ec2-data.nix ./amazon-grow-partition.nix ];
+  imports = [ ../profiles/headless.nix ./ec2-data.nix ./amazon-grow-partition.nix ./amazon-init.nix ];
 
   config = {
 
diff --git a/nixos/modules/virtualisation/amazon-init.nix b/nixos/modules/virtualisation/amazon-init.nix
index 21cbbfda0b68..96cd57e6db5d 100644
--- a/nixos/modules/virtualisation/amazon-init.nix
+++ b/nixos/modules/virtualisation/amazon-init.nix
@@ -44,7 +44,6 @@ let
     nixos-rebuild switch
   '';
 in {
-  imports = [ "${modulesPath}/virtualisation/amazon-image.nix" ];
   boot.postBootCommands = ''
     ${bootScript} &
   '';
diff --git a/nixos/modules/virtualisation/azure-agent.nix b/nixos/modules/virtualisation/azure-agent.nix
index e657cc519396..ef4e3e1e48d4 100644
--- a/nixos/modules/virtualisation/azure-agent.nix
+++ b/nixos/modules/virtualisation/azure-agent.nix
@@ -156,6 +156,12 @@ in
       after = [ "ip-up.target" ];
       wants = [ "ip-up.target" ];
 
+      environment = {
+        GIT_SSL_CAINFO = "/etc/ssl/certs/ca-certificates.crt";
+        OPENSSL_X509_CERT_FILE = "/etc/ssl/certs/ca-certificates.crt";
+        SSL_CERT_FILE = "/etc/ssl/certs/ca-certificates.crt";
+      };
+
       path = [ pkgs.e2fsprogs ];
       description = "Windows Azure Agent Service";
       unitConfig.ConditionPathExists = "/etc/waagent.conf";
diff --git a/nixos/modules/virtualisation/docker.nix b/nixos/modules/virtualisation/docker.nix
index 718ca0851477..97b2927cf1bd 100644
--- a/nixos/modules/virtualisation/docker.nix
+++ b/nixos/modules/virtualisation/docker.nix
@@ -69,7 +69,8 @@ in
         description = ''
           The postStart phase of the systemd service. You may need to
           override this if you are passing in flags to docker which
-          don't cause the socket file to be created.
+          don't cause the socket file to be created. This option is ignored
+          if socket activation is used.
         '';
       };
 
@@ -81,22 +82,29 @@ in
   config = mkIf cfg.enable (mkMerge [
     { environment.systemPackages = [ pkgs.docker ];
       users.extraGroups.docker.gid = config.ids.gids.docker;
-    }
-    (mkIf cfg.socketActivation {
-
       systemd.services.docker = {
         description = "Docker Application Container Engine";
-        after = [ "network.target" "docker.socket" ];
-        requires = [ "docker.socket" ];
+        wantedBy = optional (!cfg.socketActivation) "multi-user.target";
+        after = [ "network.target" ] ++ (optional cfg.socketActivation "docker.socket") ;
+        requires = optional cfg.socketActivation "docker.socket";
         serviceConfig = {
-          ExecStart = "${pkgs.docker}/bin/docker daemon --host=fd:// --group=docker --storage-driver=${cfg.storageDriver} ${cfg.extraOptions}";
+          ExecStart = "${pkgs.docker}/bin/docker daemon --group=docker --storage-driver=${cfg.storageDriver} ${optionalString cfg.socketActivation "--host=fd://"} ${cfg.extraOptions}";
           #  I'm not sure if that limits aren't too high, but it's what
           #  goes in config bundled with docker itself
           LimitNOFILE = 1048576;
           LimitNPROC = 1048576;
         } // proxy_env;
-      };
 
+        path = [ pkgs.kmod ] ++ (optional (cfg.storageDriver == "zfs") pkgs.zfs);
+        environment.MODULE_DIR = "/run/current-system/kernel-modules/lib/modules";
+
+        postStart = if cfg.socketActivation then "" else cfg.postStart;
+
+        # Presumably some containers are running we don't want to interrupt
+        restartIfChanged = false;
+      };
+    }
+    (mkIf cfg.socketActivation {
       systemd.sockets.docker = {
         description = "Docker Socket for the API";
         wantedBy = [ "sockets.target" ];
@@ -108,29 +116,6 @@ in
         };
       };
     })
-    (mkIf (!cfg.socketActivation) {
-
-      systemd.services.docker = {
-        description = "Docker Application Container Engine";
-        wantedBy = [ "multi-user.target" ];
-        after = [ "network.target" ];
-        serviceConfig = {
-          ExecStart = "${pkgs.docker}/bin/docker daemon --group=docker --storage-driver=${cfg.storageDriver} ${cfg.extraOptions}";
-          #  I'm not sure if that limits aren't too high, but it's what
-          #  goes in config bundled with docker itself
-          LimitNOFILE = 1048576;
-          LimitNPROC = 1048576;
-        } // proxy_env;
-
-        path = [ pkgs.kmod ] ++ (optional (cfg.storageDriver == "zfs") pkgs.zfs);
-        environment.MODULE_DIR = "/run/current-system/kernel-modules/lib/modules";
-
-        postStart = cfg.postStart;
-
-        # Presumably some containers are running we don't want to interrupt
-        restartIfChanged = false;
-      };
-    })
   ]);
 
 }
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index 15b0da3bab74..5c4686044430 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -40,16 +40,17 @@ let
       if [ -z "$TMPDIR" -o -z "$USE_TMPDIR" ]; then
           TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
       fi
+
       # Create a directory for exchanging data with the VM.
       mkdir -p $TMPDIR/xchg
 
       ${if cfg.useBootLoader then ''
-        # Create a writable copy/snapshot of the boot disk
-        # A writable boot disk can be booted from automatically
+        # Create a writable copy/snapshot of the boot disk.
+        # A writable boot disk can be booted from automatically.
         ${pkgs.qemu_kvm}/bin/qemu-img create -f qcow2 -b ${bootDisk}/disk.img $TMPDIR/disk.img || exit 1
 
         ${if cfg.useEFIBoot then ''
-          # VM needs a writable flash BIOS
+          # VM needs a writable flash BIOS.
           cp ${bootDisk}/bios.bin $TMPDIR || exit 1
           chmod 0644 $TMPDIR/bios.bin || exit 1
         '' else ''
@@ -76,14 +77,14 @@ let
           -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \
           -virtfs local,path=''${SHARED_DIR:-$TMPDIR/xchg},security_model=none,mount_tag=shared \
           ${if cfg.useBootLoader then ''
-            -drive index=0,id=drive1,file=$NIX_DISK_IMAGE,if=${cfg.qemu.diskInterface},cache=writeback,werror=report \
+            -drive index=0,id=drive1,file=$NIX_DISK_IMAGE,if=${cfg.qemu.diskInterface},cache=none,werror=report \
             -drive index=1,id=drive2,file=$TMPDIR/disk.img,media=disk \
             ${if cfg.useEFIBoot then ''
               -pflash $TMPDIR/bios.bin \
             '' else ''
             ''}
           '' else ''
-            -drive index=0,id=drive1,file=$NIX_DISK_IMAGE,if=${cfg.qemu.diskInterface},cache=writeback,werror=report \
+            -drive index=0,id=drive1,file=$NIX_DISK_IMAGE,if=${cfg.qemu.diskInterface},cache=none,werror=report \
             -kernel ${config.system.build.toplevel}/kernel \
             -initrd ${config.system.build.toplevel}/initrd \
             -append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo} ${kernelConsole} $QEMU_KERNEL_PARAMS" \
@@ -297,6 +298,7 @@ in
     virtualisation.qemu = {
       options =
         mkOption {
+          type = types.listOf types.unspecified;
           default = [];
           example = [ "-vga std" ];
           description = "Options passed to QEMU.";
@@ -425,19 +427,19 @@ in
         ${if cfg.writableStore then "/nix/.ro-store" else "/nix/store"} =
           { device = "store";
             fsType = "9p";
-            options = "trans=virtio,version=9p2000.L,msize=1048576,cache=loose";
+            options = "trans=virtio,version=9p2000.L,cache=loose";
             neededForBoot = true;
           };
         "/tmp/xchg" =
           { device = "xchg";
             fsType = "9p";
-            options = "trans=virtio,version=9p2000.L,msize=1048576,cache=loose";
+            options = "trans=virtio,version=9p2000.L,cache=loose";
             neededForBoot = true;
           };
         "/tmp/shared" =
           { device = "shared";
             fsType = "9p";
-            options = "trans=virtio,version=9p2000.L,msize=1048576";
+            options = "trans=virtio,version=9p2000.L";
             neededForBoot = true;
           };
       } // optionalAttrs cfg.writableStore