Diffstat (limited to 'nixpkgs/nixos')
-rwxr-xr-x  nixpkgs/nixos/doc/manual/development/releases.xml | 6
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing.xml | 2
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/upgrading.xml | 20
-rw-r--r--  nixpkgs/nixos/doc/manual/man-nixos-build-vms.xml | 22
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1903.xml | 42
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1909.xml | 36
-rw-r--r--  nixpkgs/nixos/modules/config/no-x-libs.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/config/system-path.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nixos-build-vms/nixos-build-vms.sh | 61
-rw-r--r--  nixpkgs/nixos/modules/module-list.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/programs/bash/bash.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/programs/fish.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/programs/xonsh.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/zsh.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/rename.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/security/pam.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/audio/ympd.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/backup/bacula.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/services/databases/couchdb.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/databases/mongodb.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/databases/openldap.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/databases/redis.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/databases/rethinkdb.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/deepin/dde-daemon.nix | 41
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/deepin/deepin-menu.nix | 29
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome3/evince.nix | 35
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome3/glib-networking.nix | 33
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome3/gnome-keyring.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/triggerhappy.nix | 11
-rw-r--r--  nixpkgs/nixos/modules/services/mail/pfix-srsd.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/mail/postgrey.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/mail/roundcube.nix | 44
-rw-r--r--  nixpkgs/nixos/modules/services/mail/spamassassin.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/misc/matrix-synapse.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/misc/mbpfan.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/misc/spice-vdagentd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/misc/svnserve.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/nagios.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix | 28
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix | 466
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix | 166
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/asterisk.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/networking/avahi-daemon.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/networking/bind.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/services/networking/hostapd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/htpdate.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/hylafax/default.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/hylafax/faxq-default.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/iodine.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/services/networking/ircd-hybrid/ircd.conf | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/lldpd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/miniupnpd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/networkmanager.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/services/networking/ocserv.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/services/networking/racoon.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/networking/ssh/sshd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/strongswan.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/supplicant.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/networking/tox-node.nix | 95
-rw-r--r--  nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/xrdp.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/services/networking/znc/default.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/printing/cupsd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/scheduling/fcron.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/search/kibana.nix | 44
-rw-r--r--  nixpkgs/nixos/modules/services/search/solr.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/services/security/hologram-agent.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/codimd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/documize.nix | 67
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/miniflux.nix | 97
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/nextcloud.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/restya-board.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/selfoss.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/tt-rss.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/mighttpd2.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/kodi.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix | 73
-rw-r--r--  nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix | 7
-rw-r--r--  nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/system/boot/kernel.nix | 18
-rw-r--r--  nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix | 20
-rw-r--r--  nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl | 6
-rw-r--r--  nixpkgs/nixos/modules/system/boot/stage-1.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/openvswitch.nix | 18
-rw-r--r--  nixpkgs/nixos/tests/all-tests.nix | 3
-rw-r--r--  nixpkgs/nixos/tests/common/letsencrypt/default.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/documize.nix | 58
-rw-r--r--  nixpkgs/nixos/tests/elk.nix | 24
-rw-r--r--  nixpkgs/nixos/tests/miniflux.nix | 52
-rw-r--r--  nixpkgs/nixos/tests/nghttpx.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/nginx.nix | 1
-rw-r--r--  nixpkgs/nixos/tests/osquery.nix | 4
-rw-r--r--  nixpkgs/nixos/tests/printing.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/prometheus-2.nix | 67
-rw-r--r--  nixpkgs/nixos/tests/solr.nix | 102
102 files changed, 1652 insertions(+), 382 deletions(-)
diff --git a/nixpkgs/nixos/doc/manual/development/releases.xml b/nixpkgs/nixos/doc/manual/development/releases.xml
index dcedad540e1f..f45fecd16c35 100755
--- a/nixpkgs/nixos/doc/manual/development/releases.xml
+++ b/nixpkgs/nixos/doc/manual/development/releases.xml
@@ -177,6 +177,12 @@
     </listitem>
     <listitem>
      <para>
+      Update "Chapter 4. Upgrading NixOS" section of the manual to match 
+      new stable release version.
+     </para>
+    </listitem>
+    <listitem>
+     <para>
       Update http://nixos.org/nixos/download.html and
       http://nixos.org/nixos/manual in
       https://github.com/NixOS/nixos-org-configurations
diff --git a/nixpkgs/nixos/doc/manual/installation/installing.xml b/nixpkgs/nixos/doc/manual/installation/installing.xml
index f4f8d470f802..9687c21a01e6 100644
--- a/nixpkgs/nixos/doc/manual/installation/installing.xml
+++ b/nixpkgs/nixos/doc/manual/installation/installing.xml
@@ -54,7 +54,7 @@
 
    <para>
     To manually configure the network on the graphical installer, first disable
-    network-manager with <command>systemctl stop network-manager</command>.
+    network-manager with <command>systemctl stop NetworkManager</command>.
    </para>
 
    <para>
diff --git a/nixpkgs/nixos/doc/manual/installation/upgrading.xml b/nixpkgs/nixos/doc/manual/installation/upgrading.xml
index 69668b1d4bd6..35b4d266e12e 100644
--- a/nixpkgs/nixos/doc/manual/installation/upgrading.xml
+++ b/nixpkgs/nixos/doc/manual/installation/upgrading.xml
@@ -14,11 +14,11 @@
     <para>
      <emphasis>Stable channels</emphasis>, such as
      <literal
-    xlink:href="https://nixos.org/channels/nixos-17.03">nixos-17.03</literal>.
+    xlink:href="https://nixos.org/channels/nixos-19.03">nixos-19.03</literal>.
      These only get conservative bug fixes and package upgrades. For instance,
      a channel update may cause the Linux kernel on your system to be upgraded
-     from 4.9.16 to 4.9.17 (a minor bug fix), but not from
-     4.9.<replaceable>x</replaceable> to 4.11.<replaceable>x</replaceable> (a
+     from 4.19.34 to 4.19.38 (a minor bug fix), but not from
+     4.19.<replaceable>x</replaceable> to 4.20.<replaceable>x</replaceable> (a
      major change that has the potential to break things). Stable channels are
      generally maintained until the next stable branch is created.
     </para>
@@ -38,7 +38,7 @@
     <para>
      <emphasis>Small channels</emphasis>, such as
      <literal
-    xlink:href="https://nixos.org/channels/nixos-17.03-small">nixos-17.03-small</literal>
+    xlink:href="https://nixos.org/channels/nixos-19.03-small">nixos-19.03-small</literal>
      or
      <literal
     xlink:href="https://nixos.org/channels/nixos-unstable-small">nixos-unstable-small</literal>.
@@ -63,8 +63,8 @@
  <para>
   When you first install NixOS, you’re automatically subscribed to the NixOS
   channel that corresponds to your installation source. For instance, if you
-  installed from a 17.03 ISO, you will be subscribed to the
-  <literal>nixos-17.03</literal> channel. To see which NixOS channel you’re
+  installed from a 19.03 ISO, you will be subscribed to the
+  <literal>nixos-19.03</literal> channel. To see which NixOS channel you’re
   subscribed to, run the following as root:
 <screen>
 # nix-channel --list | grep nixos
@@ -75,13 +75,13 @@ nixos https://nixos.org/channels/nixos-unstable
 # nix-channel --add https://nixos.org/channels/<replaceable>channel-name</replaceable> nixos
 </screen>
   (Be sure to include the <literal>nixos</literal> parameter at the end.) For
-  instance, to use the NixOS 17.03 stable channel:
+  instance, to use the NixOS 19.03 stable channel:
 <screen>
-# nix-channel --add https://nixos.org/channels/nixos-17.03 nixos
+# nix-channel --add https://nixos.org/channels/nixos-19.03 nixos
 </screen>
   If you have a server, you may want to use the “small” channel instead:
 <screen>
-# nix-channel --add https://nixos.org/channels/nixos-17.03-small nixos
+# nix-channel --add https://nixos.org/channels/nixos-19.03-small nixos
 </screen>
   And if you want to live on the bleeding edge:
 <screen>
@@ -127,7 +127,7 @@ nixos https://nixos.org/channels/nixos-unstable
    current channel. (To see when the service runs, see <command>systemctl
    list-timers</command>.) You can also specify a channel explicitly, e.g.
 <programlisting>
-<xref linkend="opt-system.autoUpgrade.channel"/> = https://nixos.org/channels/nixos-17.03;
+<xref linkend="opt-system.autoUpgrade.channel"/> = https://nixos.org/channels/nixos-19.03;
 </programlisting>
   </para>
  </section>
diff --git a/nixpkgs/nixos/doc/manual/man-nixos-build-vms.xml b/nixpkgs/nixos/doc/manual/man-nixos-build-vms.xml
index 87e4f3dae869..7d6e04e0dd90 100644
--- a/nixpkgs/nixos/doc/manual/man-nixos-build-vms.xml
+++ b/nixpkgs/nixos/doc/manual/man-nixos-build-vms.xml
@@ -24,8 +24,14 @@
     
    <arg>
     <option>--help</option>
-   </arg>
-    
+  </arg>
+
+  <arg>
+    <option>--option</option>
+    <replaceable>name</replaceable>
+    <replaceable>value</replaceable>
+  </arg>
+
    <arg choice="plain">
     <replaceable>network.nix</replaceable>
    </arg>
@@ -115,6 +121,18 @@
      </para>
     </listitem>
    </varlistentry>
+   <varlistentry>
+    <term>
+     <option>--option</option> <replaceable>name</replaceable> <replaceable>value</replaceable>
+    </term>
+    <listitem>
+     <para>Set the Nix configuration option
+      <replaceable>name</replaceable> to <replaceable>value</replaceable>.
+      This overrides settings in the Nix configuration file (see
+      <citerefentry><refentrytitle>nix.conf</refentrytitle><manvolnum>5</manvolnum></citerefentry>).
+     </para>
+    </listitem>
+   </varlistentry>
   </variablelist>
  </refsection>
 </refentry>
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1903.xml b/nixpkgs/nixos/doc/manual/release-notes/rl-1903.xml
index 7c94f6e9473e..e9c6cd7e9acb 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-1903.xml
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1903.xml
@@ -31,33 +31,22 @@
     </para>
     <note>
      <para>
-      <varname>services.xserver.desktopManager.pantheon</varname> default
-      enables lightdm as a display manager and using Pantheon's greeter.
+      By default, <varname>services.xserver.desktopManager.pantheon</varname>
+      enables LightDM as a display manager, as pantheon's screen locking
+      implementation relies on it.
      </para>
      <para>
-      This is because of limitations with the screenlocking implementation,
-      whereas the screenlocker would be non-functional without it.
-     </para>
-     <para>
-      Because of that it is recommended to retain this precaution, however if
-      you'd like to change this set:
-     </para>
-     <itemizedlist>
-      <listitem>
-       <para>
-        <option>services.xserver.displayManager.lightdm.enable</option>
-       </para>
-      </listitem>
-      <listitem>
-       <para>
-        <option>services.xserver.displayManager.lightdm.greeters.pantheon.enable</option>
-       </para>
-      </listitem>
-     </itemizedlist>
-     <para>
-      to <literal>false</literal> and enable your preferred display manager.
+      Because of that it is recommended to leave LightDM enabled. If you'd like
+      to disable it anyway, set
+      <option>services.xserver.displayManager.lightdm.enable</option> to
+      <literal>false</literal> and enable your preferred display manager.
      </para>
     </note>
+    <para>
+     Also note that Pantheon's LightDM greeter is not enabled by default,
+     because it has numerous issues in NixOS and isn't optimal for use here
+     yet.
+    </para>
    </listitem>
    <listitem>
     <para>
@@ -534,6 +523,13 @@
       Same applies to the new <literal>users.ldap.daemon.rootpwmodpwFile</literal> option.
     </para>
    </listitem>
+   <listitem>
+     <para>
+       <literal>nodejs-6_x</literal> is end-of-life.
+       <literal>nodejs-6_x</literal>, <literal>nodejs-slim-6_x</literal> and
+       <literal>nodePackages_6_x</literal> are removed.
+     </para>
+   </listitem>
   </itemizedlist>
  </section>
 
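
The note above leaves the choice of display manager to the reader. A minimal configuration.nix sketch of the opt-out it describes (illustrative only, not part of this patch; GDM stands in for "your preferred display manager"):

    services.xserver.desktopManager.pantheon.enable = true;
    # Pantheon's screen locking relies on LightDM, so disabling it is discouraged.
    services.xserver.displayManager.lightdm.enable = false;
    services.xserver.displayManager.gdm.enable = true;
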
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1909.xml b/nixpkgs/nixos/doc/manual/release-notes/rl-1909.xml
index 2cfaec49c02e..2dc2dc41ff24 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-1909.xml
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1909.xml
@@ -35,11 +35,6 @@
    The following new services were added since the last release:
   </para>
 
-  <itemizedlist>
-   <listitem>
-    <para />
-   </listitem>
-  </itemizedlist>
  </section>
 
  <section xmlns="http://docbook.org/ns/docbook"
@@ -72,6 +67,22 @@
      accordingly.
     </para>
    </listitem>
+   <listitem>
+    <para>
+      The options <option>services.prometheus.alertmanager.user</option> and
+      <option>services.prometheus.alertmanager.group</option> have been removed
+      because the alertmanager service is now using systemd's <link
+      xlink:href="http://0pointer.net/blog/dynamic-users-with-systemd.html">
+      DynamicUser mechanism</link> which obviates these options.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+      The NetworkManager systemd unit was renamed back from network-manager.service to
+      NetworkManager.service for better compatibility with other applications expecting this name.
+      The same applies to ModemManager where modem-manager.service is now called ModemManager.service again.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 
@@ -109,6 +120,21 @@
      Accelerated Video Playback</link> for better transcoding performance.
     </para>
    </listitem>
+   <listitem>
+    <para>
+     The following changes apply if the <literal>stateVersion</literal> is
+     changed to 19.09 or higher. For <literal>stateVersion = "19.03"</literal>
+     or lower the old behavior is preserved.
+    </para>
+    <itemizedlist>
+     <listitem>
+      <para>
+       <literal>solr.package</literal> defaults to
+       <literal>pkgs.solr_8</literal>.
+      </para>
+     </listitem>
+    </itemizedlist>
+   </listitem>
   </itemizedlist>
  </section>
 </section>
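
The stateVersion-gated default above can be pinned either way. A hedged sketch (illustrative only, not part of this patch; it assumes services.solr.package is the option behind the note and that an older pkgs.solr attribute remains available for pinning):

    system.stateVersion = "19.09";        # new default: solr.package = pkgs.solr_8
    services.solr.enable = true;
    # services.solr.package = pkgs.solr;  # pin the previous package to keep the old behaviour
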
diff --git a/nixpkgs/nixos/modules/config/no-x-libs.nix b/nixpkgs/nixos/modules/config/no-x-libs.nix
index 9d2023477020..aad02a9ca4e3 100644
--- a/nixpkgs/nixos/modules/config/no-x-libs.nix
+++ b/nixpkgs/nixos/modules/config/no-x-libs.nix
@@ -34,7 +34,7 @@ with lib;
       networkmanager-openvpn = super.networkmanager-openvpn.override { withGnome = false; };
       networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
       networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
-      pinentry = super.pinentry.override { gtk2 = null; qt = null; };
+      pinentry = super.pinentry.override { gtk2 = null; gcr = null; qt = null; };
       gobject-introspection = super.gobject-introspection.override { x11Support = false; };
     }));
   };
diff --git a/nixpkgs/nixos/modules/config/system-path.nix b/nixpkgs/nixos/modules/config/system-path.nix
index 7a65e44e828d..fae2fc740082 100644
--- a/nixpkgs/nixos/modules/config/system-path.nix
+++ b/nixpkgs/nixos/modules/config/system-path.nix
@@ -7,7 +7,7 @@ with lib;
 
 let
 
-  requiredPackages = map lib.lowPrio
+  requiredPackages = map (pkg: setPrio ((pkg.meta.priority or 5) + 3) pkg)
     [ config.nix.package
       pkgs.acl
       pkgs.attr
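
The switch from lib.lowPrio to a relative bump means required system packages now sit three priority steps below each package's own meta.priority instead of being pinned to 10. A rough sketch of the resulting precedence (illustrative only; nix-env treats a lower number as higher priority, and 5 is the default this expression assumes):

    # user-installed package, meta.priority unset    -> 5   (wins collisions)
    # required system package after this change      -> (meta.priority or 5) + 3 = 8
    # required system package before (lib.lowPrio)   -> 10
    requiredPackages = map (pkg: setPrio ((pkg.meta.priority or 5) + 3) pkg) [ /* ... */ ];
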
diff --git a/nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix b/nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix
index 5d431df4b114..b9ab2053c41f 100644
--- a/nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix
+++ b/nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix
@@ -1,6 +1,6 @@
 {
-  x86_64-linux = "/nix/store/pid1yakjasch4pwl63nzbj22z9zf0q26-nix-2.2";
-  i686-linux = "/nix/store/qpkl0cxy0xh4h432lv2qsjrmhvx5x2vy-nix-2.2";
-  aarch64-linux = "/nix/store/0jg7h94x986d8cskg6gcfza9x67spdbp-nix-2.2";
-  x86_64-darwin = "/nix/store/a48whqkmxnsfhwbk6nay74iyc1cf0lr2-nix-2.2";
+  x86_64-linux = "/nix/store/hbhdjn5ik3byg642d1m11k3k3s0kn3py-nix-2.2.2";
+  i686-linux = "/nix/store/fz5cikwvj3n0a6zl44h6l2z3cin64mda-nix-2.2.2";
+  aarch64-linux = "/nix/store/2gba4cyl4wvxzfbhmli90jy4n5aj0kjj-nix-2.2.2";
+  x86_64-darwin = "/nix/store/87i4fp46jfw9yl8c7i9gx75m5yph7irl-nix-2.2.2";
 }
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/nixos-build-vms.sh b/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/nixos-build-vms.sh
index 4e981c074a57..25106733087e 100644
--- a/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/nixos-build-vms.sh
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/nixos-build-vms.sh
@@ -9,49 +9,44 @@ showUsage() {
 
 # Parse valid argument options
 
-PARAMS=`getopt -n $0 -o h -l no-out-link,show-trace,help -- "$@"`
+nixBuildArgs=()
+networkExpr=
 
-if [ $? != 0 ]
-then
-    showUsage
-    exit 1
-fi
-
-eval set -- "$PARAMS"
-
-# Evaluate valid options
-
-while [ "$1" != "--" ]
-do
+while [ $# -gt 0 ]; do
     case "$1" in
-	--no-out-link)
-	    noOutLinkArg="--no-out-link"
-	    ;;
-	--show-trace)
-	    showTraceArg="--show-trace"
-	    ;;
-	-h|--help)
-	    showUsage
-	    exit 0
-	    ;;
+      --no-out-link)
+        nixBuildArgs+=("--no-out-link")
+        ;;
+      --show-trace)
+        nixBuildArgs+=("--show-trace")
+        ;;
+      -h|--help)
+        showUsage
+        exit 0
+        ;;
+      --option)
+        shift
+        nixBuildArgs+=("--option" "$1" "$2"); shift
+        ;;
+      *)
+        if [ ! -z "$networkExpr" ]; then
+          echo "Network expression already set!"
+          showUsage
+          exit 1
+        fi
+        networkExpr="$(readlink -f $1)"
+        ;;
     esac
-    
+
     shift
 done
 
-shift
-
-# Validate the given options
-
-if [ "$1" = "" ]
+if [ -z "$networkExpr" ]
 then
     echo "ERROR: A network expression must be specified!" >&2
     exit 1
-else
-    networkExpr=$(readlink -f $1)
 fi
 
 # Build a network of VMs
-
 nix-build '<nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix>' \
-    --argstr networkExpr $networkExpr $noOutLinkArg $showTraceArg
+    --argstr networkExpr $networkExpr "${nixBuildArgs[@]}"
diff --git a/nixpkgs/nixos/modules/module-list.nix b/nixpkgs/nixos/modules/module-list.nix
index bd8d00db872c..5012a3a303c9 100644
--- a/nixpkgs/nixos/modules/module-list.nix
+++ b/nixpkgs/nixos/modules/module-list.nix
@@ -255,6 +255,8 @@
   ./services/databases/virtuoso.nix
   ./services/desktops/accountsservice.nix
   ./services/desktops/bamf.nix
+  ./services/desktops/deepin/dde-daemon.nix
+  ./services/desktops/deepin/deepin-menu.nix
   ./services/desktops/dleyna-renderer.nix
   ./services/desktops/dleyna-server.nix
   ./services/desktops/pantheon/contractor.nix
@@ -265,8 +267,10 @@
   ./services/desktops/pipewire.nix
   ./services/desktops/gnome3/at-spi2-core.nix
   ./services/desktops/gnome3/chrome-gnome-shell.nix
+  ./services/desktops/gnome3/evince.nix
   ./services/desktops/gnome3/evolution-data-server.nix
   ./services/desktops/gnome3/file-roller.nix
+  ./services/desktops/gnome3/glib-networking.nix
   ./services/desktops/gnome3/gnome-disks.nix
   ./services/desktops/gnome3/gnome-documents.nix
   ./services/desktops/gnome3/gnome-keyring.nix
@@ -488,6 +492,7 @@
   ./services/monitoring/prometheus/default.nix
   ./services/monitoring/prometheus/alertmanager.nix
   ./services/monitoring/prometheus/exporters.nix
+  ./services/monitoring/prometheus/pushgateway.nix
   ./services/monitoring/riemann.nix
   ./services/monitoring/riemann-dash.nix
   ./services/monitoring/riemann-tools.nix
@@ -672,6 +677,7 @@
   ./services/networking/tinydns.nix
   ./services/networking/tftpd.nix
   ./services/networking/tox-bootstrapd.nix
+  ./services/networking/tox-node.nix
   ./services/networking/toxvpn.nix
   ./services/networking/tvheadend.nix
   ./services/networking/unbound.nix
@@ -744,10 +750,12 @@
   ./services/web-apps/atlassian/crowd.nix
   ./services/web-apps/atlassian/jira.nix
   ./services/web-apps/codimd.nix
+  ./services/web-apps/documize.nix
   ./services/web-apps/frab.nix
   ./services/web-apps/icingaweb2/icingaweb2.nix
   ./services/web-apps/icingaweb2/module-monitoring.nix
   ./services/web-apps/mattermost.nix
+  ./services/web-apps/miniflux.nix
   ./services/web-apps/nextcloud.nix
   ./services/web-apps/nexus.nix
   ./services/web-apps/pgpkeyserver-lite.nix
diff --git a/nixpkgs/nixos/modules/programs/bash/bash.nix b/nixpkgs/nixos/modules/programs/bash/bash.nix
index d53c6b318f1d..27b5f9e4b642 100644
--- a/nixpkgs/nixos/modules/programs/bash/bash.nix
+++ b/nixpkgs/nixos/modules/programs/bash/bash.nix
@@ -226,9 +226,7 @@ in
 
     environment.shells =
       [ "/run/current-system/sw/bin/bash"
-        "/var/run/current-system/sw/bin/bash"
         "/run/current-system/sw/bin/sh"
-        "/var/run/current-system/sw/bin/sh"
         "${pkgs.bashInteractive}/bin/bash"
         "${pkgs.bashInteractive}/bin/sh"
       ];
diff --git a/nixpkgs/nixos/modules/programs/fish.nix b/nixpkgs/nixos/modules/programs/fish.nix
index bcb5a3f341b5..622d2f96fe41 100644
--- a/nixpkgs/nixos/modules/programs/fish.nix
+++ b/nixpkgs/nixos/modules/programs/fish.nix
@@ -232,7 +232,6 @@ in
 
     environment.shells = [
       "/run/current-system/sw/bin/fish"
-      "/var/run/current-system/sw/bin/fish"
       "${pkgs.fish}/bin/fish"
     ];
 
diff --git a/nixpkgs/nixos/modules/programs/xonsh.nix b/nixpkgs/nixos/modules/programs/xonsh.nix
index f967ca82ac8c..ceab9b5db931 100644
--- a/nixpkgs/nixos/modules/programs/xonsh.nix
+++ b/nixpkgs/nixos/modules/programs/xonsh.nix
@@ -50,7 +50,6 @@ in
 
     environment.shells =
       [ "/run/current-system/sw/bin/xonsh"
-        "/var/run/current-system/sw/bin/xonsh"
         "${pkgs.xonsh}/bin/xonsh"
       ];
 
diff --git a/nixpkgs/nixos/modules/programs/zsh/zsh.nix b/nixpkgs/nixos/modules/programs/zsh/zsh.nix
index deb94922da80..b7117e5f90d7 100644
--- a/nixpkgs/nixos/modules/programs/zsh/zsh.nix
+++ b/nixpkgs/nixos/modules/programs/zsh/zsh.nix
@@ -230,7 +230,6 @@ in
 
     environment.shells =
       [ "/run/current-system/sw/bin/zsh"
-        "/var/run/current-system/sw/bin/zsh"
         "${pkgs.zsh}/bin/zsh"
       ];
 
diff --git a/nixpkgs/nixos/modules/rename.nix b/nixpkgs/nixos/modules/rename.nix
index 325f92308402..f6c112d9cfab 100644
--- a/nixpkgs/nixos/modules/rename.nix
+++ b/nixpkgs/nixos/modules/rename.nix
@@ -45,6 +45,8 @@ with lib;
     (mkRemovedOptionModule [ "services" "neo4j" "port" ] "Use services.neo4j.http.listenAddress instead.")
     (mkRemovedOptionModule [ "services" "neo4j" "boltPort" ] "Use services.neo4j.bolt.listenAddress instead.")
     (mkRemovedOptionModule [ "services" "neo4j" "httpsPort" ] "Use services.neo4j.https.listenAddress instead.")
+    (mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "user" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a user setting.")
+    (mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "group" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a group setting.")
     (mkRenamedOptionModule [ "services" "tor" "relay" "portSpec" ] [ "services" "tor" "relay" "port" ])
     (mkRenamedOptionModule [ "services" "vmwareGuest" ] [ "virtualisation" "vmware" "guest" ])
     (mkRenamedOptionModule [ "jobs" ] [ "systemd" "services" ])
@@ -134,7 +136,7 @@ with lib;
         inetPort = [ "services" "postgrey" "inetPort" ];
       in
         if value inetAddr == null
-        then { path = "/var/run/postgrey.sock"; }
+        then { path = "/run/postgrey.sock"; }
         else { addr = value inetAddr; port = value inetPort; }
     ))
 
diff --git a/nixpkgs/nixos/modules/security/pam.nix b/nixpkgs/nixos/modules/security/pam.nix
index 46ce274a2a9a..89e71c5136e4 100644
--- a/nixpkgs/nixos/modules/security/pam.nix
+++ b/nixpkgs/nixos/modules/security/pam.nix
@@ -410,6 +410,8 @@ let
               "password sufficient ${pam_krb5}/lib/security/pam_krb5.so use_first_pass"}
           ${optionalString config.services.samba.syncPasswordsByPam
               "password optional ${pkgs.samba}/lib/security/pam_smbpass.so nullok use_authtok try_first_pass"}
+          ${optionalString cfg.enableGnomeKeyring
+              "password optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so use_authtok"}
 
           # Session management.
           ${optionalString cfg.setEnvironment ''
diff --git a/nixpkgs/nixos/modules/services/audio/ympd.nix b/nixpkgs/nixos/modules/services/audio/ympd.nix
index d34c1c9d83cc..919b76622510 100644
--- a/nixpkgs/nixos/modules/services/audio/ympd.nix
+++ b/nixpkgs/nixos/modules/services/audio/ympd.nix
@@ -15,7 +15,7 @@ in {
       enable = mkEnableOption "ympd, the MPD Web GUI";
 
       webPort = mkOption {
-        type = types.string;
+        type = types.either types.str types.port; # string for backwards compat
         default = "8080";
         description = "The port where ympd's web interface will be available.";
         example = "ssl://8080:/path/to/ssl-private-key.pem";
@@ -49,7 +49,7 @@ in {
     systemd.services.ympd = {
       description = "Standalone MPD Web GUI written in C";
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.ExecStart = "${pkgs.ympd}/bin/ympd --host ${cfg.mpd.host} --port ${toString cfg.mpd.port} --webport ${cfg.webPort} --user nobody";
+      serviceConfig.ExecStart = "${pkgs.ympd}/bin/ympd --host ${cfg.mpd.host} --port ${toString cfg.mpd.port} --webport ${toString cfg.webPort} --user nobody";
     };
 
   };
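
With the widened type above, webPort keeps accepting the old string form while also taking a plain port number. An illustrative snippet (not part of this patch):

    services.ympd = {
      enable = true;
      webPort = 8080;        # numeric ports are now valid...
      # webPort = "8080";    # ...and the string form still works for backwards compatibility
    };
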
diff --git a/nixpkgs/nixos/modules/services/backup/bacula.nix b/nixpkgs/nixos/modules/services/backup/bacula.nix
index 24cad6128260..41bda7893a75 100644
--- a/nixpkgs/nixos/modules/services/backup/bacula.nix
+++ b/nixpkgs/nixos/modules/services/backup/bacula.nix
@@ -15,7 +15,7 @@ let
         Name = "${fd_cfg.name}";
         FDPort = ${toString fd_cfg.port};
         WorkingDirectory = "${libDir}";
-        Pid Directory = "/var/run";
+        Pid Directory = "/run";
         ${fd_cfg.extraClientConfig}
       }
      
@@ -41,7 +41,7 @@ let
         Name = "${sd_cfg.name}";
         SDPort = ${toString sd_cfg.port};
         WorkingDirectory = "${libDir}";
-        Pid Directory = "/var/run";
+        Pid Directory = "/run";
         ${sd_cfg.extraStorageConfig}
       }
  
@@ -77,7 +77,7 @@ let
       Password = "${dir_cfg.password}";
       DirPort = ${toString dir_cfg.port};
       Working Directory = "${libDir}";
-      Pid Directory = "/var/run/";
+      Pid Directory = "/run/";
       QueryFile = "${pkgs.bacula}/etc/query.sql";
       ${dir_cfg.extraDirectorConfig}
     }
diff --git a/nixpkgs/nixos/modules/services/databases/couchdb.nix b/nixpkgs/nixos/modules/services/databases/couchdb.nix
index ca89b1198205..84d108d9c747 100644
--- a/nixpkgs/nixos/modules/services/databases/couchdb.nix
+++ b/nixpkgs/nixos/modules/services/databases/couchdb.nix
@@ -85,7 +85,7 @@ in {
 
       uriFile = mkOption {
         type = types.path;
-        default = "/var/run/couchdb/couchdb.uri";
+        default = "/run/couchdb/couchdb.uri";
         description = ''
           This file contains the full URI that can be used to access this
           instance of CouchDB. It is used to help discover the port CouchDB is
diff --git a/nixpkgs/nixos/modules/services/databases/mongodb.nix b/nixpkgs/nixos/modules/services/databases/mongodb.nix
index 4c46d9228e5f..3fe4af2f2619 100644
--- a/nixpkgs/nixos/modules/services/databases/mongodb.nix
+++ b/nixpkgs/nixos/modules/services/databases/mongodb.nix
@@ -65,7 +65,7 @@ in
       };
 
       pidFile = mkOption {
-        default = "/var/run/mongodb.pid";
+        default = "/run/mongodb.pid";
         description = "Location of MongoDB pid file";
       };
 
diff --git a/nixpkgs/nixos/modules/services/databases/openldap.nix b/nixpkgs/nixos/modules/services/databases/openldap.nix
index bb658918cb0d..c101e7375af9 100644
--- a/nixpkgs/nixos/modules/services/databases/openldap.nix
+++ b/nixpkgs/nixos/modules/services/databases/openldap.nix
@@ -226,8 +226,8 @@ in
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       preStart = ''
-        mkdir -p /var/run/slapd
-        chown -R "${cfg.user}:${cfg.group}" /var/run/slapd
+        mkdir -p /run/slapd
+        chown -R "${cfg.user}:${cfg.group}" /run/slapd
         ${optionalString (cfg.declarativeContents != null) ''
           rm -Rf "${cfg.dataDir}"
         ''}
diff --git a/nixpkgs/nixos/modules/services/databases/redis.nix b/nixpkgs/nixos/modules/services/databases/redis.nix
index cc7b51982d1d..c04cc1283b2e 100644
--- a/nixpkgs/nixos/modules/services/databases/redis.nix
+++ b/nixpkgs/nixos/modules/services/databases/redis.nix
@@ -95,7 +95,7 @@ in
         type = with types; nullOr path;
         default = null;
         description = "The path to the socket to bind to.";
-        example = "/var/run/redis.sock";
+        example = "/run/redis.sock";
       };
 
       logLevel = mkOption {
diff --git a/nixpkgs/nixos/modules/services/databases/rethinkdb.nix b/nixpkgs/nixos/modules/services/databases/rethinkdb.nix
index 789d9c851d64..4828e594b328 100644
--- a/nixpkgs/nixos/modules/services/databases/rethinkdb.nix
+++ b/nixpkgs/nixos/modules/services/databases/rethinkdb.nix
@@ -41,7 +41,7 @@ in
       };
 
       pidpath = mkOption {
-        default = "/var/run/rethinkdb";
+        default = "/run/rethinkdb";
         description = "Location where each instance's pid file is located.";
       };
 
diff --git a/nixpkgs/nixos/modules/services/desktops/deepin/dde-daemon.nix b/nixpkgs/nixos/modules/services/desktops/deepin/dde-daemon.nix
new file mode 100644
index 000000000000..057da4e2d7f2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/deepin/dde-daemon.nix
@@ -0,0 +1,41 @@
+# dde-daemon
+
+{ config, pkgs, lib, ... }:
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.deepin.dde-daemon = {
+
+      enable = lib.mkEnableOption
+        "A daemon for handling Deepin Desktop Environment session settings";
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = lib.mkIf config.services.deepin.dde-daemon.enable {
+
+    environment.systemPackages = [ pkgs.deepin.dde-daemon ];
+
+    services.dbus.packages = [ pkgs.deepin.dde-daemon ];
+
+    systemd.packages = [ pkgs.deepin.dde-daemon ];
+
+    users.groups.dde-daemon = { };
+
+    users.users.dde-daemon = {
+      description = "Deepin daemon user";
+      group = "dde-daemon";
+      isSystemUser = true;
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/deepin/deepin-menu.nix b/nixpkgs/nixos/modules/services/desktops/deepin/deepin-menu.nix
new file mode 100644
index 000000000000..23fe5a741c42
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/deepin/deepin-menu.nix
@@ -0,0 +1,29 @@
+# deepin-menu
+
+{ config, pkgs, lib, ... }:
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.deepin.deepin-menu = {
+
+      enable = lib.mkEnableOption
+        "DBus service for unified menus in Deepin Desktop Environment";
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = lib.mkIf config.services.deepin.deepin-menu.enable {
+
+    services.dbus.packages = [ pkgs.deepin.deepin-menu ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome3/evince.nix b/nixpkgs/nixos/modules/services/desktops/gnome3/evince.nix
new file mode 100644
index 000000000000..5f040a16f067
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome3/evince.nix
@@ -0,0 +1,35 @@
+# Evince.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.gnome3.evince = {
+
+      enable = mkEnableOption
+        "systemd and dbus services for Evince, the GNOME document viewer";
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.gnome3.evince.enable {
+
+    environment.systemPackages = [ pkgs.evince ];
+
+    services.dbus.packages = [ pkgs.evince ];
+
+    systemd.packages = [ pkgs.evince ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome3/glib-networking.nix b/nixpkgs/nixos/modules/services/desktops/gnome3/glib-networking.nix
new file mode 100644
index 000000000000..186668d7d385
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome3/glib-networking.nix
@@ -0,0 +1,33 @@
+# GLib Networking
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.gnome3.glib-networking = {
+
+      enable = mkEnableOption "network extensions for GLib";
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.gnome3.glib-networking.enable {
+
+    services.dbus.packages = [ pkgs.gnome3.glib-networking ];
+
+    systemd.packages = [ pkgs.gnome3.glib-networking ];
+
+    environment.variables.GIO_EXTRA_MODULES = [ "${pkgs.gnome3.glib-networking.out}/lib/gio/modules" ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome3/gnome-keyring.nix b/nixpkgs/nixos/modules/services/desktops/gnome3/gnome-keyring.nix
index 5ea4350be5b4..4c350d8bb1c6 100644
--- a/nixpkgs/nixos/modules/services/desktops/gnome3/gnome-keyring.nix
+++ b/nixpkgs/nixos/modules/services/desktops/gnome3/gnome-keyring.nix
@@ -35,6 +35,8 @@ with lib;
 
     services.dbus.packages = [ pkgs.gnome3.gnome-keyring pkgs.gcr ];
 
+    security.pam.services.login.enableGnomeKeyring = true;
+
   };
 
 }
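
Together with the pam.nix hunk above, enabling the keyring service now also hooks the password stack, so login-password changes propagate to the keyring. A minimal sketch (illustrative only, not part of this patch):

    services.gnome3.gnome-keyring.enable = true;
    # The module now implies:
    #   security.pam.services.login.enableGnomeKeyring = true;
    # which adds pam_gnome_keyring.so to the password phase of the login service,
    # on top of the entries the option already configured.
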
diff --git a/nixpkgs/nixos/modules/services/hardware/triggerhappy.nix b/nixpkgs/nixos/modules/services/hardware/triggerhappy.nix
index 81d4a1ae65bf..bffe7353b10e 100644
--- a/nixpkgs/nixos/modules/services/hardware/triggerhappy.nix
+++ b/nixpkgs/nixos/modules/services/hardware/triggerhappy.nix
@@ -57,6 +57,15 @@ in
         '';
       };
 
+      user = mkOption {
+        type = types.str;
+        default = "nobody";
+        example = "root";
+        description = ''
+          User account under which <command>triggerhappy</command> runs.
+        '';
+      };
+
       bindings = mkOption {
         type = types.listOf (types.submodule bindingCfg);
         default = [];
@@ -96,7 +105,7 @@ in
       after = [ "local-fs.target" ];
       description = "Global hotkey daemon";
       serviceConfig = {
-        ExecStart = "${pkgs.triggerhappy}/bin/thd --user nobody --socket ${socket} --triggers ${configFile} --deviceglob /dev/input/event*";
+        ExecStart = "${pkgs.triggerhappy}/bin/thd ${optionalString (cfg.user != "root") "--user ${cfg.user}"} --socket ${socket} --triggers ${configFile} --deviceglob /dev/input/event*";
       };
     };
 
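
A short example of the new option (illustrative only, not part of this patch); the previous hard-coded behaviour is preserved by the "nobody" default:

    services.triggerhappy = {
      enable = true;
      user = "root";   # omit to keep the old default of "nobody"
    };
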
diff --git a/nixpkgs/nixos/modules/services/mail/pfix-srsd.nix b/nixpkgs/nixos/modules/services/mail/pfix-srsd.nix
index ab5f4c39e8c2..9599854352c9 100644
--- a/nixpkgs/nixos/modules/services/mail/pfix-srsd.nix
+++ b/nixpkgs/nixos/modules/services/mail/pfix-srsd.nix
@@ -48,8 +48,8 @@ with lib;
       requiredBy = [ "postfix.service" ];
       serviceConfig = {
         Type = "forking";
-        PIDFile = "/var/run/pfix-srsd.pid";
-        ExecStart = "${pkgs.pfixtools}/bin/pfix-srsd -p /var/run/pfix-srsd.pid -I ${config.services.pfix-srsd.domain} ${config.services.pfix-srsd.secretsFile}";
+        PIDFile = "/run/pfix-srsd.pid";
+        ExecStart = "${pkgs.pfixtools}/bin/pfix-srsd -p /run/pfix-srsd.pid -I ${config.services.pfix-srsd.domain} ${config.services.pfix-srsd.secretsFile}";
       };
     };
   };
diff --git a/nixpkgs/nixos/modules/services/mail/postgrey.nix b/nixpkgs/nixos/modules/services/mail/postgrey.nix
index 241f75eae279..8e2b9c5dbc56 100644
--- a/nixpkgs/nixos/modules/services/mail/postgrey.nix
+++ b/nixpkgs/nixos/modules/services/mail/postgrey.nix
@@ -29,7 +29,7 @@ with lib; let
     options = {
       path = mkOption {
         type = path;
-        default = "/var/run/postgrey.sock";
+        default = "/run/postgrey.sock";
         description = "Path of the unix socket";
       };
 
@@ -53,7 +53,7 @@ in {
       socket = mkOption {
         type = socket;
         default = {
-          path = "/var/run/postgrey.sock";
+          path = "/run/postgrey.sock";
           mode = "0777";
         };
         example = {
diff --git a/nixpkgs/nixos/modules/services/mail/roundcube.nix b/nixpkgs/nixos/modules/services/mail/roundcube.nix
index 66b1c1e3e6f9..e8b2e11bf726 100644
--- a/nixpkgs/nixos/modules/services/mail/roundcube.nix
+++ b/nixpkgs/nixos/modules/services/mail/roundcube.nix
@@ -141,27 +141,31 @@ in
 
     systemd.services.roundcube-setup = let
       pgSuperUser = config.services.postgresql.superUser;
-    in {
-      requires = [ "postgresql.service" ];
-      after = [ "postgresql.service" ];
-      wantedBy = [ "multi-user.target" ];
-      path = [ config.services.postgresql.package ];
-      script = ''
-        mkdir -p /var/lib/roundcube
-        if [ ! -f /var/lib/roundcube/db-created ]; then
-          if [ "${cfg.database.host}" = "localhost" ]; then
-            ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "create role ${cfg.database.username} with login password '${cfg.database.password}'";
-            ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "create database ${cfg.database.dbname} with owner ${cfg.database.username}";
+    in mkMerge [
+      (mkIf (cfg.database.host == "localhost") {
+        requires = [ "postgresql.service" ];
+        after = [ "postgresql.service" ];
+        path = [ config.services.postgresql.package ];
+      })
+      {
+        wantedBy = [ "multi-user.target" ];
+        script = ''
+          mkdir -p /var/lib/roundcube
+          if [ ! -f /var/lib/roundcube/db-created ]; then
+            if [ "${cfg.database.host}" = "localhost" ]; then
+              ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "create role ${cfg.database.username} with login password '${cfg.database.password}'";
+              ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "create database ${cfg.database.dbname} with owner ${cfg.database.username}";
+            fi
+            PGPASSWORD=${cfg.database.password} ${pkgs.postgresql}/bin/psql -U ${cfg.database.username} \
+              -f ${cfg.package}/SQL/postgres.initial.sql \
+              -h ${cfg.database.host} ${cfg.database.dbname}
+            touch /var/lib/roundcube/db-created
           fi
-          PGPASSWORD=${cfg.database.password} ${pkgs.postgresql}/bin/psql -U ${cfg.database.username} \
-            -f ${cfg.package}/SQL/postgres.initial.sql \
-            -h ${cfg.database.host} ${cfg.database.dbname}
-          touch /var/lib/roundcube/db-created
-        fi
 
-        ${pkgs.php}/bin/php ${cfg.package}/bin/update.sh
-      '';
-      serviceConfig.Type = "oneshot";
-    };
+          ${pkgs.php}/bin/php ${cfg.package}/bin/update.sh
+        '';
+        serviceConfig.Type = "oneshot";
+      }
+    ];
   };
 }
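
The mkMerge split above only wires the requires/after dependency on postgresql.service when database.host is "localhost", so a remote database no longer drags in a local PostgreSQL unit. A hedged sketch (illustrative only; host and credentials are placeholders):

    services.roundcube = {
      enable = true;
      database = {
        host = "db.example.org";   # remote host: no dependency on postgresql.service
        username = "roundcube";
        password = "placeholder";
        dbname = "roundcube";
      };
    };
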
diff --git a/nixpkgs/nixos/modules/services/mail/spamassassin.nix b/nixpkgs/nixos/modules/services/mail/spamassassin.nix
index 0c11ea431368..1fe77ce5a0c7 100644
--- a/nixpkgs/nixos/modules/services/mail/spamassassin.nix
+++ b/nixpkgs/nixos/modules/services/mail/spamassassin.nix
@@ -174,7 +174,7 @@ in
       after = [ "network.target" ];
 
       serviceConfig = {
-        ExecStart = "${pkgs.spamassassin}/bin/spamd ${optionalString cfg.debug "-D"} --username=spamd --groupname=spamd --siteconfigpath=${spamdEnv} --virtual-config-dir=/var/lib/spamassassin/user-%u --allow-tell --pidfile=/var/run/spamd.pid";
+        ExecStart = "${pkgs.spamassassin}/bin/spamd ${optionalString cfg.debug "-D"} --username=spamd --groupname=spamd --siteconfigpath=${spamdEnv} --virtual-config-dir=/var/lib/spamassassin/user-%u --allow-tell --pidfile=/run/spamd.pid";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
       };
 
diff --git a/nixpkgs/nixos/modules/services/misc/matrix-synapse.nix b/nixpkgs/nixos/modules/services/misc/matrix-synapse.nix
index 87999c3614fc..5e465926b832 100644
--- a/nixpkgs/nixos/modules/services/misc/matrix-synapse.nix
+++ b/nixpkgs/nixos/modules/services/misc/matrix-synapse.nix
@@ -30,7 +30,7 @@ ${optionalString (cfg.bind_host != null) ''
 bind_host: "${cfg.bind_host}"
 ''}
 server_name: "${cfg.server_name}"
-pid_file: "/var/run/matrix-synapse.pid"
+pid_file: "/run/matrix-synapse.pid"
 web_client: ${boolToString cfg.web_client}
 ${optionalString (cfg.public_baseurl != null) ''
 public_baseurl: "${cfg.public_baseurl}"
diff --git a/nixpkgs/nixos/modules/services/misc/mbpfan.nix b/nixpkgs/nixos/modules/services/misc/mbpfan.nix
index 50f6f80ad00c..e22d1ed61f99 100644
--- a/nixpkgs/nixos/modules/services/misc/mbpfan.nix
+++ b/nixpkgs/nixos/modules/services/misc/mbpfan.nix
@@ -101,7 +101,7 @@ in {
         Type = "simple";
         ExecStart = "${cfg.package}/bin/mbpfan -f${verbose}";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
-        PIDFile = "/var/run/mbpfan.pid";
+        PIDFile = "/run/mbpfan.pid";
         Restart = "always";
       };
     };
diff --git a/nixpkgs/nixos/modules/services/misc/spice-vdagentd.nix b/nixpkgs/nixos/modules/services/misc/spice-vdagentd.nix
index f322ba4cbd58..2dd9fcf68ab0 100644
--- a/nixpkgs/nixos/modules/services/misc/spice-vdagentd.nix
+++ b/nixpkgs/nixos/modules/services/misc/spice-vdagentd.nix
@@ -19,7 +19,7 @@ in
       description = "spice-vdagent daemon";
       wantedBy = [ "graphical.target" ];
       preStart = ''
-        mkdir -p "/var/run/spice-vdagentd/"
+        mkdir -p "/run/spice-vdagentd/"
       '';
       serviceConfig = {
         Type = "forking";
diff --git a/nixpkgs/nixos/modules/services/misc/svnserve.nix b/nixpkgs/nixos/modules/services/misc/svnserve.nix
index 04a6cd7bfa9b..6292bc52b1e3 100644
--- a/nixpkgs/nixos/modules/services/misc/svnserve.nix
+++ b/nixpkgs/nixos/modules/services/misc/svnserve.nix
@@ -38,7 +38,7 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       preStart = "mkdir -p ${cfg.svnBaseDir}";
-      script = "${pkgs.subversion.out}/bin/svnserve -r ${cfg.svnBaseDir} -d --foreground --pid-file=/var/run/svnserve.pid";
+      script = "${pkgs.subversion.out}/bin/svnserve -r ${cfg.svnBaseDir} -d --foreground --pid-file=/run/svnserve.pid";
     };
   };
 }
diff --git a/nixpkgs/nixos/modules/services/monitoring/nagios.nix b/nixpkgs/nixos/modules/services/monitoring/nagios.nix
index e5496209f827..7f65236ed3d3 100644
--- a/nixpkgs/nixos/modules/services/monitoring/nagios.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/nagios.nix
@@ -24,7 +24,7 @@ let
       status_file=${nagiosState}/status.dat
       object_cache_file=${nagiosState}/objects.cache
       temp_file=${nagiosState}/nagios.tmp
-      lock_file=/var/run/nagios.lock # Not used I think.
+      lock_file=/run/nagios.lock # Not used I think.
       state_retention_file=${nagiosState}/retention.dat
       query_socket=${nagiosState}/nagios.qh
       check_result_path=${nagiosState}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
index 7d790b6b590b..11d85e9c4fc3 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
@@ -40,22 +40,6 @@ in {
         '';
       };
 
-      user = mkOption {
-        type = types.str;
-        default = "nobody";
-        description = ''
-          User name under which Alertmanager shall be run.
-        '';
-      };
-
-      group = mkOption {
-        type = types.str;
-        default = "nogroup";
-        description = ''
-          Group under which Alertmanager shall be run.
-        '';
-      };
-
       configuration = mkOption {
         type = types.nullOr types.attrs;
         default = null;
@@ -151,17 +135,13 @@ in {
       systemd.services.alertmanager = {
         wantedBy = [ "multi-user.target" ];
         after    = [ "network.target" ];
-        script = ''
-          ${cfg.package}/bin/alertmanager \
-            ${concatStringsSep " \\\n  " cmdlineArgs}
-        '';
-
         serviceConfig = {
-          User = cfg.user;
-          Group = cfg.group;
           Restart  = "always";
-          PrivateTmp = true;
+          DynamicUser = true;
           WorkingDirectory = "/tmp";
+          ExecStart = "${cfg.package}/bin/alertmanager" +
+            optionalString (length cmdlineArgs != 0) (" \\\n  " +
+              concatStringsSep " \\\n  " cmdlineArgs);
           ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
         };
       };
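
With DynamicUser in place, the removed user/group options simply drop out of existing configurations; systemd allocates an ephemeral account per run. A minimal sketch (illustrative only; the route and receiver values are placeholders):

    services.prometheus.alertmanager = {
      enable = true;
      configuration = {
        route.receiver = "default";
        receivers = [ { name = "default"; } ];
      };
      # user = "alertmanager";   # no longer valid: removed in favour of DynamicUser
    };
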
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix
index cc703573d8cd..e7ac12c07d33 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix
@@ -4,9 +4,24 @@ with lib;
 
 let
   cfg = config.services.prometheus;
+  cfg2 = config.services.prometheus2;
   promUser = "prometheus";
   promGroup = "prometheus";
 
+  stateDir =
+    if cfg.stateDir != null
+    then cfg.stateDir
+    else
+      if cfg.dataDir != null
+      then
+        # This assumes /var/lib/ is a prefix of cfg.dataDir.
+        # This is checked as an assertion below.
+        removePrefix stateDirBase cfg.dataDir
+      else "prometheus";
+  stateDirBase = "/var/lib/";
+  workingDir  = stateDirBase + stateDir;
+  workingDir2 = stateDirBase + cfg2.stateDir;
+
   # Get a submodule without any embedded metadata:
   _filter = x: filterAttrs (k: v: k != "_module") x;
 
@@ -17,38 +32,96 @@ let
     promtool ${what} $out
   '';
 
+  # a wrapper that verifies that the configuration is valid for
+  # prometheus 2
+  prom2toolCheck = what: name: file:
+    pkgs.runCommand
+      "${name}-${replaceStrings [" "] [""] what}-checked"
+      { buildInputs = [ cfg2.package ]; } ''
+    ln -s ${file} $out
+    promtool ${what} $out
+  '';
+
   # Pretty-print JSON to a file
   writePrettyJSON = name: x:
     pkgs.runCommand name { preferLocalBuild = true; } ''
       echo '${builtins.toJSON x}' | ${pkgs.jq}/bin/jq . > $out
     '';
 
-  # This becomes the main config file
+  # This becomes the main config file for Prometheus 1
   promConfig = {
     global = cfg.globalConfig;
     rule_files = map (promtoolCheck "check-rules" "rules") (cfg.ruleFiles ++ [
       (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
     ]);
-    scrape_configs = cfg.scrapeConfigs;
+    scrape_configs = filterEmpty cfg.scrapeConfigs;
   };
 
   generatedPrometheusYml = writePrettyJSON "prometheus.yml" promConfig;
 
   prometheusYml = let
-    yml =  if cfg.configText != null then
+    yml = if cfg.configText != null then
       pkgs.writeText "prometheus.yml" cfg.configText
       else generatedPrometheusYml;
     in promtoolCheck "check-config" "prometheus.yml" yml;
 
   cmdlineArgs = cfg.extraFlags ++ [
-    "-storage.local.path=${cfg.dataDir}/metrics"
+    "-storage.local.path=${workingDir}/metrics"
     "-config.file=${prometheusYml}"
     "-web.listen-address=${cfg.listenAddress}"
     "-alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
     "-alertmanager.timeout=${toString cfg.alertmanagerTimeout}s"
-    (optionalString (cfg.alertmanagerURL != []) "-alertmanager.url=${concatStringsSep "," cfg.alertmanagerURL}")
-    (optionalString (cfg.webExternalUrl != null) "-web.external-url=${cfg.webExternalUrl}")
-  ];
+  ] ++
+  optional (cfg.alertmanagerURL != []) "-alertmanager.url=${concatStringsSep "," cfg.alertmanagerURL}" ++
+  optional (cfg.webExternalUrl != null) "-web.external-url=${cfg.webExternalUrl}";
+
+  # This becomes the main config file for Prometheus 2
+  promConfig2 = {
+    global = cfg2.globalConfig;
+    rule_files = map (prom2toolCheck "check rules" "rules") (cfg2.ruleFiles ++ [
+      (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg2.rules))
+    ]);
+    scrape_configs = filterEmpty cfg2.scrapeConfigs;
+    alerting = optionalAttrs (cfg2.alertmanagerURL != []) {
+      alertmanagers = [{
+        static_configs = [{
+          targets = cfg2.alertmanagerURL;
+        }];
+      }];
+    };
+  };
+
+  generatedPrometheus2Yml = writePrettyJSON "prometheus.yml" promConfig2;
+
+  prometheus2Yml = let
+    yml = if cfg2.configText != null then
+      pkgs.writeText "prometheus.yml" cfg2.configText
+      else generatedPrometheus2Yml;
+    in prom2toolCheck "check config" "prometheus.yml" yml;
+
+  cmdlineArgs2 = cfg2.extraFlags ++ [
+    "--storage.tsdb.path=${workingDir2}/data/"
+    "--config.file=${prometheus2Yml}"
+    "--web.listen-address=${cfg2.listenAddress}"
+    "--alertmanager.notification-queue-capacity=${toString cfg2.alertmanagerNotificationQueueCapacity}"
+    "--alertmanager.timeout=${toString cfg2.alertmanagerTimeout}s"
+  ] ++
+  optional (cfg2.webExternalUrl != null) "--web.external-url=${cfg2.webExternalUrl}";
+
+  filterEmpty = filterAttrsListRecursive (_n: v: !(v == null || v == [] || v == {}));
+  filterAttrsListRecursive = pred: x:
+    if isAttrs x then
+      listToAttrs (
+        concatMap (name:
+          let v = x.${name}; in
+          if pred name v then [
+            (nameValuePair name (filterAttrsListRecursive pred v))
+          ] else []
+        ) (attrNames x)
+      )
+    else if isList x then
+      map (filterAttrsListRecursive pred) x
+    else x;
 
   promTypes.globalConfig = types.submodule {
     options = {
@@ -179,6 +252,14 @@ let
           Optional http login credentials for metrics scraping.
         '';
       };
+      tls_config = mkOption {
+        type = types.nullOr promTypes.tls_config;
+        default = null;
+        apply = x: mapNullable _filter x;
+        description = ''
+          Configures the scrape request's TLS settings.
+        '';
+      };
       dns_sd_configs = mkOption {
         type = types.listOf promTypes.dns_sd_config;
         default = [];
@@ -211,6 +292,14 @@ let
           List of labeled target groups for this job.
         '';
       };
+      ec2_sd_configs = mkOption {
+        type = types.listOf promTypes.ec2_sd_config;
+        default = [];
+        apply = x: map _filter x;
+        description = ''
+          List of EC2 service discovery configurations.
+        '';
+      };
       relabel_configs = mkOption {
         type = types.listOf promTypes.relabel_config;
         default = [];
@@ -240,6 +329,96 @@ let
     };
   };
 
+  promTypes.ec2_sd_config = types.submodule {
+    options = {
+      region = mkOption {
+        type = types.str;
+        description = ''
+          The AWS Region.
+        '';
+      };
+      endpoint = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Custom endpoint to be used.
+        '';
+      };
+      access_key = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          The AWS API key id. If blank, the environment variable
+          <literal>AWS_ACCESS_KEY_ID</literal> is used.
+        '';
+      };
+      secret_key = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          The AWS API key secret. If blank, the environment variable
+           <literal>AWS_SECRET_ACCESS_KEY</literal> is used.
+        '';
+      };
+      profile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Named AWS profile used to connect to the API.
+        '';
+      };
+      role_arn = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          AWS Role ARN, an alternative to using AWS API keys.
+        '';
+      };
+      refresh_interval = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Refresh interval to re-read the instance list.
+        '';
+      };
+      port = mkOption {
+        type = types.int;
+        default = 80;
+        description = ''
+          The port to scrape metrics from. If using the public IP
+          address, this must instead be specified in the relabeling
+          rule.
+        '';
+      };
+      filters = mkOption {
+        type = types.nullOr (types.listOf promTypes.filter);
+        default = null;
+        description = ''
+          Filters can be used optionally to filter the instance list by other criteria.
+        '';
+      };
+    };
+  };
+
+  promTypes.filter = types.submodule {
+    options = {
+      name = mkOption {
+        type = types.str;
+        description = ''
+          See <link xlink:href="https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html">this list</link>
+          for the available filters.
+        '';
+      };
+      value = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Value of the filter.
+        '';
+      };
+    };
+  };
+
   promTypes.dns_sd_config = types.submodule {
     options = {
       names = mkOption {
@@ -373,6 +552,47 @@ let
     };
   };
 
+  promTypes.tls_config = types.submodule {
+    options = {
+      ca_file = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          CA certificate to validate API server certificate with.
+        '';
+      };
+      cert_file = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Certificate file for client cert authentication to the server.
+        '';
+      };
+      key_file = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Key file for client cert authentication to the server.
+        '';
+      };
+      server_name = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          ServerName extension to indicate the name of the server.
+          http://tools.ietf.org/html/rfc4366#section-3.1
+        '';
+      };
+      insecure_skip_verify = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Disable validation of the server certificate.
+        '';
+      };
+    };
+  };
+
 in {
   options = {
     services.prometheus = {
@@ -403,10 +623,21 @@ in {
       };
 
       dataDir = mkOption {
-        type = types.path;
-        default = "/var/lib/prometheus";
+        type = types.nullOr types.path;
+        default = null;
         description = ''
           Directory to store Prometheus metrics data.
+          This option is deprecated; please use <option>services.prometheus.stateDir</option> instead.
+        '';
+      };
+
+      stateDir = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Directory below <literal>${stateDirBase}</literal> to store Prometheus metrics data.
+          This directory will be created automatically using systemd's StateDirectory mechanism.
+          Defaults to <literal>prometheus</literal>.
         '';
       };
 
@@ -497,30 +728,201 @@ in {
         '';
       };
     };
-  };
+    services.prometheus2 = {
 
-  config = mkIf cfg.enable {
-    users.groups.${promGroup}.gid = config.ids.gids.prometheus;
-    users.users.${promUser} = {
-      description = "Prometheus daemon user";
-      uid = config.ids.uids.prometheus;
-      group = promGroup;
-      home = cfg.dataDir;
-      createHome = true;
-    };
-    systemd.services.prometheus = {
-      wantedBy = [ "multi-user.target" ];
-      after    = [ "network.target" ];
-      script = ''
-        #!/bin/sh
-        exec ${cfg.package}/bin/prometheus \
-          ${concatStringsSep " \\\n  " cmdlineArgs}
-      '';
-      serviceConfig = {
-        User = promUser;
-        Restart  = "always";
-        WorkingDirectory = cfg.dataDir;
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable the Prometheus 2 monitoring daemon.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.prometheus_2;
+        defaultText = "pkgs.prometheus_2";
+        description = ''
+          The prometheus2 package that should be used.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0:9090";
+        description = ''
+          Address to listen on for the web interface, API, and telemetry.
+        '';
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "prometheus2";
+        description = ''
+          Directory below <literal>${stateDirBase}</literal> to store Prometheus metrics data.
+          This directory will be created automatically using systemd's StateDirectory mechanism.
+          Defaults to <literal>prometheus2</literal>.
+        '';
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Extra command-line options to pass when launching Prometheus 2.
+        '';
+      };
+
+      configText = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = ''
+          If non-null, this option defines the text that is written to
+          prometheus.yml. If null, the contents of prometheus.yml are generated
+          from the structured config options.
+        '';
+      };
+
+      globalConfig = mkOption {
+        type = promTypes.globalConfig;
+        default = {};
+        apply = _filter;
+        description = ''
+          Parameters that are valid in all configuration contexts. They
+          also serve as defaults for other configuration sections.
+        '';
+      };
+
+      rules = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Alerting and/or recording rules to evaluate at runtime.
+        '';
+      };
+
+      ruleFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = ''
+          Any additional rule files to include in this configuration.
+        '';
+      };
+
+      scrapeConfigs = mkOption {
+        type = types.listOf promTypes.scrape_config;
+        default = [];
+        apply = x: map _filter x;
+        description = ''
+          A list of scrape configurations.
+        '';
+      };
+
+      alertmanagerURL = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          List of Alertmanager URLs to send notifications to.
+        '';
+      };
+
+      alertmanagerNotificationQueueCapacity = mkOption {
+        type = types.int;
+        default = 10000;
+        description = ''
+          The capacity of the queue for pending Alertmanager notifications.
+        '';
+      };
+
+      alertmanagerTimeout = mkOption {
+        type = types.int;
+        default = 10;
+        description = ''
+          Alertmanager HTTP API timeout (in seconds).
+        '';
+      };
+
+      webExternalUrl = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "https://example.com/";
+        description = ''
+          The URL under which Prometheus is externally reachable (for example,
+          if Prometheus is served via a reverse proxy).
+        '';
       };
     };
-  };
+   };
+
+  config = mkMerge [
+    (mkIf (cfg.enable || cfg2.enable) {
+      users.groups.${promGroup}.gid = config.ids.gids.prometheus;
+      users.users.${promUser} = {
+        description = "Prometheus daemon user";
+        uid = config.ids.uids.prometheus;
+        group = promGroup;
+      };
+    })
+    (mkIf cfg.enable {
+      warnings =
+        optional (cfg.dataDir != null) ''
+          The option services.prometheus.dataDir is deprecated, please use
+          services.prometheus.stateDir.
+        '';
+      assertions = [
+        {
+          assertion = !(cfg.dataDir != null && cfg.stateDir != null);
+          message =
+            "The options services.prometheus.dataDir and services.prometheus.stateDir" +
+            " can't both be set at the same time! It's recommended to only set the latter" +
+            " since the former is deprecated.";
+        }
+        {
+          assertion = cfg.dataDir != null -> hasPrefix stateDirBase cfg.dataDir;
+          message =
+            "The option services.prometheus.dataDir should have ${stateDirBase} as a prefix!";
+        }
+        {
+          assertion = cfg.stateDir != null -> !hasPrefix "/" cfg.stateDir;
+          message =
+            "The option services.prometheus.stateDir shouldn't be an absolute directory." +
+            " It should be a directory relative to ${stateDirBase}.";
+        }
+        {
+          assertion = cfg2.stateDir != null -> !hasPrefix "/" cfg2.stateDir;
+          message =
+            "The option services.prometheus2.stateDir shouldn't be an absolute directory." +
+            " It should be a directory relative to ${stateDirBase}.";
+        }
+      ];
+      systemd.services.prometheus = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network.target" ];
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/prometheus" +
+            optionalString (length cmdlineArgs != 0) (" \\\n  " +
+              concatStringsSep " \\\n  " cmdlineArgs);
+          User = promUser;
+          Restart  = "always";
+          WorkingDirectory = workingDir;
+          StateDirectory = stateDir;
+        };
+      };
+    })
+    (mkIf cfg2.enable {
+      systemd.services.prometheus2 = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network.target" ];
+        serviceConfig = {
+          ExecStart = "${cfg2.package}/bin/prometheus" +
+            optionalString (length cmdlineArgs2 != 0) (" \\\n  " +
+              concatStringsSep " \\\n  " cmdlineArgs2);
+          User = promUser;
+          Restart  = "always";
+          WorkingDirectory = workingDir2;
+          StateDirectory = cfg2.stateDir;
+        };
+      };
+    })
+  ];
 }
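
To make the reworked module concrete, here is a minimal sketch of a services.prometheus2 configuration that uses only options introduced in the diff above. The job_name and ec2_sd_configs attribute names inside the scrape config are assumptions based on the upstream Prometheus scrape_config schema; region, port and filters come from the ec2_sd_config submodule defined here, and all concrete values are illustrative.

  { ... }:
  {
    services.prometheus2 = {
      enable = true;
      listenAddress = "127.0.0.1:9090";
      # Data is kept in /var/lib/prometheus2 via systemd's StateDirectory.
      stateDir = "prometheus2";
      webExternalUrl = "https://example.com/prometheus/";
      scrapeConfigs = [
        {
          job_name = "ec2-nodes";            # assumed upstream field name
          ec2_sd_configs = [ {               # assumed upstream field name
            region = "eu-west-1";
            port = 9100;
            filters = [ { name = "tag:prometheus"; value = [ "scrape" ]; } ];
          } ];
        }
      ];
    };
  }
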
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix
new file mode 100644
index 000000000000..f8fcc3eb97ef
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix
@@ -0,0 +1,166 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.pushgateway;
+
+  cmdlineArgs =
+       opt "web.listen-address" cfg.web.listen-address
+    ++ opt "web.telemetry-path" cfg.web.telemetry-path
+    ++ opt "web.external-url" cfg.web.external-url
+    ++ opt "web.route-prefix" cfg.web.route-prefix
+    ++ optional cfg.persistMetrics ''--persistence.file="/var/lib/${cfg.stateDir}/metrics"''
+    ++ opt "persistence.interval" cfg.persistence.interval
+    ++ opt "log.level" cfg.log.level
+    ++ opt "log.format" cfg.log.format
+    ++ cfg.extraFlags;
+
+  opt = k : v : optional (v != null) ''--${k}="${v}"'';
+
+in {
+  options = {
+    services.prometheus.pushgateway = {
+      enable = mkEnableOption "Prometheus Pushgateway";
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.prometheus-pushgateway;
+        defaultText = "pkgs.prometheus-pushgateway";
+        description = ''
+          Package that should be used for the prometheus pushgateway.
+        '';
+      };
+
+      web.listen-address = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Address to listen on for the web interface, API and telemetry.
+
+          <literal>null</literal> will default to <literal>:9091</literal>.
+        '';
+      };
+
+      web.telemetry-path = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Path under which to expose metrics.
+
+          <literal>null</literal> will default to <literal>/metrics</literal>.
+        '';
+      };
+
+      web.external-url = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          The URL under which Pushgateway is externally reachable.
+        '';
+      };
+
+      web.route-prefix = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Prefix for the internal routes of web endpoints.
+
+          Defaults to the path of
+          <option>services.prometheus.pushgateway.web.external-url</option>.
+        '';
+      };
+
+      persistence.interval = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "10m";
+        description = ''
+          The minimum interval at which to write out the persistence file.
+
+          <literal>null</literal> will default to <literal>5m</literal>.
+        '';
+      };
+
+      log.level = mkOption {
+        type = types.nullOr (types.enum ["debug" "info" "warn" "error" "fatal"]);
+        default = null;
+        description = ''
+          Only log messages with the given severity or above.
+
+          <literal>null</literal> will default to <literal>info</literal>.
+        '';
+      };
+
+      log.format = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "logger:syslog?appname=bob&local=7";
+        description = ''
+          Set the log target and format.
+
+          <literal>null</literal> will default to <literal>logger:stderr</literal>.
+        '';
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Extra command-line options to pass when launching the Pushgateway.
+        '';
+      };
+
+      persistMetrics = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to persist metrics to a file.
+
+          When enabled, metrics will be saved to a file called
+          <literal>metrics</literal> in the directory
+          <literal>/var/lib/pushgateway</literal>. The directory below
+          <literal>/var/lib</literal> can be set using
+          <option>services.prometheus.pushgateway.stateDir</option>.
+        '';
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "pushgateway";
+        description = ''
+          Directory below <literal>/var/lib</literal> to store metrics.
+
+          This directory will be created automatically using systemd's
+          StateDirectory mechanism when
+          <option>services.prometheus.pushgateway.persistMetrics</option>
+          is enabled.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !hasPrefix "/" cfg.stateDir;
+        message =
+          "The option services.prometheus.pushgateway.stateDir" +
+          " shouldn't be an absolute directory." +
+          " It should be a directory relative to /var/lib.";
+      }
+    ];
+    systemd.services.pushgateway = {
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" ];
+      serviceConfig = {
+        Restart  = "always";
+        DynamicUser = true;
+        ExecStart = "${cfg.package}/bin/pushgateway" +
+          optionalString (length cmdlineArgs != 0) (" \\\n  " +
+            concatStringsSep " \\\n  " cmdlineArgs);
+        StateDirectory = if cfg.persistMetrics then cfg.stateDir else null;
+      };
+    };
+  };
+}
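
For illustration, a minimal sketch that enables the Pushgateway with persisted metrics, using only options defined in the new module above; the values are examples, not recommendations.

  { ... }:
  {
    services.prometheus.pushgateway = {
      enable = true;
      web.listen-address = ":9091";
      # Metrics are written to /var/lib/pushgateway/metrics
      # (stateDir defaults to "pushgateway").
      persistMetrics = true;
      persistence.interval = "10m";
      log.level = "info";
    };
  }
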
diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix
index 426cf9bf86ef..0519e7c2ad6a 100644
--- a/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix
@@ -9,7 +9,7 @@ let
 
   zabbix = cfg.package;
 
-  stateDir = "/var/run/zabbix";
+  stateDir = "/run/zabbix";
 
   logDir = "/var/log/zabbix";
 
diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix
index 5f9fc12832fc..fdeab6af4417 100644
--- a/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix
@@ -7,7 +7,7 @@ let
 
   cfg = config.services.zabbixServer;
 
-  stateDir = "/var/run/zabbix";
+  stateDir = "/run/zabbix";
 
   logDir = "/var/log/zabbix";
 
diff --git a/nixpkgs/nixos/modules/services/networking/asterisk.nix b/nixpkgs/nixos/modules/services/networking/asterisk.nix
index b8ec2b25a227..03a2544b9a7e 100644
--- a/nixpkgs/nixos/modules/services/networking/asterisk.nix
+++ b/nixpkgs/nixos/modules/services/networking/asterisk.nix
@@ -45,7 +45,7 @@ let
       astdatadir => /var/lib/asterisk
       astagidir => /var/lib/asterisk/agi-bin
       astspooldir => /var/spool/asterisk
-      astrundir => /var/run/asterisk
+      astrundir => /run/asterisk
       astlogdir => /var/log/asterisk
       astsbindir => ${cfg.package}/sbin
     '';
@@ -257,7 +257,7 @@ in
         ExecReload = ''${cfg.package}/bin/asterisk -x "core reload"
           '';
         Type = "forking";
-        PIDFile = "/var/run/asterisk/asterisk.pid";
+        PIDFile = "/run/asterisk/asterisk.pid";
       };
     };
   };
diff --git a/nixpkgs/nixos/modules/services/networking/avahi-daemon.nix b/nixpkgs/nixos/modules/services/networking/avahi-daemon.nix
index 488d9877b5e0..4c91a0c415b6 100644
--- a/nixpkgs/nixos/modules/services/networking/avahi-daemon.nix
+++ b/nixpkgs/nixos/modules/services/networking/avahi-daemon.nix
@@ -214,7 +214,7 @@ in
 
     systemd.sockets.avahi-daemon =
       { description = "Avahi mDNS/DNS-SD Stack Activation Socket";
-        listenStreams = [ "/var/run/avahi-daemon/socket" ];
+        listenStreams = [ "/run/avahi-daemon/socket" ];
         wantedBy = [ "sockets.target" ];
       };
 
@@ -229,7 +229,7 @@ in
 
         path = [ pkgs.coreutils pkgs.avahi ];
 
-        preStart = "mkdir -p /var/run/avahi-daemon";
+        preStart = "mkdir -p /run/avahi-daemon";
 
         script =
           ''
diff --git a/nixpkgs/nixos/modules/services/networking/bind.nix b/nixpkgs/nixos/modules/services/networking/bind.nix
index abcd1ef6ff5d..98486cefd528 100644
--- a/nixpkgs/nixos/modules/services/networking/bind.nix
+++ b/nixpkgs/nixos/modules/services/networking/bind.nix
@@ -25,8 +25,8 @@ let
         blackhole { badnetworks; };
         forward first;
         forwarders { ${concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
-        directory "/var/run/named";
-        pid-file "/var/run/named/named.pid";
+        directory "/run/named";
+        pid-file "/run/named/named.pid";
         ${cfg.extraOptions}
       };
 
@@ -187,8 +187,8 @@ in
           ${pkgs.bind.out}/sbin/rndc-confgen -r /dev/urandom -c /etc/bind/rndc.key -u ${bindUser} -a -A hmac-sha256 2>/dev/null
         fi
 
-        ${pkgs.coreutils}/bin/mkdir -p /var/run/named
-        chown ${bindUser} /var/run/named
+        ${pkgs.coreutils}/bin/mkdir -p /run/named
+        chown ${bindUser} /run/named
       '';
 
       serviceConfig = {
diff --git a/nixpkgs/nixos/modules/services/networking/hostapd.nix b/nixpkgs/nixos/modules/services/networking/hostapd.nix
index 9f74e4963296..3fbc08e90607 100644
--- a/nixpkgs/nixos/modules/services/networking/hostapd.nix
+++ b/nixpkgs/nixos/modules/services/networking/hostapd.nix
@@ -25,7 +25,7 @@ let
     logger_stdout=-1
     logger_stdout_level=2
 
-    ctrl_interface=/var/run/hostapd
+    ctrl_interface=/run/hostapd
     ctrl_interface_group=${cfg.group}
 
     ${if cfg.wpa then ''
diff --git a/nixpkgs/nixos/modules/services/networking/htpdate.nix b/nixpkgs/nixos/modules/services/networking/htpdate.nix
index f5d512c7cd5a..6954e5b060c4 100644
--- a/nixpkgs/nixos/modules/services/networking/htpdate.nix
+++ b/nixpkgs/nixos/modules/services/networking/htpdate.nix
@@ -62,7 +62,7 @@ in
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         Type = "forking";
-        PIDFile = "/var/run/htpdate.pid";
+        PIDFile = "/run/htpdate.pid";
         ExecStart = concatStringsSep " " [
           "${htpdate}/bin/htpdate"
           "-D -u nobody"
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/default.nix b/nixpkgs/nixos/modules/services/networking/hylafax/default.nix
index 4c63b822d165..d8ffa3fc04d2 100644
--- a/nixpkgs/nixos/modules/services/networking/hylafax/default.nix
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/default.nix
@@ -26,4 +26,6 @@
     }];
   };
 
+  meta.maintainers = [ lib.maintainers.yarny ];
+
 }
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/faxq-default.nix b/nixpkgs/nixos/modules/services/networking/hylafax/faxq-default.nix
index a2630ce66b71..9b634650cf79 100644
--- a/nixpkgs/nixos/modules/services/networking/hylafax/faxq-default.nix
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/faxq-default.nix
@@ -4,7 +4,7 @@
 
 {
 
-  ModemGroup = [ ''"any:.*"'' ];
+  ModemGroup = [ ''"any:0:.*"'' ];
   ServerTracing = "0x78701";
   SessionTracing = "0x78701";
   UUCPLockDir = "/var/lock";
diff --git a/nixpkgs/nixos/modules/services/networking/iodine.nix b/nixpkgs/nixos/modules/services/networking/iodine.nix
index 58ad0df4ff20..344f84374bbd 100644
--- a/nixpkgs/nixos/modules/services/networking/iodine.nix
+++ b/nixpkgs/nixos/modules/services/networking/iodine.nix
@@ -63,7 +63,7 @@ in
             passwordFile = mkOption {
               type = types.str;
               default = "";
-              description = "File that containts password";
+              description = "File that contains password";
             };
           };
         }));
@@ -100,7 +100,7 @@ in
         passwordFile = mkOption {
           type = types.str;
           default = "";
-          description = "File that containts password";
+          description = "File that contains password";
         };
       };
 
@@ -120,7 +120,7 @@ in
         description = "iodine client - ${name}";
         after = [ "network.target" ];
         wantedBy = [ "multi-user.target" ];
-        script = "${pkgs.iodine}/bin/iodine -f -u ${iodinedUser} ${cfg.extraConfig} ${optionalString (cfg.passwordFile != "") "-P $(cat \"${cfg.passwordFile}\")"} ${cfg.relay} ${cfg.server}";
+        script = "exec ${pkgs.iodine}/bin/iodine -f -u ${iodinedUser} ${cfg.extraConfig} ${optionalString (cfg.passwordFile != "") "< \"${cfg.passwordFile}\""} ${cfg.relay} ${cfg.server}";
         serviceConfig = {
           RestartSec = "30s";
           Restart = "always";
@@ -136,7 +136,7 @@ in
         description = "iodine, ip over dns server daemon";
         after = [ "network.target" ];
         wantedBy = [ "multi-user.target" ];
-        script = "${pkgs.iodine}/bin/iodined -f -u ${iodinedUser} ${cfg.server.extraConfig} ${optionalString (cfg.server.passwordFile != "") "-P $(cat \"${cfg.server.passwordFile}\")"} ${cfg.server.ip} ${cfg.server.domain}";
+        script = "exec ${pkgs.iodine}/bin/iodined -f -u ${iodinedUser} ${cfg.server.extraConfig} ${optionalString (cfg.server.passwordFile != "") "< \"${cfg.server.passwordFile}\""} ${cfg.server.ip} ${cfg.server.domain}";
       };
     };
 
diff --git a/nixpkgs/nixos/modules/services/networking/ircd-hybrid/ircd.conf b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/ircd.conf
index bb22832dbdb2..17ef203840af 100644
--- a/nixpkgs/nixos/modules/services/networking/ircd-hybrid/ircd.conf
+++ b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/ircd.conf
@@ -987,7 +987,7 @@ general {
 	 * egdpool_path: path to EGD pool. Not necessary for OpenSSL >= 0.9.7
 	 * which automatically finds the path.
 	 */
-#	egdpool_path = "/var/run/egd-pool";
+#	egdpool_path = "/run/egd-pool";
 
 
 	/*
diff --git a/nixpkgs/nixos/modules/services/networking/lldpd.nix b/nixpkgs/nixos/modules/services/networking/lldpd.nix
index dec30cc92f6a..d5de9c45d84b 100644
--- a/nixpkgs/nixos/modules/services/networking/lldpd.nix
+++ b/nixpkgs/nixos/modules/services/networking/lldpd.nix
@@ -23,7 +23,7 @@ in
     users.users._lldpd = {
       description = "lldpd user";
       group = "_lldpd";
-      home = "/var/run/lldpd";
+      home = "/run/lldpd";
       isSystemUser = true;
     };
     users.groups._lldpd = {};
diff --git a/nixpkgs/nixos/modules/services/networking/miniupnpd.nix b/nixpkgs/nixos/modules/services/networking/miniupnpd.nix
index ab714a6ac75e..c095d9948546 100644
--- a/nixpkgs/nixos/modules/services/networking/miniupnpd.nix
+++ b/nixpkgs/nixos/modules/services/networking/miniupnpd.nix
@@ -71,7 +71,7 @@ in
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         ExecStart = "${pkgs.miniupnpd}/bin/miniupnpd -f ${configFile}";
-        PIDFile = "/var/run/miniupnpd.pid";
+        PIDFile = "/run/miniupnpd.pid";
         Type = "forking";
       };
     };
diff --git a/nixpkgs/nixos/modules/services/networking/networkmanager.nix b/nixpkgs/nixos/modules/services/networking/networkmanager.nix
index d372dfd8f412..5e5544471818 100644
--- a/nixpkgs/nixos/modules/services/networking/networkmanager.nix
+++ b/nixpkgs/nixos/modules/services/networking/networkmanager.nix
@@ -466,7 +466,7 @@ in {
 
     systemd.packages = cfg.packages;
 
-    systemd.services."network-manager" = {
+    systemd.services."NetworkManager" = {
       wantedBy = [ "network.target" ];
       restartTriggers = [ configFile ];
 
@@ -478,9 +478,9 @@ in {
     };
 
     systemd.services.nm-setup-hostsdirs = mkIf dynamicHostsEnabled {
-      wantedBy = [ "network-manager.service" ];
-      before = [ "network-manager.service" ];
-      partOf = [ "network-manager.service" ];
+      wantedBy = [ "NetworkManager.service" ];
+      before = [ "NetworkManager.service" ];
+      partOf = [ "NetworkManager.service" ];
       script = concatStrings (mapAttrsToList (n: d: ''
         mkdir -p "/run/NetworkManager/hostsdirs/${n}"
         chown "${d.user}:${d.group}" "/run/NetworkManager/hostsdirs/${n}"
diff --git a/nixpkgs/nixos/modules/services/networking/ocserv.nix b/nixpkgs/nixos/modules/services/networking/ocserv.nix
index 61473a9fabf9..dc26ffeafeef 100644
--- a/nixpkgs/nixos/modules/services/networking/ocserv.nix
+++ b/nixpkgs/nixos/modules/services/networking/ocserv.nix
@@ -31,7 +31,7 @@ in
         udp-port = 443
         run-as-user = nobody
         run-as-group = nogroup
-        socket-file = /var/run/ocserv-socket
+        socket-file = /run/ocserv-socket
         server-cert = certs/server-cert.pem
         server-key = certs/server-key.pem
         keepalive = 32400
@@ -50,7 +50,7 @@ in
         rekey-time = 172800
         rekey-method = ssl
         use-occtl = true
-        pid-file = /var/run/ocserv.pid
+        pid-file = /run/ocserv.pid
         device = vpns
         predictable-ips = true
         default-domain = example.com
@@ -90,8 +90,8 @@ in
 
       serviceConfig = {
         PrivateTmp = true;
-        PIDFile = "/var/run/ocserv.pid";
-        ExecStart = "${pkgs.ocserv}/bin/ocserv --foreground --pid-file /var/run/ocesrv.pid --config /etc/ocserv/ocserv.conf";
+        PIDFile = "/run/ocserv.pid";
+        ExecStart = "${pkgs.ocserv}/bin/ocserv --foreground --pid-file /run/ocesrv.pid --config /etc/ocserv/ocserv.conf";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
       };
     };
diff --git a/nixpkgs/nixos/modules/services/networking/racoon.nix b/nixpkgs/nixos/modules/services/networking/racoon.nix
index 86e13d1ea0d6..328f4cb1497f 100644
--- a/nixpkgs/nixos/modules/services/networking/racoon.nix
+++ b/nixpkgs/nixos/modules/services/networking/racoon.nix
@@ -32,12 +32,12 @@ in {
           else cfg.configPath
         }";
         ExecReload = "${pkgs.ipsecTools}/bin/racoonctl reload-config";
-        PIDFile = "/var/run/racoon.pid";
+        PIDFile = "/run/racoon.pid";
         Type = "forking";
         Restart = "always";
       };
       preStart = ''
-        rm /var/run/racoon.pid || true
+        rm /run/racoon.pid || true
         mkdir -p /var/racoon
       '';
     };
diff --git a/nixpkgs/nixos/modules/services/networking/ssh/sshd.nix b/nixpkgs/nixos/modules/services/networking/ssh/sshd.nix
index b9b5d40c4574..cbb305cd3825 100644
--- a/nixpkgs/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixpkgs/nixos/modules/services/networking/ssh/sshd.nix
@@ -431,8 +431,6 @@ in
 
     services.openssh.extraConfig = mkOrder 0
       ''
-        Protocol 2
-
         UsePAM yes
 
         AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
diff --git a/nixpkgs/nixos/modules/services/networking/strongswan.nix b/nixpkgs/nixos/modules/services/networking/strongswan.nix
index 707d24b9220f..41b69039ba7a 100644
--- a/nixpkgs/nixos/modules/services/networking/strongswan.nix
+++ b/nixpkgs/nixos/modules/services/networking/strongswan.nix
@@ -54,7 +54,7 @@ in
     enable = mkEnableOption "strongSwan";
 
     secrets = mkOption {
-      type = types.listOf types.path;
+      type = types.listOf types.str;
       default = [];
       example = [ "/run/keys/ipsec-foo.secret" ];
       description = ''
diff --git a/nixpkgs/nixos/modules/services/networking/supplicant.nix b/nixpkgs/nixos/modules/services/networking/supplicant.nix
index 3c4321ab9e9d..35c1e649e2e1 100644
--- a/nixpkgs/nixos/modules/services/networking/supplicant.nix
+++ b/nixpkgs/nixos/modules/services/networking/supplicant.nix
@@ -132,7 +132,7 @@ in
           extraCmdArgs = mkOption {
             type = types.str;
             default = "";
-            example = "-e/var/run/wpa_supplicant/entropy.bin";
+            example = "-e/run/wpa_supplicant/entropy.bin";
             description =
               "Command line arguments to add when executing <literal>wpa_supplicant</literal>.";
           };
@@ -164,7 +164,7 @@ in
   
             socketDir = mkOption {
               type = types.str;
-              default = "/var/run/wpa_supplicant";
+              default = "/run/wpa_supplicant";
               description = "Directory of sockets for controlling wpa_supplicant.";
             };
   
diff --git a/nixpkgs/nixos/modules/services/networking/tox-node.nix b/nixpkgs/nixos/modules/services/networking/tox-node.nix
new file mode 100644
index 000000000000..c24e7fd12850
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tox-node.nix
@@ -0,0 +1,95 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  pkg = pkgs.tox-node;
+  cfg = config.services.tox-node;
+  homeDir = "/var/lib/tox-node";
+
+  configFile = let
+    # fetchurl should be switched to getting this file from tox-node.src once
+    # the dpkg directory is in a release
+    src = pkgs.fetchurl {
+      url = "https://raw.githubusercontent.com/tox-rs/tox-node/master/dpkg/config.yml";
+      sha256 = "1431wzpzm786mcvyzk1rp7ar418n45dr75hdggxvlm7pkpam31xa";
+    };
+    confJSON = pkgs.writeText "config.json" (
+      builtins.toJSON {
+        log-type = cfg.logType;
+        keys-file = cfg.keysFile;
+        udp-address = cfg.udpAddress;
+        tcp-addresses = cfg.tcpAddresses;
+        tcp-connections-limit = cfg.tcpConnectionLimit;
+        lan-discovery = cfg.lanDiscovery;
+        threads = cfg.threads;
+        motd = cfg.motd;
+      }
+    );
+  in with pkgs; runCommand "config.yml" {} ''
+    ${remarshal}/bin/remarshal -if yaml -of json ${src} -o src.json
+    ${jq}/bin/jq -s '(.[0] | with_entries( select(.key == "bootstrap-nodes"))) * .[1]' src.json ${confJSON} > $out
+  '';
+
+in {
+  options.services.tox-node = {
+    enable = mkEnableOption "Tox Node service";
+
+    logType = mkOption {
+      type = types.enum [ "Stderr" "Stdout" "Syslog" "None" ];
+      default = "Stderr";
+      description = "Logging implementation.";
+    };
+    keysFile = mkOption {
+      type = types.str;
+      default = "${homeDir}/keys";
+      description = "Path to the file where DHT keys are stored.";
+    };
+    udpAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0:33445";
+      description = "UDP address to run DHT node.";
+    };
+    tcpAddresses = mkOption {
+      type = types.listOf types.str;
+      default = [ "0.0.0.0:33445" ];
+      description = "TCP addresses to run TCP relay.";
+    };
+    tcpConnectionLimit = mkOption {
+      type = types.int;
+      default = 8192;
+      description = "Maximum number of active TCP connections relay can hold";
+    };
+    lanDiscovery = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Enable local network discovery.";
+    };
+    threads = mkOption {
+      type = types.int;
+      default = 1;
+      description = "Number of threads for execution";
+    };
+    motd = mkOption {
+      type = types.str;
+      default = "Hi from tox-rs! I'm up {{uptime}}. TCP: incoming {{tcp_packets_in}}, outgoing {{tcp_packets_out}}, UDP: incoming {{udp_packets_in}}, outgoing {{udp_packets_out}}";
+      description = "Message of the day";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.tox-node = {
+      description = "Tox Node";
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkg}/bin/tox-node config ${configFile}";
+        StateDirectory = "tox-node";
+        DynamicUser = true;
+        Restart = "always";
+      };
+    };
+  };
+}
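
A minimal sketch of enabling the new tox-node service; all attribute names are taken from the module above, and the values are illustrative.

  { ... }:
  {
    services.tox-node = {
      enable = true;
      logType = "Syslog";
      lanDiscovery = false;
      # Shown as the node's MOTD; the {{uptime}} template variable is
      # expanded by tox-node itself, as in the module's default value.
      motd = "NixOS tox-node, up {{uptime}}";
    };
  }
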
diff --git a/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix b/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix
index cdfe98aa0341..0bd9edf4a41c 100644
--- a/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix
@@ -6,7 +6,7 @@ let
   cfg = config.networking.wireless;
   configFile = if cfg.networks != {} then pkgs.writeText "wpa_supplicant.conf" ''
     ${optionalString cfg.userControlled.enable ''
-      ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=${cfg.userControlled.group}
+      ctrl_interface=DIR=/run/wpa_supplicant GROUP=${cfg.userControlled.group}
       update_config=1''}
     ${cfg.extraConfig}
     ${concatStringsSep "\n" (mapAttrsToList (ssid: config: with config; let
diff --git a/nixpkgs/nixos/modules/services/networking/xrdp.nix b/nixpkgs/nixos/modules/services/networking/xrdp.nix
index cc18f6d0064c..b7dd1c5d99dd 100644
--- a/nixpkgs/nixos/modules/services/networking/xrdp.nix
+++ b/nixpkgs/nixos/modules/services/networking/xrdp.nix
@@ -17,7 +17,7 @@ let
     chmod +x $out/startwm.sh
 
     substituteInPlace $out/xrdp.ini \
-      --replace "#rsakeys_ini=" "rsakeys_ini=/var/run/xrdp/rsakeys.ini" \
+      --replace "#rsakeys_ini=" "rsakeys_ini=/run/xrdp/rsakeys.ini" \
       --replace "certificate=" "certificate=${cfg.sslCert}" \
       --replace "key_file=" "key_file=${cfg.sslKey}" \
       --replace LogFile=xrdp.log LogFile=/dev/null \
@@ -132,9 +132,9 @@ in
             chown root:xrdp ${cfg.sslKey} ${cfg.sslCert}
             chmod 440 ${cfg.sslKey} ${cfg.sslCert}
           fi
-          if [ ! -s /var/run/xrdp/rsakeys.ini ]; then
-            mkdir -p /var/run/xrdp
-            ${cfg.package}/bin/xrdp-keygen xrdp /var/run/xrdp/rsakeys.ini
+          if [ ! -s /run/xrdp/rsakeys.ini ]; then
+            mkdir -p /run/xrdp
+            ${cfg.package}/bin/xrdp-keygen xrdp /run/xrdp/rsakeys.ini
           fi
         '';
         serviceConfig = {
diff --git a/nixpkgs/nixos/modules/services/networking/znc/default.nix b/nixpkgs/nixos/modules/services/networking/znc/default.nix
index bce5b15a19ec..1ad8855b86db 100644
--- a/nixpkgs/nixos/modules/services/networking/znc/default.nix
+++ b/nixpkgs/nixos/modules/services/networking/znc/default.nix
@@ -151,7 +151,7 @@ in
         '';
         description = ''
           Configuration for ZNC, see
-          <literal>https://wiki.znc.in/Configuration</literal> for details. The
+          <link xlink:href="https://wiki.znc.in/Configuration"/> for details. The
           Nix value declared here will be translated directly to the xml-like
           format ZNC expects. This is much more flexible than the legacy options
           under <option>services.znc.confOptions.*</option>, but also can't do
diff --git a/nixpkgs/nixos/modules/services/printing/cupsd.nix b/nixpkgs/nixos/modules/services/printing/cupsd.nix
index 854c76cc0a16..9e9bdedff126 100644
--- a/nixpkgs/nixos/modules/services/printing/cupsd.nix
+++ b/nixpkgs/nixos/modules/services/printing/cupsd.nix
@@ -74,7 +74,7 @@ let
     ${concatMapStrings (addr: ''
       Listen ${addr}
     '') cfg.listenAddresses}
-    Listen /var/run/cups/cups.sock
+    Listen /run/cups/cups.sock
 
     SetEnv PATH /var/lib/cups/path/lib/cups/filter:/var/lib/cups/path/bin
 
diff --git a/nixpkgs/nixos/modules/services/scheduling/fcron.nix b/nixpkgs/nixos/modules/services/scheduling/fcron.nix
index ae3828977753..f77b3bcd5921 100644
--- a/nixpkgs/nixos/modules/services/scheduling/fcron.nix
+++ b/nixpkgs/nixos/modules/services/scheduling/fcron.nix
@@ -100,8 +100,8 @@ in
             in
             pkgs.writeText "fcron.conf" ''
               fcrontabs   =       /var/spool/fcron
-              pidfile     =       /var/run/fcron.pid
-              fifofile    =       /var/run/fcron.fifo
+              pidfile     =       /run/fcron.pid
+              fifofile    =       /run/fcron.fifo
               fcronallow  =       /etc/fcron.allow
               fcrondeny   =       /etc/fcron.deny
               shell       =       /bin/sh
diff --git a/nixpkgs/nixos/modules/services/search/kibana.nix b/nixpkgs/nixos/modules/services/search/kibana.nix
index 3539b3ddb4f1..ba58630a467a 100644
--- a/nixpkgs/nixos/modules/services/search/kibana.nix
+++ b/nixpkgs/nixos/modules/services/search/kibana.nix
@@ -5,6 +5,9 @@ with lib;
 let
   cfg = config.services.kibana;
 
+  ge7 = builtins.compareVersions cfg.package.version "7" >= 0;
+  lt6_6 = builtins.compareVersions cfg.package.version "6.6" < 0;
+
   cfgFile = pkgs.writeText "kibana.json" (builtins.toJSON (
     (filterAttrsRecursive (n: v: v != null) ({
       server.host = cfg.listenAddress;
@@ -16,6 +19,7 @@ let
       kibana.defaultAppId = cfg.defaultAppId;
 
       elasticsearch.url = cfg.elasticsearch.url;
+      elasticsearch.hosts = cfg.elasticsearch.hosts;
       elasticsearch.username = cfg.elasticsearch.username;
       elasticsearch.password = cfg.elasticsearch.password;
 
@@ -67,9 +71,30 @@ in {
 
     elasticsearch = {
       url = mkOption {
-        description = "Elasticsearch url";
-        default = "http://localhost:9200";
-        type = types.str;
+        description = ''
+          Elasticsearch url.
+
+          Defaults to <literal>"http://localhost:9200"</literal>.
+
+          Don't set this when using Kibana >= 7.0.0 because it will result in a
+          configuration error. Use <option>services.kibana.elasticsearch.hosts</option>
+          instead.
+        '';
+        default = null;
+        type = types.nullOr types.str;
+      };
+
+      hosts = mkOption {
+        description = ''
+          The URLs of the Elasticsearch instances to use for all your queries.
+          All nodes listed here must be on the same cluster.
+
+          Defaults to <literal>[ "http://localhost:9200" ]</literal>.
+
+          This option is only valid when using Kibana >= 6.6.
+        '';
+        default = null;
+        type = types.nullOr (types.listOf types.str);
       };
 
       username = mkOption {
@@ -143,6 +168,19 @@ in {
   };
 
   config = mkIf (cfg.enable) {
+    assertions = [
+      {
+        assertion = ge7 -> cfg.elasticsearch.url == null;
+        message =
+          "The option services.kibana.elasticsearch.url has been removed when using kibana >= 7.0.0. " +
+          "Please use option services.kibana.elasticsearch.hosts instead.";
+      }
+      {
+        assertion = lt6_6 -> cfg.elasticsearch.hosts == null;
+        message =
+          "The option services.kibana.elasticsearch.hosts is only valid for kibana >= 6.6.";
+      }
+    ];
     systemd.services.kibana = {
       description = "Kibana Service";
       wantedBy = [ "multi-user.target" ];
diff --git a/nixpkgs/nixos/modules/services/search/solr.nix b/nixpkgs/nixos/modules/services/search/solr.nix
index 7200c40e89f7..6659cc8a2d1e 100644
--- a/nixpkgs/nixos/modules/services/search/solr.nix
+++ b/nixpkgs/nixos/modules/services/search/solr.nix
@@ -13,11 +13,19 @@ in
     services.solr = {
       enable = mkEnableOption "Enables the solr service.";
 
+      # Default to the 8.x series without forcing a major version upgrade on those still on the 7.x series
       package = mkOption {
         type = types.package;
-        default = pkgs.solr;
+        default = if versionAtLeast config.system.stateVersion "19.09"
+          then pkgs.solr_8
+          else pkgs.solr_7
+        ;
         defaultText = "pkgs.solr";
-        description = "Which Solr package to use.";
+        description = ''
+          Which Solr package to use. This defaults to version 7.x if
+          <literal>system.stateVersion &lt; 19.09</literal> and version 8.x
+          otherwise.
+        '';
       };
 
       port = mkOption {
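A short sketch of pinning the Solr package explicitly, which bypasses the stateVersion-based default described above; pkgs.solr_7 and pkgs.solr_8 are the attribute names used in this diff.

  { pkgs, ... }:
  {
    services.solr = {
      enable = true;
      # Stay on the 7.x series regardless of system.stateVersion.
      package = pkgs.solr_7;
    };
  }
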
diff --git a/nixpkgs/nixos/modules/services/security/hologram-agent.nix b/nixpkgs/nixos/modules/services/security/hologram-agent.nix
index 39ed506f7617..a5087b0a99b4 100644
--- a/nixpkgs/nixos/modules/services/security/hologram-agent.nix
+++ b/nixpkgs/nixos/modules/services/security/hologram-agent.nix
@@ -45,7 +45,7 @@ in {
       wantedBy    = [ "multi-user.target" ];
       requires    = [ "network-link-dummy0.service" "network-addresses-dummy0.service" ]; 
       preStart = ''
-        /run/current-system/sw/bin/rm -fv /var/run/hologram.sock
+        /run/current-system/sw/bin/rm -fv /run/hologram.sock
       '';
       serviceConfig = {
         ExecStart = "${pkgs.hologram.bin}/bin/hologram-agent -debug -conf ${cfgFile} -port ${cfg.httpPort}";
diff --git a/nixpkgs/nixos/modules/services/web-apps/codimd.nix b/nixpkgs/nixos/modules/services/web-apps/codimd.nix
index 56e1de17e3c3..ee2fc2b9d857 100644
--- a/nixpkgs/nixos/modules/services/web-apps/codimd.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/codimd.nix
@@ -67,7 +67,7 @@ in
       path = mkOption {
         type = types.nullOr types.str;
         default = null;
-        example = "/var/run/codimd.sock";
+        example = "/run/codimd.sock";
         description = ''
           Specify where a UNIX domain socket should be placed.
         '';
diff --git a/nixpkgs/nixos/modules/services/web-apps/documize.nix b/nixpkgs/nixos/modules/services/web-apps/documize.nix
new file mode 100644
index 000000000000..206617b0e5ac
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/documize.nix
@@ -0,0 +1,67 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.documize;
+
+in
+
+  {
+    options.services.documize = {
+      enable = mkEnableOption "Documize Wiki";
+
+      offline = mkEnableOption "Documize offline mode";
+
+      package = mkOption {
+        default = pkgs.documize-community;
+        type = types.package;
+        description = ''
+          Which package to use for Documize.
+        '';
+      };
+
+      db = mkOption {
+        type = types.str;
+        example = "host=localhost port=5432 sslmode=disable user=admin password=secret dbname=documize";
+        description = ''
+          The connection string to use for the database.
+        '';
+      };
+
+      dbtype = mkOption {
+        type = types.enum [ "postgresql" "percona" "mariadb" "mysql" ];
+        description = ''
+          Which database to use for storage.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        example = 3000;
+        description = ''
+          The TCP port to serve Documize on.
+        '';
+      };
+    };
+
+    config = mkIf cfg.enable {
+      systemd.services.documize-server = {
+        wantedBy = [ "multi-user.target" ];
+
+        script = ''
+          ${cfg.package}/bin/documize \
+            -db "${cfg.db}" \
+            -dbtype ${cfg.dbtype} \
+            -port ${toString cfg.port} \
+            -offline ${if cfg.offline then "1" else "0"}
+        '';
+
+        serviceConfig = {
+          Restart = "always";
+          DynamicUser = "yes";
+        };
+      };
+    };
+  }
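
A minimal sketch using the Documize options above; the connection string, database type and port are illustrative values echoing the examples given in the module itself.

  { ... }:
  {
    services.documize = {
      enable = true;
      dbtype = "postgresql";
      db = "host=localhost port=5432 sslmode=disable user=admin password=secret dbname=documize";
      port = 3000;
    };
  }
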
diff --git a/nixpkgs/nixos/modules/services/web-apps/miniflux.nix b/nixpkgs/nixos/modules/services/web-apps/miniflux.nix
new file mode 100644
index 000000000000..1d60004e574d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/miniflux.nix
@@ -0,0 +1,97 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.miniflux;
+
+  dbUser = "miniflux";
+  dbPassword = "miniflux";
+  dbHost = "localhost";
+  dbName = "miniflux";
+
+  defaultCredentials = pkgs.writeText "miniflux-admin-credentials" ''
+    ADMIN_USERNAME=admin
+    ADMIN_PASSWORD=password
+  '';
+
+  pgsu = "${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser}";
+  pgbin = "${config.services.postgresql.package}/bin";
+  preStart = pkgs.writeScript "miniflux-pre-start" ''
+    #!${pkgs.runtimeShell}
+    db_exists() {
+      [ "$(${pgsu} ${pgbin}/psql -Atc "select 1 from pg_database where datname='$1'")" == "1" ]
+    }
+    if ! db_exists "${dbName}"; then
+      ${pgsu} ${pgbin}/psql postgres -c "CREATE ROLE ${dbUser} WITH LOGIN NOCREATEDB NOCREATEROLE ENCRYPTED PASSWORD '${dbPassword}'"
+      ${pgsu} ${pgbin}/createdb --owner "${dbUser}" "${dbName}"
+      ${pgsu} ${pgbin}/psql "${dbName}" -c "CREATE EXTENSION IF NOT EXISTS hstore"
+    fi
+  '';
+in
+
+{
+  options = {
+    services.miniflux = {
+      enable = mkEnableOption "miniflux";
+
+      config = mkOption {
+        type = types.attrsOf types.str;
+        example = literalExample ''
+          {
+            CLEANUP_FREQUENCY = "48";
+            LISTEN_ADDR = "localhost:8080";
+          }
+        '';
+        description = ''
+          Configuration for Miniflux, refer to
+          <link xlink:href="http://docs.miniflux.app/en/latest/configuration.html"/>
+          for documentation on the supported values.
+        '';
+      };
+
+      adminCredentialsFile = mkOption  {
+        type = types.nullOr types.path;
+        default = null;
+        description = ''
+          File containing the ADMIN_USERNAME (default "admin") and the
+          ADMIN_PASSWORD (at least 6 characters, default "password"), in the
+          format of an EnvironmentFile=, as described by systemd.exec(5).
+        '';
+        example = "/etc/nixos/miniflux-admin-credentials";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    services.miniflux.config =  {
+      LISTEN_ADDR = mkDefault "localhost:8080";
+      DATABASE_URL = "postgresql://${dbUser}:${dbPassword}@${dbHost}/${dbName}?sslmode=disable";
+      RUN_MIGRATIONS = "1";
+      CREATE_ADMIN = "1";
+    };
+
+    services.postgresql.enable = true;
+
+    systemd.services.miniflux = {
+      description = "Miniflux service";
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "postgresql.service" ];
+      after = [ "network.target" "postgresql.service" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.miniflux}/bin/miniflux";
+        ExecStartPre = "+${preStart}";
+        DynamicUser = true;
+        RuntimeDirectory = "miniflux";
+        RuntimeDirectoryMode = "0700";
+        EnvironmentFile = if isNull cfg.adminCredentialsFile
+        then defaultCredentials
+        else cfg.adminCredentialsFile;
+      };
+
+      environment = cfg.config;
+    };
+    environment.systemPackages = [ pkgs.miniflux ];
+  };
+}
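
A minimal sketch wiring the Miniflux module together; the credentials path is hypothetical and must contain ADMIN_USERNAME and ADMIN_PASSWORD in the EnvironmentFile format described above, while the config values mirror the module's own example.

  { ... }:
  {
    services.miniflux = {
      enable = true;
      config = {
        CLEANUP_FREQUENCY = "48";
        LISTEN_ADDR = "localhost:8080";
      };
      # Hypothetical path; see adminCredentialsFile above for the expected format.
      adminCredentialsFile = "/etc/nixos/miniflux-admin-credentials";
    };
  }
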
diff --git a/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix b/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
index eedcccac723c..d0e45e1c12a5 100644
--- a/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
@@ -32,7 +32,7 @@ let
     cd ${pkgs.nextcloud}
     exec /run/wrappers/bin/sudo -u nextcloud \
       NEXTCLOUD_CONFIG_DIR="${cfg.home}/config" \
-      ${config.services.phpfpm.phpPackage}/bin/php \
+      ${phpPackage}/bin/php \
       -c ${pkgs.writeText "php.ini" phpOptionsStr}\
       occ $*
   '';
@@ -360,7 +360,7 @@ in {
           environment.NEXTCLOUD_CONFIG_DIR = "${cfg.home}/config";
           serviceConfig.Type = "oneshot";
           serviceConfig.User = "nextcloud";
-          serviceConfig.ExecStart = "${pkgs.php}/bin/php -f ${pkgs.nextcloud}/cron.php";
+          serviceConfig.ExecStart = "${phpPackage}/bin/php -f ${pkgs.nextcloud}/cron.php";
         };
       };
 
diff --git a/nixpkgs/nixos/modules/services/web-apps/restya-board.nix b/nixpkgs/nixos/modules/services/web-apps/restya-board.nix
index b064eae248ed..15fd943a0826 100644
--- a/nixpkgs/nixos/modules/services/web-apps/restya-board.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/restya-board.nix
@@ -13,7 +13,7 @@ let
   runDir = "/run/restya-board";
 
   poolName = "restya-board";
-  phpfpmSocketName = "/var/run/phpfpm/${poolName}.sock";
+  phpfpmSocketName = "/run/phpfpm/${poolName}.sock";
 
 in
 
diff --git a/nixpkgs/nixos/modules/services/web-apps/selfoss.nix b/nixpkgs/nixos/modules/services/web-apps/selfoss.nix
index 7b0ce8a8d03f..cd0f743a5fb8 100644
--- a/nixpkgs/nixos/modules/services/web-apps/selfoss.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/selfoss.nix
@@ -4,7 +4,7 @@ let
   cfg = config.services.selfoss;
 
   poolName = "selfoss_pool";
-  phpfpmSocketName = "/var/run/phpfpm/${poolName}.sock";
+  phpfpmSocketName = "/run/phpfpm/${poolName}.sock";
 
   dataDir = "/var/lib/selfoss";
 
diff --git a/nixpkgs/nixos/modules/services/web-apps/tt-rss.nix b/nixpkgs/nixos/modules/services/web-apps/tt-rss.nix
index f7a3daa5fdd5..08297c7275a4 100644
--- a/nixpkgs/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/tt-rss.nix
@@ -15,7 +15,7 @@ let
     else cfg.database.port;
 
   poolName = "tt-rss";
-  phpfpmSocketName = "/var/run/phpfpm/${poolName}.sock";
+  phpfpmSocketName = "/run/phpfpm/${poolName}.sock";
 
   tt-rss-config = pkgs.writeText "config.php" ''
     <?php
diff --git a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix
index 1eac5be2f8d3..8f00f81b078c 100644
--- a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix
@@ -705,10 +705,7 @@ in
 
         path =
           [ httpd pkgs.coreutils pkgs.gnugrep ]
-          ++ # Needed for PHP's mail() function.  !!! Probably the
-             # ssmtp module should export the path to sendmail in
-             # some way.
-             optional config.networking.defaultMailServer.directDelivery pkgs.ssmtp
+          ++ optional enablePHP pkgs.system-sendmail # Needed for PHP's mail() function.
           ++ concatMap (svc: svc.extraServerPath) allSubservices;
 
         environment =
diff --git a/nixpkgs/nixos/modules/services/web-servers/mighttpd2.nix b/nixpkgs/nixos/modules/services/web-servers/mighttpd2.nix
index 4e7082c67690..f9b1a8b6ccce 100644
--- a/nixpkgs/nixos/modules/services/web-servers/mighttpd2.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/mighttpd2.nix
@@ -22,7 +22,7 @@ in {
         User: root
         # If available, "nobody" is much more secure for Group:.
         Group: root
-        Pid_File: /var/run/mighty.pid
+        Pid_File: /run/mighty.pid
         Logging: Yes # Yes or No
         Log_File: /var/log/mighty # The directory must be writable by User:
         Log_File_Size: 16777216 # bytes
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix
index ea01749349de..3d748d4308b7 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -126,8 +126,10 @@ in {
     services.dleyna-renderer.enable = mkDefault true;
     services.dleyna-server.enable = mkDefault true;
     services.gnome3.at-spi2-core.enable = true;
+    services.gnome3.evince.enable = mkDefault true;
     services.gnome3.evolution-data-server.enable = true;
     services.gnome3.file-roller.enable = mkDefault true;
+    services.gnome3.glib-networking.enable = true;
     services.gnome3.gnome-disks.enable = mkDefault true;
     services.gnome3.gnome-documents.enable = mkDefault true;
     services.gnome3.gnome-keyring.enable = true;
@@ -160,7 +162,11 @@ in {
     # If gnome3 is installed, build vim for gtk3 too.
     nixpkgs.config.vim.gui = "gtk3";
 
-    fonts.fonts = [ pkgs.dejavu_fonts pkgs.cantarell-fonts ];
+    fonts.fonts = [
+      pkgs.dejavu_fonts pkgs.cantarell-fonts
+      pkgs.source-sans-pro
+      pkgs.source-code-pro # Default monospace font in 3.32
+    ];
 
     services.xserver.displayManager.extraSessionFilePackages = [ pkgs.gnome3.gnome-session ]
       ++ map
@@ -200,7 +206,6 @@ in {
     services.xserver.updateDbusEnvironment = true;
 
     environment.variables.GIO_EXTRA_MODULES = [ "${lib.getLib pkgs.gnome3.dconf}/lib/gio/modules"
-                                                "${pkgs.gnome3.glib-networking.out}/lib/gio/modules"
                                                 "${pkgs.gnome3.gvfs}/lib/gio/modules" ];
     environment.systemPackages = pkgs.gnome3.corePackages ++ cfg.sessionPath
       ++ (pkgs.gnome3.removePackagesByName pkgs.gnome3.optionalPackages config.environment.gnome3.excludePackages) ++ [
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/kodi.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/kodi.nix
index 3ce49b9d2bf8..65a7b9c628e5 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/kodi.nix
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/kodi.nix
@@ -20,7 +20,7 @@ in
     services.xserver.desktopManager.session = [{
       name = "kodi";
       start = ''
-        ${pkgs.kodi}/bin/kodi --lircdev /var/run/lirc/lircd --standalone &
+        ${pkgs.kodi}/bin/kodi --lircdev /run/lirc/lircd --standalone &
         waitPID=$!
       '';
     }];
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.nix
index d0278271409a..e1eeb32aa1a0 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.nix
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.nix
@@ -73,8 +73,14 @@ in
 
     # Ensure lightdm is used when Pantheon is enabled
     # Without it screen locking will be nonfunctional because of the use of lightlocker
+
+    warnings = optional (config.services.xserver.displayManager.lightdm.enable != true)
+      ''
+        Using Pantheon without LightDM as a displayManager will break screen locking from the UI.
+      '';
+
     services.xserver.displayManager.lightdm.enable = mkDefault true;
-    services.xserver.displayManager.lightdm.greeters.pantheon.enable = mkDefault true;
+    services.xserver.displayManager.lightdm.greeters.gtk.enable = mkDefault true;
 
     # If not set manually Pantheon session cannot be started
     # Known issue of https://github.com/NixOS/nixpkgs/pull/43992
@@ -116,9 +122,11 @@ in
     # pantheon has pantheon-agent-geoclue2
     services.geoclue2.enableDemoAgent = false;
     services.gnome3.at-spi2-core.enable = true;
+    services.gnome3.evince.enable = mkDefault true;
     services.gnome3.evolution-data-server.enable = true;
     services.gnome3.file-roller.enable = mkDefault true;
     # TODO: gnome-keyring's xdg autostarts will still be in the environment (from elementary-session-settings) if disabled forcefully
+    services.gnome3.glib-networking.enable = true;
     services.gnome3.gnome-keyring.enable = true;
     services.gnome3.gnome-settings-daemon.enable = true;
     services.gnome3.gnome-settings-daemon.package = pkgs.pantheon.elementary-settings-daemon;
@@ -146,7 +154,6 @@ in
 
     environment.variables.GIO_EXTRA_MODULES = [
       "${lib.getLib pkgs.gnome3.dconf}/lib/gio/modules"
-      "${pkgs.gnome3.glib-networking.out}/lib/gio/modules"
       "${pkgs.gnome3.gvfs}/lib/gio/modules"
     ];
 
@@ -162,7 +169,6 @@ in
         gnome3.geary
         gnome3.epiphany
         gnome3.gnome-font-viewer
-        evince
       ] ++ pantheon.apps) config.environment.pantheon.excludePackages)
       ++ (with pkgs;
       [
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix b/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
index 226fee7491c1..3edf7c8d9cab 100644
--- a/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
@@ -208,76 +208,25 @@ in
         session  optional       pam_permit.so
       '';
 
-      gdm.text = ''
-        auth     requisite      pam_nologin.so
-        auth     required       pam_env.so envfile=${config.system.build.pamEnvironment}
-
-        auth     required       pam_succeed_if.so uid >= 1000 quiet
-        auth     optional       ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so
-        auth     ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
-        ${optionalString config.security.pam.enableEcryptfs
-          "auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
-
-        ${optionalString (! config.security.pam.enableEcryptfs)
-          "auth     required       pam_deny.so"}
-
-        account  sufficient     pam_unix.so
-
-        password requisite      pam_unix.so nullok sha512
-        ${optionalString config.security.pam.enableEcryptfs
-          "password optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
-
-        session  required       pam_env.so envfile=${config.system.build.pamEnvironment}
-        session  required       pam_unix.so
-        ${optionalString config.security.pam.enableEcryptfs
-          "session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
-        session  required       pam_loginuid.so
-        session  optional       ${pkgs.systemd}/lib/security/pam_systemd.so
-        session  optional       ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so auto_start
-      '';
-
       gdm-password.text = ''
-        auth     requisite      pam_nologin.so
-        auth     required       pam_env.so envfile=${config.system.build.pamEnvironment}
-
-        auth     required       pam_succeed_if.so uid >= 1000 quiet
-        auth     optional       ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so
-        auth     ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
-        ${optionalString config.security.pam.enableEcryptfs
-          "auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
-        ${optionalString (! config.security.pam.enableEcryptfs)
-          "auth     required       pam_deny.so"}
-
-        account  sufficient     pam_unix.so
-
-        password requisite      pam_unix.so nullok sha512
-        ${optionalString config.security.pam.enableEcryptfs
-          "password optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
-
-        session  required       pam_env.so envfile=${config.system.build.pamEnvironment}
-        session  required       pam_unix.so
-        ${optionalString config.security.pam.enableEcryptfs
-          "session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
-        session  required       pam_loginuid.so
-        session  optional       ${pkgs.systemd}/lib/security/pam_systemd.so
-        session  optional       ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so auto_start
+        auth      substack      login
+        account   include       login
+        password  substack      login
+        session   include       login
       '';
 
       gdm-autologin.text = ''
-        auth     requisite      pam_nologin.so
+        auth      requisite     pam_nologin.so
 
-        auth     required       pam_succeed_if.so uid >= 1000 quiet
-        auth     required       pam_permit.so
+        auth      required      pam_succeed_if.so uid >= 1000 quiet
+        auth      required      pam_permit.so
 
-        account  sufficient     pam_unix.so
+        account   sufficient    pam_unix.so
 
-        password requisite      pam_unix.so nullok sha512
+        password  requisite     pam_unix.so nullok sha512
 
-        session  optional       pam_keyinit.so revoke
-        session  required       pam_env.so envfile=${config.system.build.pamEnvironment}
-        session  required       pam_unix.so
-        session  required       pam_loginuid.so
-        session  optional       ${pkgs.systemd}/lib/security/pam_systemd.so
+        session   optional      pam_keyinit.so revoke
+        session   include       login
       '';
 
     };
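
With the change above, the gdm-password and gdm-autologin stacks delegate to the generated login service instead of repeating its rules. A minimal sketch, assuming the login stack is still built from the usual security.pam options, of a tweak that now only needs to be made once to reach GDM sessions as well:

    # Hypothetical illustration: enabling ecryptfs on the shared PAM machinery
    # should propagate to gdm-password through the "substack login" lines above.
    { ... }: {
      security.pam.enableEcryptfs = true;
    }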
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix
index 05011b999f2b..bfba174144a1 100644
--- a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix
@@ -33,6 +33,13 @@ in
 
   config = mkIf (ldmcfg.enable && cfg.enable) {
 
+    warnings = [
+      ''
+        The Pantheon greeter is not well supported on NixOS and may leave you
+        unable to start a session after switching desktopManagers.
+      ''
+    ];
+
     services.xserver.displayManager.lightdm.greeters.gtk.enable = false;
 
     services.xserver.displayManager.lightdm.greeter = mkDefault {
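
Given the warning added above, a minimal escape-hatch sketch (option paths assumed to match this module) that switches back to the default GTK greeter:

    # Assumed option paths for the greeters involved; falling back to the GTK
    # greeter avoids the broken-session situation described in the warning.
    services.xserver.displayManager.lightdm.greeters.pantheon.enable = false;
    services.xserver.displayManager.lightdm.greeters.gtk.enable = true;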
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix b/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
index a4b57cfdab64..40a1680da537 100644
--- a/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
@@ -221,7 +221,7 @@ in
 
     services.xserver.displayManager.job.execCmd = ''
       ${optionalString (cfg.pulseaudio)
-        "export PULSE_COOKIE=/var/run/pulse/.config/pulse/cookie"}
+        "export PULSE_COOKIE=/run/pulse/.config/pulse/cookie"}
       exec ${pkgs.xpra}/bin/xpra start \
         --daemon=off \
         --log-dir=/var/log \
@@ -233,7 +233,7 @@ in
         --mdns=no \
         --pulseaudio=no \
         ${optionalString (cfg.pulseaudio) "--sound-source=pulse"} \
-        --socket-dirs=/var/run/xpra \
+        --socket-dirs=/run/xpra \
         --xvfb="xpra_Xdummy ${concatStringsSep " " dmcfg.xserverArgs}" \
         ${optionalString (cfg.bindTcp != null) "--bind-tcp=${cfg.bindTcp}"} \
         --auth=${cfg.auth} \
diff --git a/nixpkgs/nixos/modules/system/boot/kernel.nix b/nixpkgs/nixos/modules/system/boot/kernel.nix
index 8ea05ed14687..ab919099d112 100644
--- a/nixpkgs/nixos/modules/system/boot/kernel.nix
+++ b/nixpkgs/nixos/modules/system/boot/kernel.nix
@@ -5,7 +5,7 @@ with lib;
 let
 
   inherit (config.boot) kernelPatches;
-  inherit (config.boot.kernel) features;
+  inherit (config.boot.kernel) features randstructSeed;
   inherit (config.boot.kernelPackages) kernel;
 
   kernelModulesConf = pkgs.writeText "nixos.conf"
@@ -38,6 +38,7 @@ in
       default = pkgs.linuxPackages;
       apply = kernelPackages: kernelPackages.extend (self: super: {
         kernel = super.kernel.override {
+          inherit randstructSeed;
           kernelPatches = super.kernel.kernelPatches ++ kernelPatches;
           features = lib.recursiveUpdate super.kernel.features features;
         };
@@ -67,6 +68,19 @@ in
       description = "A list of additional patches to apply to the kernel.";
     };
 
+    boot.kernel.randstructSeed = mkOption {
+      type = types.str;
+      default = "";
+      example = "my secret seed";
+      description = ''
+        Provides a custom seed for the <varname>RANDSTRUCT</varname> security
+        option of the Linux kernel. Note that <varname>RANDSTRUCT</varname> is
+        only enabled in NixOS hardened kernels. Using a custom seed requires
+        building the kernel and dependent packages locally, since this
+        customization happens at build time.
+      '';
+    };
+
     boot.kernelParams = mkOption {
       type = types.listOf types.str;
       default = [ ];
@@ -298,7 +312,7 @@ in
       # !!! Should this really be needed?
       (isYes "MODULES")
       (isYes "BINFMT_ELF")
-    ];
+    ] ++ (optional (randstructSeed != "") (isYes "GCC_PLUGIN_RANDSTRUCT"));
 
     # nixpkgs kernels are assumed to have all required features
     assertions = if config.boot.kernelPackages.kernel ? features then [] else
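
A minimal configuration sketch for the new boot.kernel.randstructSeed option, assuming a hardened kernel (where RANDSTRUCT is enabled) and accepting the local kernel rebuild mentioned in the description:

    { pkgs, ... }: {
      # Assumption: the hardened kernel set turns on RANDSTRUCT, so the custom
      # seed below actually takes effect; this triggers a local kernel build.
      boot.kernelPackages = pkgs.linuxPackages_hardened;
      boot.kernel.randstructSeed = "my secret seed";
    }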
diff --git a/nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix b/nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix
index d3cf8b29bb46..99aa7759c954 100644
--- a/nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix
+++ b/nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix
@@ -61,7 +61,7 @@ let
       inherit (cfg)
         version extraConfig extraPerEntryConfig extraEntries forceInstall useOSProber
         extraEntriesBeforeNixOS extraPrepareConfig extraInitrd configurationLimit copyKernels
-        default fsIdentifier efiSupport efiInstallAsRemovable gfxmodeEfi gfxmodeBios;
+        default fsIdentifier efiSupport efiInstallAsRemovable gfxmodeEfi gfxmodeBios gfxpayloadEfi gfxpayloadBios;
       path = with pkgs; makeBinPath (
         [ coreutils gnused gnugrep findutils diffutils btrfs-progs utillinux mdadm ]
         ++ optional (cfg.efiSupport && (cfg.version == 2)) efibootmgr
@@ -393,6 +393,24 @@ in
         '';
       };
 
+      gfxpayloadEfi = mkOption {
+        default = "keep";
+        example = "text";
+        type = types.str;
+        description = ''
+          The gfxpayload to pass to GRUB when loading a graphical boot interface under EFI.
+        '';
+      };
+
+      gfxpayloadBios = mkOption {
+        default = "text";
+        example = "keep";
+        type = types.str;
+        description = ''
+          The gfxpayload to pass to GRUB when loading a graphical boot interface under BIOS.
+        '';
+      };
+
       configurationLimit = mkOption {
         default = 100;
         example = 120;
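
As an illustration of the two new GRUB options, a sketch that inverts their defaults (values here are purely examples):

    boot.loader.grub.gfxpayloadEfi = "text";
    boot.loader.grub.gfxpayloadBios = "keep";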
diff --git a/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl b/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl
index bda6a3136407..a36b3c180eb4 100644
--- a/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl
+++ b/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl
@@ -67,6 +67,8 @@ my $efiInstallAsRemovable = get("efiInstallAsRemovable");
 my $efiSysMountPoint = get("efiSysMountPoint");
 my $gfxmodeEfi = get("gfxmodeEfi");
 my $gfxmodeBios = get("gfxmodeBios");
+my $gfxpayloadEfi = get("gfxpayloadEfi");
+my $gfxpayloadBios = get("gfxpayloadBios");
 my $bootloaderId = get("bootloaderId");
 my $forceInstall = get("forceInstall");
 my $font = get("font");
@@ -293,10 +295,10 @@ else {
               insmod gfxterm
               if [ \"\${grub_platform}\" = \"efi\" ]; then
                 set gfxmode=$gfxmodeEfi
-                set gfxpayload=keep
+                set gfxpayload=$gfxpayloadEfi
               else
                 set gfxmode=$gfxmodeBios
-                set gfxpayload=text
+                set gfxpayload=$gfxpayloadBios
               fi
               terminal_output gfxterm
             fi
diff --git a/nixpkgs/nixos/modules/system/boot/stage-1.nix b/nixpkgs/nixos/modules/system/boot/stage-1.nix
index 9984a97bbdd2..8702abd3df83 100644
--- a/nixpkgs/nixos/modules/system/boot/stage-1.nix
+++ b/nixpkgs/nixos/modules/system/boot/stage-1.nix
@@ -11,7 +11,9 @@ let
 
   udev = config.systemd.package;
 
-  modulesTree = config.system.modulesTree;
+  kernel-name = config.boot.kernelPackages.kernel.name or "kernel";
+
+  modulesTree = config.system.modulesTree.override { name = kernel-name + "-modules"; };
   firmware = config.hardware.firmware;
 
 
@@ -290,6 +292,7 @@ let
   # The closure of the init script of boot stage 1 is what we put in
   # the initial RAM disk.
   initialRamdisk = pkgs.makeInitrd {
+    name = "initrd-${kernel-name}";
     inherit (config.boot.initrd) compressor prepend;
 
     contents =
diff --git a/nixpkgs/nixos/modules/virtualisation/docker.nix b/nixpkgs/nixos/modules/virtualisation/docker.nix
index 4ee84c5268e6..ba04dfd57942 100644
--- a/nixpkgs/nixos/modules/virtualisation/docker.nix
+++ b/nixpkgs/nixos/modules/virtualisation/docker.nix
@@ -31,7 +31,7 @@ in
     listenOptions =
       mkOption {
         type = types.listOf types.str;
-        default = ["/var/run/docker.sock"];
+        default = ["/run/docker.sock"];
         description =
           ''
            A list of unix and tcp sockets docker should listen to. The format follows
diff --git a/nixpkgs/nixos/modules/virtualisation/openvswitch.nix b/nixpkgs/nixos/modules/virtualisation/openvswitch.nix
index bb8b9172f23f..47e07e7432cb 100644
--- a/nixpkgs/nixos/modules/virtualisation/openvswitch.nix
+++ b/nixpkgs/nixos/modules/virtualisation/openvswitch.nix
@@ -49,7 +49,7 @@ in {
   config = mkIf cfg.enable (let
 
     # Where the communication sockets live
-    runDir = "/var/run/openvswitch";
+    runDir = "/run/openvswitch";
 
    # The path to an initialized version of the database
     db = pkgs.stdenv.mkDerivation {
@@ -99,13 +99,13 @@ in {
             --certificate=db:Open_vSwitch,SSL,certificate \
             --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert \
             --unixctl=ovsdb.ctl.sock \
-            --pidfile=/var/run/openvswitch/ovsdb.pid \
+            --pidfile=/run/openvswitch/ovsdb.pid \
             --detach \
             /var/db/openvswitch/conf.db
           '';
         Restart = "always";
         RestartSec = 3;
-        PIDFile = "/var/run/openvswitch/ovsdb.pid";
+        PIDFile = "/run/openvswitch/ovsdb.pid";
         # Use service type 'forking' to correctly determine when ovsdb-server is ready.
         Type = "forking";
       };
@@ -123,10 +123,10 @@ in {
       serviceConfig = {
         ExecStart = ''
           ${cfg.package}/bin/ovs-vswitchd \
-          --pidfile=/var/run/openvswitch/ovs-vswitchd.pid \
+          --pidfile=/run/openvswitch/ovs-vswitchd.pid \
           --detach
         '';
-        PIDFile = "/var/run/openvswitch/ovs-vswitchd.pid";
+        PIDFile = "/run/openvswitch/ovs-vswitchd.pid";
         # Use service type 'forking' to correctly determine when vswitchd is ready.
         Type = "forking";
       };
@@ -152,11 +152,11 @@ in {
         ExecStart = ''
           ${cfg.package}/bin/ovs-monitor-ipsec \
             --root-prefix ${runDir}/ipsec \
-            --pidfile /var/run/openvswitch/ovs-monitor-ipsec.pid \
+            --pidfile /run/openvswitch/ovs-monitor-ipsec.pid \
             --monitor --detach \
-            unix:/var/run/openvswitch/db.sock
+            unix:/run/openvswitch/db.sock
         '';
-        PIDFile = "/var/run/openvswitch/ovs-monitor-ipsec.pid";
+        PIDFile = "/run/openvswitch/ovs-monitor-ipsec.pid";
         # Use service type 'forking' to correctly determine when ovs-monitor-ipsec is ready.
         Type = "forking";
       };
@@ -167,7 +167,7 @@ in {
         ln -fs ${pkgs.ipsecTools}/bin/setkey ${runDir}/ipsec/usr/sbin/setkey
         ln -fs ${pkgs.writeScript "racoon-restart" ''
         #!${pkgs.runtimeShell}
-        /var/run/current-system/sw/bin/systemctl $1 racoon
+        /run/current-system/sw/bin/systemctl $1 racoon
         ''} ${runDir}/ipsec/etc/init.d/racoon
       '';
     };
diff --git a/nixpkgs/nixos/tests/all-tests.nix b/nixpkgs/nixos/tests/all-tests.nix
index b366f019f6e0..8b27ff808e6b 100644
--- a/nixpkgs/nixos/tests/all-tests.nix
+++ b/nixpkgs/nixos/tests/all-tests.nix
@@ -65,6 +65,7 @@ in
   docker-registry = handleTest ./docker-registry.nix {};
   docker-tools = handleTestOn ["x86_64-linux"] ./docker-tools.nix {};
   docker-tools-overlay = handleTestOn ["x86_64-linux"] ./docker-tools-overlay.nix {};
+  documize = handleTest ./documize.nix {};
   dovecot = handleTest ./dovecot.nix {};
   # ec2-config doesn't work in a sandbox as the simulated ec2 instance needs network access
   #ec2-config = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-config or {};
@@ -137,6 +138,7 @@ in
   matrix-synapse = handleTest ./matrix-synapse.nix {};
   memcached = handleTest ./memcached.nix {};
   mesos = handleTest ./mesos.nix {};
+  miniflux = handleTest ./miniflux.nix {};
   minio = handleTest ./minio.nix {};
   misc = handleTest ./misc.nix {};
   mongodb = handleTest ./mongodb.nix {};
@@ -193,6 +195,7 @@ in
   predictable-interface-names = handleTest ./predictable-interface-names.nix {};
   printing = handleTest ./printing.nix {};
   prometheus = handleTest ./prometheus.nix {};
+  prometheus2 = handleTest ./prometheus-2.nix {};
   prometheus-exporters = handleTest ./prometheus-exporters.nix {};
   prosody = handleTest ./prosody.nix {};
   proxy = handleTest ./proxy.nix {};
diff --git a/nixpkgs/nixos/tests/common/letsencrypt/default.nix b/nixpkgs/nixos/tests/common/letsencrypt/default.nix
index 73aac51a0126..8fe59bf4e70c 100644
--- a/nixpkgs/nixos/tests/common/letsencrypt/default.nix
+++ b/nixpkgs/nixos/tests/common/letsencrypt/default.nix
@@ -381,7 +381,7 @@ in {
       enableACME = false;
       sslCertificate = siteCertFile;
       sslCertificateKey = siteKeyFile;
-      locations.${tosPath}.extraConfig = "alias ${tosFile};";
+      locations."= ${tosPath}".alias = tosFile;
     };
 
     systemd.services = {
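
The hunk above replaces a raw extraConfig alias with the structured locations attribute; the same pattern in isolation, with a hypothetical host and file path, looks like:

    services.nginx.virtualHosts."example.test" = {
      # Exact-match location served straight from a file via alias.
      locations."= /terms-of-service".alias = "/var/lib/tos/index.html";
    };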
diff --git a/nixpkgs/nixos/tests/documize.nix b/nixpkgs/nixos/tests/documize.nix
new file mode 100644
index 000000000000..8b852a4f7795
--- /dev/null
+++ b/nixpkgs/nixos/tests/documize.nix
@@ -0,0 +1,58 @@
+import ./make-test.nix ({ pkgs, lib, ...} : {
+  name = "documize";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ma27 ];
+  };
+
+  machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.jq ];
+
+    services.documize = {
+      enable = true;
+      port = 3000;
+      dbtype = "postgresql";
+      db = "host=localhost port=5432 sslmode=disable user=documize password=documize dbname=documize";
+    };
+
+    systemd.services.documize-server = {
+      after = [ "postgresql.service" ];
+      requires = [ "postgresql.service" ];
+    };
+
+    services.postgresql = {
+      enable = true;
+      initialScript = pkgs.writeText "psql-init" ''
+        CREATE ROLE documize WITH LOGIN PASSWORD 'documize';
+        CREATE DATABASE documize WITH OWNER documize;
+      '';
+    };
+  };
+
+  testScript = ''
+    startAll;
+
+    $machine->waitForUnit("documize-server.service");
+    $machine->waitForOpenPort(3000);
+
+    my $dbhash = $machine->succeed("curl -f localhost:3000 "
+                                  . " | grep 'property=\"dbhash' "
+                                  . " | grep -Po 'content=\"\\K[^\"]*'"
+                                  );
+
+    chomp($dbhash);
+
+    $machine->succeed("curl -X POST "
+                      . "--data 'dbname=documize' "
+                      . "--data 'dbhash=$dbhash' "
+                      . "--data 'title=NixOS' "
+                      . "--data 'message=Docs' "
+                      . "--data 'firstname=John' "
+                      . "--data 'lastname=Doe' "
+                      . "--data 'email=john.doe\@nixos.org' "
+                      . "--data 'password=verysafe' "
+                      . "-f localhost:3000/api/setup"
+                    );
+
+    $machine->succeed('test "$(curl -f localhost:3000/api/public/meta | jq ".title" | xargs echo)" = "NixOS"');
+  '';
+})
diff --git a/nixpkgs/nixos/tests/elk.nix b/nixpkgs/nixos/tests/elk.nix
index e7ae023f3ff2..3b3fbd73dd5f 100644
--- a/nixpkgs/nixos/tests/elk.nix
+++ b/nixpkgs/nixos/tests/elk.nix
@@ -2,6 +2,8 @@
   config ? {},
   pkgs ? import ../.. { inherit system config; },
   enableUnfree ? false
+  # To run the test on the unfree ELK, use the following command:
+  # NIXPKGS_ALLOW_UNFREE=1 nix-build nixos/tests/elk.nix -A ELK-6 --arg enableUnfree true
 }:
 
 with import ../lib/testing.nix { inherit system pkgs; };
@@ -10,7 +12,9 @@ with pkgs.lib;
 let
   esUrl = "http://localhost:9200";
 
-  mkElkTest = name : elk : makeTest {
+  mkElkTest = name : elk :
+   let elasticsearchGe7 = builtins.compareVersions elk.elasticsearch.version "7" >= 0;
+   in makeTest {
     inherit name;
     meta = with pkgs.stdenv.lib.maintainers; {
       maintainers = [ eelco offline basvandijk ];
@@ -67,11 +71,11 @@ let
               kibana = {
                 enable = true;
                 package = elk.kibana;
-                elasticsearch.url = esUrl;
               };
 
               elasticsearch-curator = {
-                enable = true;
+                # The current version of curator (5.6) doesn't support elasticsearch >= 7.0.0.
+                enable = !elasticsearchGe7;
                 actionYAML = ''
                 ---
                 actions:
@@ -124,7 +128,7 @@ let
      # See if logstash messages arrive in elasticsearch.
       $one->waitUntilSucceeds("curl --silent --show-error '${esUrl}/_search' -H 'Content-Type: application/json' -d '{\"query\" : { \"match\" : { \"message\" : \"flowers\"}}}' | jq .hits.total | grep -v 0");
       $one->waitUntilSucceeds("curl --silent --show-error '${esUrl}/_search' -H 'Content-Type: application/json' -d '{\"query\" : { \"match\" : { \"message\" : \"dragons\"}}}' | jq .hits.total | grep 0");
-
+    '' + optionalString (!elasticsearchGe7) ''
       # Test elasticsearch-curator.
       $one->systemctl("stop logstash");
       $one->systemctl("start elasticsearch-curator");
@@ -149,4 +153,16 @@ in mapAttrs mkElkTest {
       logstash      = pkgs.logstash6-oss;
       kibana        = pkgs.kibana6-oss;
     };
+  "ELK-7" =
+    if enableUnfree
+    then {
+      elasticsearch = pkgs.elasticsearch7;
+      logstash      = pkgs.logstash7;
+      kibana        = pkgs.kibana7;
+    }
+    else {
+      elasticsearch = pkgs.elasticsearch7-oss;
+      logstash      = pkgs.logstash7-oss;
+      kibana        = pkgs.kibana7-oss;
+    };
 }
diff --git a/nixpkgs/nixos/tests/miniflux.nix b/nixpkgs/nixos/tests/miniflux.nix
new file mode 100644
index 000000000000..19ab4803a1d3
--- /dev/null
+++ b/nixpkgs/nixos/tests/miniflux.nix
@@ -0,0 +1,52 @@
+import ./make-test.nix ({ pkgs, lib, ... }:
+
+let
+  port = 3142;
+  username = "alice";
+  password = "correcthorsebatterystaple";
+  defaultPort = 8080;
+  defaultUsername = "admin";
+  defaultPassword = "password";
+in
+with lib;
+{
+  name = "miniflux";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ bricewge ];
+
+  nodes = {
+    default =
+      { ... }:
+      {
+        services.miniflux.enable = true;
+      };
+
+    customized =
+      { ... }:
+      {
+        services.miniflux = {
+          enable = true;
+          config = {
+            CLEANUP_FREQUENCY = "48";
+            LISTEN_ADDR = "localhost:${toString port}";
+          };
+          adminCredentialsFile = pkgs.writeText "admin-credentials" ''
+            ADMIN_USERNAME=${username}
+            ADMIN_PASSWORD=${password}
+          '';
+        };
+      };
+  };
+  testScript = ''
+    startAll;
+
+    $default->waitForUnit('miniflux.service');
+    $default->waitForOpenPort(${toString defaultPort});
+    $default->succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep -q OK");
+    $default->succeed("curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep -q '\"is_admin\":true'");
+
+    $customized->waitForUnit('miniflux.service');
+    $customized->waitForOpenPort(${toString port});
+    $customized->succeed("curl --fail 'http://localhost:${toString port}/healthcheck' | grep -q OK");
+    $customized->succeed("curl 'http://localhost:${toString port}/v1/me' -u '${username}:${password}' -H Content-Type:application/json | grep -q '\"is_admin\":true'");
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nghttpx.nix b/nixpkgs/nixos/tests/nghttpx.nix
index 433562b97191..d41fa01aa9a8 100644
--- a/nixpkgs/nixos/tests/nghttpx.nix
+++ b/nixpkgs/nixos/tests/nghttpx.nix
@@ -1,5 +1,5 @@
 let
-  nginxRoot = "/var/run/nginx";
+  nginxRoot = "/run/nginx";
 in
   import ./make-test.nix ({...}: {
     name  = "nghttpx";
diff --git a/nixpkgs/nixos/tests/nginx.nix b/nixpkgs/nixos/tests/nginx.nix
index 32b113649237..a4d14986a146 100644
--- a/nixpkgs/nixos/tests/nginx.nix
+++ b/nixpkgs/nixos/tests/nginx.nix
@@ -28,6 +28,7 @@ import ./make-test.nix ({ pkgs, ...} : {
         services.nginx.virtualHosts."0.my.test" = {
           extraConfig = ''
             access_log syslog:server=unix:/dev/log,facility=user,tag=mytag,severity=info ceeformat;
+            location /favicon.ico { allow all; access_log off; log_not_found off; }
           '';
         };
       };
diff --git a/nixpkgs/nixos/tests/osquery.nix b/nixpkgs/nixos/tests/osquery.nix
index 281dbcff6643..d95871ffafc6 100644
--- a/nixpkgs/nixos/tests/osquery.nix
+++ b/nixpkgs/nixos/tests/osquery.nix
@@ -11,7 +11,7 @@ with lib;
   machine = {
     services.osquery.enable = true;
     services.osquery.loggerPath = "/var/log/osquery/logs";
-    services.osquery.pidfile = "/var/run/osqueryd.pid";
+    services.osquery.pidfile = "/run/osqueryd.pid";
   };
 
   testScript = ''
@@ -23,6 +23,6 @@ with lib;
       "echo 'SELECT value FROM osquery_flags WHERE name = \"logger_path\";' | osqueryi | grep /var/log/osquery/logs"
     );
 
-    $machine->succeed("echo 'SELECT value FROM osquery_flags WHERE name = \"pidfile\";' | osqueryi | grep /var/run/osqueryd.pid");
+    $machine->succeed("echo 'SELECT value FROM osquery_flags WHERE name = \"pidfile\";' | osqueryi | grep /run/osqueryd.pid");
   '';
 })
diff --git a/nixpkgs/nixos/tests/printing.nix b/nixpkgs/nixos/tests/printing.nix
index f009a7c706ee..e8702c1ffbf1 100644
--- a/nixpkgs/nixos/tests/printing.nix
+++ b/nixpkgs/nixos/tests/printing.nix
@@ -42,7 +42,7 @@ import ./make-test.nix ({pkgs, ... }: {
       # check local encrypted connections work without error
       $client->succeed("lpstat -E -r") =~ /scheduler is running/ or die;
       # Test that UNIX socket is used for connections.
-      $client->succeed("lpstat -H") =~ "/var/run/cups/cups.sock" or die;
+      $client->succeed("lpstat -H") =~ "/run/cups/cups.sock" or die;
       # Test that HTTP server is available too.
       $client->succeed("curl --fail http://localhost:631/");
       $client->succeed("curl --fail http://server:631/");
diff --git a/nixpkgs/nixos/tests/prometheus-2.nix b/nixpkgs/nixos/tests/prometheus-2.nix
new file mode 100644
index 000000000000..d7035d49ad4d
--- /dev/null
+++ b/nixpkgs/nixos/tests/prometheus-2.nix
@@ -0,0 +1,67 @@
+import ./make-test.nix {
+  name = "prometheus-2";
+
+  nodes = {
+    one = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      services.prometheus2 = {
+        enable = true;
+        scrapeConfigs = [
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [ "127.0.0.1:9090" ];
+                labels = { instance = "localhost"; };
+              }
+            ];
+          }
+          {
+            job_name = "pushgateway";
+            scrape_interval = "1s";
+            static_configs = [
+              {
+                targets = [ "127.0.0.1:9091" ];
+              }
+            ];
+          }
+        ];
+        rules = [
+          ''
+            groups:
+              - name: test
+                rules:
+                  - record: testrule
+                    expr: count(up{job="prometheus"})
+          ''
+        ];
+      };
+      services.prometheus.pushgateway = {
+        enable = true;
+        persistMetrics = true;
+        persistence.interval = "1s";
+        stateDir = "prometheus-pushgateway";
+      };
+    };
+  };
+
+  testScript = ''
+    startAll;
+    $one->waitForUnit("prometheus2.service");
+    $one->waitForOpenPort(9090);
+    $one->succeed("curl -s http://127.0.0.1:9090/metrics");
+
+    # Let's test if pushing a metric to the pushgateway succeeds
+    # and whether that metric gets ingested by prometheus.
+    $one->waitForUnit("pushgateway.service");
+    $one->succeed(
+      "echo 'some_metric 3.14' | " .
+      "curl --data-binary \@- http://127.0.0.1:9091/metrics/job/some_job");
+    $one->waitUntilSucceeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' " .
+      "| jq '.data.result[0].value[1]' | grep '\"3.14\"'");
+
+    # Let's test if the pushgateway persists metrics to the configured location.
+    $one->waitUntilSucceeds("test -e /var/lib/prometheus-pushgateway/metrics");
+  '';
+}
diff --git a/nixpkgs/nixos/tests/solr.nix b/nixpkgs/nixos/tests/solr.nix
index 9ba3863411ea..2108e851bc59 100644
--- a/nixpkgs/nixos/tests/solr.nix
+++ b/nixpkgs/nixos/tests/solr.nix
@@ -1,47 +1,65 @@
-import ./make-test.nix ({ pkgs, lib, ... }:
-{
-  name = "solr";
-  meta.maintainers = [ lib.maintainers.aanderse ];
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
 
-  machine =
-    { config, pkgs, ... }:
-    {
-      # Ensure the virtual machine has enough memory for Solr to avoid the following error:
-      #
-      #   OpenJDK 64-Bit Server VM warning:
-      #     INFO: os::commit_memory(0x00000000e8000000, 402653184, 0)
-      #     failed; error='Cannot allocate memory' (errno=12)
-      #
-      #   There is insufficient memory for the Java Runtime Environment to continue.
-      #   Native memory allocation (mmap) failed to map 402653184 bytes for committing reserved memory.
-      virtualisation.memorySize = 2000;
+let
+  solrTest = package: makeTest {
+    machine =
+      { config, pkgs, ... }:
+      {
+        # Ensure the virtual machine has enough memory for Solr to avoid the following error:
+        #
+        #   OpenJDK 64-Bit Server VM warning:
+        #     INFO: os::commit_memory(0x00000000e8000000, 402653184, 0)
+        #     failed; error='Cannot allocate memory' (errno=12)
+        #
+        #   There is insufficient memory for the Java Runtime Environment to continue.
+        #   Native memory allocation (mmap) failed to map 402653184 bytes for committing reserved memory.
+        virtualisation.memorySize = 2000;
 
-      services.solr.enable = true;
-    };
+        services.solr.enable = true;
+        services.solr.package = package;
+      };
 
-  testScript = ''
-    startAll;
+    testScript = ''
+      startAll;
 
-    $machine->waitForUnit('solr.service');
-    $machine->waitForOpenPort('8983');
-    $machine->succeed('curl --fail http://localhost:8983/solr/');
+      $machine->waitForUnit('solr.service');
+      $machine->waitForOpenPort('8983');
+      $machine->succeed('curl --fail http://localhost:8983/solr/');
+
+      # adapted from pkgs.solr/examples/films/README.txt
+      $machine->succeed('sudo -u solr solr create -c films');
+      $machine->succeed(q(curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:application/json' --data-binary '{
+        "add-field" : {
+          "name":"name",
+          "type":"text_general",
+          "multiValued":false,
+          "stored":true
+        },
+        "add-field" : {
+          "name":"initial_release_date",
+          "type":"pdate",
+          "stored":true
+        }
+      }')) =~ /"status":0/ or die;
+      $machine->succeed('sudo -u solr post -c films ${pkgs.solr}/example/films/films.json');
+      $machine->succeed('curl http://localhost:8983/solr/films/query?q=name:batman') =~ /"name":"Batman Begins"/ or die;
+    '';
+  };
+in
+{
+  solr_7 = solrTest pkgs.solr_7 // {
+    name = "solr_7";
+    meta.maintainers = [ lib.maintainers.aanderse ];
+  };
 
-    # adapted from pkgs.solr/examples/films/README.txt
-    $machine->succeed('sudo -u solr solr create -c films');
-    $machine->succeed(q(curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:application/json' --data-binary '{
-      "add-field" : {
-        "name":"name",
-        "type":"text_general",
-        "multiValued":false,
-        "stored":true
-      },
-      "add-field" : {
-        "name":"initial_release_date",
-        "type":"pdate",
-        "stored":true
-      }
-    }')) =~ /"status":0/ or die;
-    $machine->succeed('sudo -u solr post -c films ${pkgs.solr}/example/films/films.json');
-    $machine->succeed('curl http://localhost:8983/solr/films/query?q=name:batman') =~ /"name":"Batman Begins"/ or die;
-  '';
-})
+  solr_8 = solrTest pkgs.solr_8 // {
+    name = "solr_8";
+    meta.maintainers = [ lib.maintainers.aanderse ];
+  };
+}