summary refs log tree commit diff
path: root/nixos
diff options
context:
space:
mode:
authorEelco Dolstra <eelco.dolstra@logicblox.com>2015-10-01 14:17:46 +0200
committerEelco Dolstra <eelco.dolstra@logicblox.com>2015-10-01 14:17:46 +0200
commitb23038dd801fcbfad2980664758d820b29abebae (patch)
treea743ff1a9428dea20b791e9556a32a077293ad41 /nixos
parent96e1b7eaf9eaaa3592906a97e0c3cc4a4552769c (diff)
parent562851a0680c35ecc34e59976e2f8eabe142dca6 (diff)
downloadnixlib-b23038dd801fcbfad2980664758d820b29abebae.tar
nixlib-b23038dd801fcbfad2980664758d820b29abebae.tar.gz
nixlib-b23038dd801fcbfad2980664758d820b29abebae.tar.bz2
nixlib-b23038dd801fcbfad2980664758d820b29abebae.tar.lz
nixlib-b23038dd801fcbfad2980664758d820b29abebae.tar.xz
nixlib-b23038dd801fcbfad2980664758d820b29abebae.tar.zst
nixlib-b23038dd801fcbfad2980664758d820b29abebae.zip
Merge remote-tracking branch 'origin/master' into systemd-219
Diffstat (limited to 'nixos')
-rw-r--r--nixos/doc/manual/default.nix17
-rw-r--r--nixos/doc/manual/release-notes/rl-1509.xml319
-rw-r--r--nixos/lib/make-disk-image.nix115
-rw-r--r--nixos/maintainers/scripts/ec2/amazon-base-config.nix5
-rw-r--r--nixos/maintainers/scripts/ec2/amazon-hvm-config.nix5
-rw-r--r--nixos/maintainers/scripts/ec2/amazon-hvm-install-config.nix34
-rw-r--r--nixos/maintainers/scripts/ec2/amazon-image.nix27
-rwxr-xr-xnixos/maintainers/scripts/ec2/create-amis.sh217
-rwxr-xr-xnixos/maintainers/scripts/ec2/create-ebs-amis.py216
-rwxr-xr-xnixos/maintainers/scripts/ec2/create-s3-amis.sh53
-rw-r--r--nixos/maintainers/scripts/ec2/ebs-creator.nix13
-rw-r--r--nixos/modules/config/fonts/fonts.nix1
-rw-r--r--nixos/modules/config/networking.nix11
-rw-r--r--nixos/modules/config/shells-environment.nix5
-rw-r--r--nixos/modules/installer/cd-dvd/channel.nix2
-rw-r--r--nixos/modules/installer/tools/nixos-install.sh3
-rw-r--r--nixos/modules/installer/tools/nixos-rebuild.sh4
-rw-r--r--nixos/modules/misc/ids.nix2
-rw-r--r--nixos/modules/module-list.nix2
-rw-r--r--nixos/modules/programs/ssh.nix3
-rw-r--r--nixos/modules/programs/venus.nix3
-rw-r--r--nixos/modules/services/amqp/activemq/default.nix3
-rw-r--r--nixos/modules/services/databases/opentsdb.nix16
-rw-r--r--nixos/modules/services/hardware/sane.nix3
-rw-r--r--nixos/modules/services/logging/logstash.nix4
-rw-r--r--nixos/modules/services/mail/postfix.nix3
-rw-r--r--nixos/modules/services/misc/nixos-manual.nix5
-rw-r--r--nixos/modules/services/misc/subsonic.nix4
-rw-r--r--nixos/modules/services/monitoring/grafana.nix6
-rw-r--r--nixos/modules/services/network-filesystems/xtreemfs.nix469
-rw-r--r--nixos/modules/services/networking/bind.nix9
-rw-r--r--nixos/modules/services/networking/dhcpcd.nix1
-rw-r--r--nixos/modules/services/networking/dnscrypt-proxy.nix5
-rw-r--r--nixos/modules/services/networking/seeks.nix2
-rw-r--r--nixos/modules/services/networking/syncthing.nix7
-rw-r--r--nixos/modules/services/web-servers/phpfpm.nix5
-rw-r--r--nixos/modules/services/x11/display-managers/default.nix4
-rw-r--r--nixos/modules/services/x11/display-managers/gdm.nix2
-rw-r--r--nixos/modules/services/x11/display-managers/kdm.nix2
-rw-r--r--nixos/modules/services/x11/display-managers/lightdm.nix4
-rw-r--r--nixos/modules/services/x11/hardware/synaptics.nix9
-rw-r--r--nixos/modules/system/boot/stage-1-init.sh29
-rw-r--r--nixos/modules/system/boot/stage-1.nix7
-rw-r--r--nixos/modules/system/boot/systemd.nix52
-rw-r--r--nixos/modules/tasks/encrypted-devices.nix11
-rw-r--r--nixos/modules/tasks/filesystems.nix20
-rw-r--r--nixos/modules/tasks/filesystems/vboxsf.nix23
-rw-r--r--nixos/modules/tasks/filesystems/zfs.nix16
-rw-r--r--nixos/modules/tasks/network-interfaces-scripted.nix40
-rw-r--r--nixos/modules/tasks/network-interfaces-systemd.nix3
-rw-r--r--nixos/modules/tasks/network-interfaces.nix80
-rw-r--r--nixos/modules/virtualisation/amazon-config.nix3
-rw-r--r--nixos/modules/virtualisation/amazon-grow-partition.nix50
-rw-r--r--nixos/modules/virtualisation/amazon-image.nix105
-rw-r--r--nixos/modules/virtualisation/ec2-data.nix23
-rw-r--r--nixos/modules/virtualisation/growpart-util-linux-2.26.patch88
-rw-r--r--nixos/modules/virtualisation/openvswitch.nix13
-rw-r--r--nixos/modules/virtualisation/virtualbox-guest.nix3
-rw-r--r--nixos/modules/virtualisation/virtualbox-image.nix112
-rw-r--r--nixos/release.nix2
-rw-r--r--nixos/tests/ec2.nix79
-rw-r--r--nixos/tests/gnome3.nix3
-rw-r--r--nixos/tests/make-test.nix2
-rw-r--r--nixos/tests/virtualbox.nix22
64 files changed, 1730 insertions, 676 deletions
diff --git a/nixos/doc/manual/default.nix b/nixos/doc/manual/default.nix
index 419df915e6a4..87964e27bb9c 100644
--- a/nixos/doc/manual/default.nix
+++ b/nixos/doc/manual/default.nix
@@ -31,10 +31,8 @@ let
     else
       fn;
 
-  # Convert the list of options into an XML file.  The builtin
-  # unsafeDiscardStringContext is used to prevent the realisation of
-  # the store paths which are used in options definitions.
-  optionsXML = builtins.toFile "options.xml" (builtins.unsafeDiscardStringContext (builtins.toXML optionsList'));
+  # Convert the list of options into an XML file.
+  optionsXML = builtins.toFile "options.xml" (builtins.toXML optionsList');
 
   optionsDocBook = runCommand "options-db.xml" {} ''
     optionsXML=${optionsXML}
@@ -139,6 +137,8 @@ in rec {
     ''; # */
 
     meta.description = "The NixOS manual in HTML format";
+
+    allowedReferences = ["out"];
   };
 
   manualPDF = stdenv.mkDerivation {
@@ -146,12 +146,9 @@ in rec {
 
     inherit sources;
 
-    buildInputs = [ libxml2 libxslt dblatex tetex ];
+    buildInputs = [ libxml2 libxslt dblatex dblatex.tex ];
 
     buildCommand = ''
-      # TeX needs a writable font cache.
-      export VARTEXFONTS=$TMPDIR/texfonts
-
       ${copySources}
 
       dst=$out/share/doc/nixos
@@ -162,7 +159,7 @@ in rec {
 
       mkdir -p $out/nix-support
       echo "doc-pdf manual $dst/manual.pdf" >> $out/nix-support/hydra-build-products
-    ''; # */
+    '';
   };
 
   # Generate the NixOS manpages.
@@ -190,6 +187,8 @@ in rec {
         ${docbook5_xsl}/xml/xsl/docbook/manpages/docbook.xsl \
         ./man-pages.xml
     '';
+
+    allowedReferences = ["out"];
   };
 
 }
diff --git a/nixos/doc/manual/release-notes/rl-1509.xml b/nixos/doc/manual/release-notes/rl-1509.xml
index 098613f9685a..a68baa0d8078 100644
--- a/nixos/doc/manual/release-notes/rl-1509.xml
+++ b/nixos/doc/manual/release-notes/rl-1509.xml
@@ -4,7 +4,7 @@
          version="5.0"
          xml:id="sec-release-15.09">
 
-<title>Release 15.09 (“Dingo”, 2015/09/??)</title>
+<title>Release 15.09 (“Dingo”, 2015/09/30)</title>
 
 <para>In addition to numerous new and upgraded packages, this release
 has the following highlights:</para>
@@ -12,16 +12,25 @@ has the following highlights:</para>
 <itemizedlist>
 
   <listitem>
-    <para>The Haskell packages infrastructure has been re-designed
-    from the ground up.  NixOS now distributes the latest version of
-    every single package registered on <link
-    xlink:href="http://hackage.haskell.org/">Hackage</link>, i.e. well
-    over 8000 Haskell packages. Further information and usage
-    instructions for the improved infrastructure are available at
-    <link
-    xlink:href="https://nixos.org/wiki/Haskell">https://nixos.org/wiki/Haskell</link>.
-    Users migrating from an earlier release will also find helpful
-    information below, in the list of backwards-incompatible changes.</para>
+    <para>The <link xlink:href="http://haskell.org/">Haskell</link>
+    packages infrastructure has been re-designed from the ground up
+    (&quot;Haskell NG&quot;). NixOS now distributes the latest version
+    of every single package registered on <link
+    xlink:href="http://hackage.haskell.org/">Hackage</link> -- well in
+    excess of 8,000 Haskell packages. Detailed instructions on how to
+    use that infrastructure can be found in the <link
+    xlink:href="http://nixos.org/nixpkgs/manual/#users-guide-to-the-haskell-infrastructure">User's
+    Guide to the Haskell Infrastructure</link>. Users migrating from an
+    earlier release may find helpful information below, in the list of
+    backwards-incompatible changes. Furthermore, we distribute 51(!)
+    additional Haskell package sets that provide every single <link
+    xlink:href="http://www.stackage.org/">LTS Haskell</link> release
+    since version 0.0 as well as the most recent <link
+    xlink:href="http://www.stackage.org/">Stackage Nightly</link>
+    snapshot. The announcement <link
+    xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2015-September/018138.html">&quot;Full
+    Stackage Support in Nixpkgs&quot;</link> gives additional
+    details.</para>
   </listitem>
 
   <listitem>
@@ -47,9 +56,105 @@ system.autoUpgrade.enable = true;
     3.18.</para>
   </listitem>
 
+  <listitem>
+    <para>GNOME has been upgraded to 3.16.
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>Xfce has been upgraded to 4.12.
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>KDE 5 has been upgraded to KDE Frameworks 5.10,
+      Plasma 5.3.2 and Applications 15.04.3.
+      KDE 4 has been updated to kdelibs-4.14.10.
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>E19 has been upgraded to 0.16.8.15.
+    </para>
+  </listitem>
+
 </itemizedlist>
 
 
+<para>The following new services were added since the last release:
+
+  <itemizedlist>
+    <listitem><para><literal>services/mail/exim.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/apache-kafka.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/canto-daemon.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/confd.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/devmon.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/gitit.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/ihaskell.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/mbpfan.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/mediatomb.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/mwlib.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/parsoid.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/plex.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/ripple-rest.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/ripple-data-api.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/subsonic.nix</literal></para></listitem>
+    <listitem><para><literal>services/misc/sundtek.nix</literal></para></listitem>
+    <listitem><para><literal>services/monitoring/cadvisor.nix</literal></para></listitem>
+    <listitem><para><literal>services/monitoring/das_watchdog.nix</literal></para></listitem>
+    <listitem><para><literal>services/monitoring/grafana.nix</literal></para></listitem>
+    <listitem><para><literal>services/monitoring/riemann-tools.nix</literal></para></listitem>
+    <listitem><para><literal>services/monitoring/teamviewer.nix</literal></para></listitem>
+    <listitem><para><literal>services/network-filesystems/u9fs.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/aiccu.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/asterisk.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/bird.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/charybdis.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/docker-registry-server.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/fan.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/firefox/sync-server.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/gateone.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/heyefi.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/i2p.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/lambdabot.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/mstpd.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/nix-serve.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/nylon.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/racoon.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/skydns.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/shout.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/softether.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/sslh.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/tinc.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/tlsdated.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/tox-bootstrapd.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/tvheadend.nix</literal></para></listitem>
+    <listitem><para><literal>services/networking/zerotierone.nix</literal></para></listitem>
+    <listitem><para><literal>services/scheduling/marathon.nix</literal></para></listitem>
+    <listitem><para><literal>services/security/fprintd.nix</literal></para></listitem>
+    <listitem><para><literal>services/security/hologram.nix</literal></para></listitem>
+    <listitem><para><literal>services/security/munge.nix</literal></para></listitem>
+    <listitem><para><literal>services/system/cloud-init.nix</literal></para></listitem>
+    <listitem><para><literal>services/web-servers/shellinabox.nix</literal></para></listitem>
+    <listitem><para><literal>services/web-servers/uwsgi.nix</literal></para></listitem>
+    <listitem><para><literal>services/x11/unclutter.nix</literal></para></listitem>
+    <listitem><para><literal>services/x11/display-managers/sddm.nix</literal></para></listitem>
+    <listitem><para><literal>system/boot/coredump.nix</literal></para></listitem>
+    <listitem><para><literal>system/boot/loader/loader.nix</literal></para></listitem>
+    <listitem><para><literal>system/boot/loader/generic-extlinux-compatible</literal></para></listitem>
+    <listitem><para><literal>system/boot/networkd.nix</literal></para></listitem>
+    <listitem><para><literal>system/boot/resolved.nix</literal></para></listitem>
+    <listitem><para><literal>system/boot/timesyncd.nix</literal></para></listitem>
+    <listitem><para><literal>tasks/filesystems/exfat.nix</literal></para></listitem>
+    <listitem><para><literal>tasks/filesystems/ntfs.nix</literal></para></listitem>
+    <listitem><para><literal>tasks/filesystems/vboxsf.nix</literal></para></listitem>
+    <listitem><para><literal>virtualisation/virtualbox-host.nix</literal></para></listitem>
+    <listitem><para><literal>virtualisation/vmware-guest.nix</literal></para></listitem>
+    <listitem><para><literal>virtualisation/xen-dom0.nix</literal></para></listitem>
+  </itemizedlist>
+</para>
+
+
 <para>When upgrading from a previous release, please be aware of the
 following incompatible changes:
 
@@ -104,56 +209,75 @@ which contains the latest Elm platform.</para></listitem>
 
   <para>Local printers are no longer shared or advertised by
   default. This behavior can be changed by enabling
-  <literal>services.printing.defaultShared</literal> or
-  <literal>services.printing.browsing</literal> respectively.</para>
+  <option>services.printing.defaultShared</option> or
+  <option>services.printing.browsing</option> respectively.</para>
 </listitem>
 
 <listitem>
   <para>
     The VirtualBox host and guest options have been named more
    consistently. They can now be found in
-    <literal>virtualisation.virtualbox.host.*</literal> instead of
-    <literal>services.virtualboxHost.*</literal> and
-    <literal>virtualisation.virtualbox.guest.*</literal> instead of
-    <literal>services.virtualboxGuest.*</literal>.
+    <option>virtualisation.virtualbox.host.*</option> instead of
+    <option>services.virtualboxHost.*</option> and
+    <option>virtualisation.virtualbox.guest.*</option> instead of
+    <option>services.virtualboxGuest.*</option>.
+  </para>
+
+  <para>
+    Also, there now is support for the <literal>vboxsf</literal> file
+    system using the <option>fileSystems</option> configuration
+    attribute. An example of how this can be used in a configuration:
+
+<programlisting>
+fileSystems."/shiny" = {
+  device = "myshinysharedfolder";
+  fsType = "vboxsf";
+};
+</programlisting>
+
   </para>
 </listitem>
 
 <listitem>
   <para>
-    Haskell packages can no longer be found by name, i.e. the commands
-    <literal>nix-env -qa cabal-install</literal> and <literal>nix-env -i
-    ghc</literal> will fail, even though we <emphasis>do</emphasis> ship
-    both <literal>cabal-install</literal> and <literal>ghc</literal>.
-    The reason for this inconvenience is the sheer size of the Haskell
-    package set: name-based lookups such as these would become much
-    slower than they are today if we'd add the entire Hackage database
-    into the top level attribute set. Instead, the list of Haskell
-    packages can be displayed by
+    &quot;<literal>nix-env -qa</literal>&quot; no longer discovers
+    Haskell packages by name. The only packages visible in the global
+    scope are <literal>ghc</literal>, <literal>cabal-install</literal>,
+    and <literal>stack</literal>, but all other packages are hidden. The
+    reason for this inconvenience is the sheer size of the Haskell
+    package set. Name-based lookups are expensive, and most
+    <literal>nix-env -qa</literal> operations would become much slower
+    if we'd add the entire Hackage database into the top level attribute
+    set. Instead, the list of Haskell packages can be displayed by
+    running:
   </para>
   <programlisting>
 nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A haskellPackages
 </programlisting>
   <para>
-    and packages can be installed with:
+    Executable programs written in Haskell can be installed with:
   </para>
   <programlisting>
-nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.cabal-install
+nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.pandoc
 </programlisting>
+  <para>
+    Installing Haskell <emphasis>libraries</emphasis> this way, however, is no
+    longer supported. See the next item for more details.
+  </para>
 </listitem>
 
 <listitem>
   <para>
     Previous versions of NixOS came with a feature called
-    <literal>ghc-wrapper</literal>, a small wrapper script that allows
-    GHC to transparently pick up on libraries installed in the user's
-    profile. This feature has been deprecated;
-    <literal>ghc-wrapper</literal> was removed from the distribution.
-    The proper way to register Haskell libraries with the compiler now
-    is the <literal>haskellPackages.ghcWithPackages</literal>
-    function.
-    <link xlink:href="https://nixos.org/wiki/Haskell">https://nixos.org/wiki/Haskell</link>
-    provides much information about this subject.
+    <literal>ghc-wrapper</literal>, a small script that allowed GHC to
+    transparently pick up on libraries installed in the user's profile. This
+    feature has been deprecated; <literal>ghc-wrapper</literal> was removed
+    from the distribution. The proper way to register Haskell libraries with
+    the compiler now is the <literal>haskellPackages.ghcWithPackages</literal>
+    function. The <link
+    xlink:href="http://nixos.org/nixpkgs/manual/#users-guide-to-the-haskell-infrastructure">User's
+    Guide to the Haskell Infrastructure</link> provides more information about
+    this subject.
   </para>
 </listitem>
 
@@ -203,7 +327,7 @@ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.cabal-install
     The <literal>locate</literal> service no longer indexes the Nix store
     by default, preventing packages with potentially numerous versions from
     cluttering the output. Indexing the store can be activated by setting
-    <literal>services.locate.includeStore = true</literal>.
+    <option>services.locate.includeStore = true</option>.
   </para>
 </listitem>
 
@@ -216,6 +340,107 @@ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.cabal-install
   </para>
 </listitem>
 
+<listitem>
+  <para>
+    Python 2.6 has been marked as broken (as it no longer receives
+    security updates from upstream).
+  </para>
+</listitem>
+
+<listitem>
+  <para>
+    Any use of module arguments such as <varname>pkgs</varname> to access
+    library functions, or to define <literal>imports</literal> attributes
+    will now lead to an infinite loop at the time of the evaluation.
+  </para>
+
+  <para>
+    In case of an infinite loop, use the <command>--show-trace</command>
+    command line argument and read the line just above the error message.
+
+<screen>
+$ nixos-rebuild build --show-trace
+…
+while evaluating the module argument `pkgs' in "/etc/nixos/my-module.nix":
+infinite recursion encountered
+</screen>
+  </para>
+
+
+  <para>
+    Any use of <literal>pkgs.lib</literal>, should be replaced by
+    <varname>lib</varname>, after adding it as argument of the module.  The
+    following module
+
+<programlisting>
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+{
+  options = {
+    foo = mkOption { … };
+  };
+  config = mkIf config.foo { … };
+}
+</programlisting>
+
+   should be modified to look like:
+
+<programlisting>
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options = {
+    foo = mkOption { <replaceable>option declaration</replaceable> };
+  };
+  config = mkIf config.foo { <replaceable>option definition</replaceable> };
+}
+</programlisting>
+  </para>
+
+  <para>
+    When <varname>pkgs</varname> is used to download other projects to
+    import their modules, and only in such cases, it should be replaced by
+    <literal>(import &lt;nixpkgs&gt; {})</literal>.  The following module
+
+<programlisting>
+{ config, pkgs, ... }:
+
+let
+  myProject = pkgs.fetchurl {
+    src = <replaceable>url</replaceable>;
+    sha256 = <replaceable>hash</replaceable>;
+  };
+in
+
+{
+  imports = [ "${myProject}/module.nix" ];
+}
+</programlisting>
+
+    should be modified to look like:
+
+<programlisting>
+{ config, pkgs, ... }:
+
+let
+  myProject = (import &lt;nixpkgs&gt; {}).fetchurl {
+    src = <replaceable>url</replaceable>;
+    sha256 = <replaceable>hash</replaceable>;
+  };
+in
+
+{
+  imports = [ "${myProject}/module.nix" ];
+}
+</programlisting>
+  </para>
+
+</listitem>
+
 </itemizedlist>
 </para>
 
@@ -239,10 +464,26 @@ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.cabal-install
       discovered in the Diffie-Hellman key exchange</link> can now
       replace OpenSSH's default version with one they generated
       themselves using the new
-      <literal>services.openssh.moduliFile</literal> option.
+      <option>services.openssh.moduliFile</option> option.
       </para>
   </listitem>
 
+  <listitem> <para>
+    A newly packaged TeX Live 2015 is provided in <literal>pkgs.texlive</literal>,
+    split into 6500 nix packages. For basic user documentation see
+    <link xlink:href="https://github.com/NixOS/nixpkgs/blob/release-15.09/pkgs/tools/typesetting/tex/texlive-new/default.nix#L1"
+      >the source</link>.
+    Beware of <link xlink:href="https://github.com/NixOS/nixpkgs/issues/9757"
+      >an issue</link> when installing a too large package set.
+
+    The plan is to deprecate and maybe delete the original TeX packages
+    until the next release.
+  </para> </listitem>
+
+  <listitem><para>
+    <option>buildEnv.env</option> on all Python interpreters
+    is now available for nix-shell interoperability.
+  </para> </listitem>
 </itemizedlist>
 
 </para>
diff --git a/nixos/lib/make-disk-image.nix b/nixos/lib/make-disk-image.nix
new file mode 100644
index 000000000000..79c5199cbec4
--- /dev/null
+++ b/nixos/lib/make-disk-image.nix
@@ -0,0 +1,115 @@
+{ pkgs
+, lib
+
+, # The NixOS configuration to be installed onto the disk image.
+  config
+
+, # The size of the disk, in megabytes.
+  diskSize
+
+, # Whether the disk should be partitioned (with a single partition
+  # containing the root filesystem) or contain the root filesystem
+  # directly.
+  partitioned ? true
+
+, # The root file system type.
+  fsType ? "ext4"
+
+, # The initial NixOS configuration file to be copied to
+  # /etc/nixos/configuration.nix.
+  configFile ? null
+
+, # Shell code executed after the VM has finished.
+  postVM ? ""
+
+}:
+
+with lib;
+
+pkgs.vmTools.runInLinuxVM (
+  pkgs.runCommand "nixos-disk-image"
+    { preVM =
+        ''
+          mkdir $out
+          diskImage=$out/nixos.img
+          ${pkgs.vmTools.qemu}/bin/qemu-img create -f raw $diskImage "${toString diskSize}M"
+          mv closure xchg/
+        '';
+      buildInputs = [ pkgs.utillinux pkgs.perl pkgs.e2fsprogs pkgs.parted ];
+      exportReferencesGraph =
+        [ "closure" config.system.build.toplevel ];
+      inherit postVM;
+    }
+    ''
+      ${if partitioned then ''
+        # Create a single / partition.
+        parted /dev/vda mklabel msdos
+        parted /dev/vda -- mkpart primary ext2 1M -1s
+        . /sys/class/block/vda1/uevent
+        mknod /dev/vda1 b $MAJOR $MINOR
+        rootDisk=/dev/vda1
+      '' else ''
+        rootDisk=/dev/vda
+      ''}
+
+      # Create an empty filesystem and mount it.
+      mkfs.${fsType} -L nixos $rootDisk
+      ${optionalString (fsType == "ext4") ''
+        tune2fs -c 0 -i 0 $rootDisk
+      ''}
+      mkdir /mnt
+      mount $rootDisk /mnt
+
+      # The initrd expects these directories to exist.
+      mkdir /mnt/dev /mnt/proc /mnt/sys
+
+      mount -o bind /proc /mnt/proc
+      mount -o bind /dev /mnt/dev
+      mount -o bind /sys /mnt/sys
+
+      # Copy all paths in the closure to the filesystem.
+      storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
+
+      mkdir -p /mnt/nix/store
+      echo "copying everything (will take a while)..."
+      set -f
+      cp -prd $storePaths /mnt/nix/store/
+
+      # Register the paths in the Nix database.
+      printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
+          chroot /mnt ${config.nix.package}/bin/nix-store --load-db --option build-users-group ""
+
+      # Add missing size/hash fields to the database. FIXME:
+      # exportReferencesGraph should provide these directly.
+      chroot /mnt ${config.nix.package}/bin/nix-store --verify --check-contents
+
+      # Create the system profile to allow nixos-rebuild to work.
+      chroot /mnt ${config.nix.package}/bin/nix-env --option build-users-group "" \
+          -p /nix/var/nix/profiles/system --set ${config.system.build.toplevel}
+
+      # `nixos-rebuild' requires an /etc/NIXOS.
+      mkdir -p /mnt/etc
+      touch /mnt/etc/NIXOS
+
+      # `switch-to-configuration' requires a /bin/sh
+      mkdir -p /mnt/bin
+      ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
+
+      # Install a configuration.nix.
+      mkdir -p /mnt/etc/nixos
+      ${optionalString (configFile != null) ''
+        cp ${configFile} /mnt/etc/nixos/configuration.nix
+      ''}
+
+      # Generate the GRUB menu.
+      ln -s vda /dev/xvda
+      ln -s vda /dev/sda
+      chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
+
+      umount /mnt/proc /mnt/dev /mnt/sys
+      umount /mnt
+
+      # Do an fsck to make sure resize2fs works.
+      fsck.${fsType} -f -y $rootDisk
+    ''
+)
diff --git a/nixos/maintainers/scripts/ec2/amazon-base-config.nix b/nixos/maintainers/scripts/ec2/amazon-base-config.nix
deleted file mode 100644
index 28317317eab2..000000000000
--- a/nixos/maintainers/scripts/ec2/amazon-base-config.nix
+++ /dev/null
@@ -1,5 +0,0 @@
-{ modulesPath, ...}:
-{
-  imports = [ "${modulesPath}/virtualisation/amazon-init.nix" ];
-  services.journald.rateLimitBurst = 0;
-}
diff --git a/nixos/maintainers/scripts/ec2/amazon-hvm-config.nix b/nixos/maintainers/scripts/ec2/amazon-hvm-config.nix
deleted file mode 100644
index d0c7f3a6a6c9..000000000000
--- a/nixos/maintainers/scripts/ec2/amazon-hvm-config.nix
+++ /dev/null
@@ -1,5 +0,0 @@
-{ config, pkgs, ...}:
-{
-  imports = [ ./amazon-base-config.nix ];
-  ec2.hvm = true;
-}
diff --git a/nixos/maintainers/scripts/ec2/amazon-hvm-install-config.nix b/nixos/maintainers/scripts/ec2/amazon-hvm-install-config.nix
deleted file mode 100644
index c0ec38bf489a..000000000000
--- a/nixos/maintainers/scripts/ec2/amazon-hvm-install-config.nix
+++ /dev/null
@@ -1,34 +0,0 @@
-{ config, pkgs, lib, ...}:
-let
-  cloudUtils = pkgs.fetchurl {
-    url = "https://launchpad.net/cloud-utils/trunk/0.27/+download/cloud-utils-0.27.tar.gz";
-    sha256 = "16shlmg36lidp614km41y6qk3xccil02f5n3r4wf6d1zr5n4v8vd";
-  };
-  growpart = pkgs.stdenv.mkDerivation {
-    name = "growpart";
-    src = cloudUtils;
-    buildPhase = ''
-      cp bin/growpart $out
-      sed -i 's|awk|gawk|' $out
-      sed -i 's|sed|gnused|' $out
-    '';
-    dontInstall = true;
-    dontPatchShebangs = true;
-  };
-in
-{
-  imports = [ ./amazon-base-config.nix ];
-  ec2.hvm = true;
-  boot.loader.grub.device = lib.mkOverride 0 "/dev/xvdg";
-  boot.kernelParams = [ "console=ttyS0" ];
-
-  boot.initrd.extraUtilsCommands = ''
-    copy_bin_and_libs ${pkgs.gawk}/bin/gawk
-    copy_bin_and_libs ${pkgs.gnused}/bin/sed
-    copy_bin_and_libs ${pkgs.utillinux}/sbin/sfdisk
-    cp -v ${growpart} $out/bin/growpart
-  '';
-  boot.initrd.postDeviceCommands = ''
-    [ -e /dev/xvda ] && [ -e /dev/xvda1 ] && TMPDIR=/run sh $(type -P growpart) /dev/xvda 1
-  '';
-}
diff --git a/nixos/maintainers/scripts/ec2/amazon-image.nix b/nixos/maintainers/scripts/ec2/amazon-image.nix
new file mode 100644
index 000000000000..ef8646c66d1e
--- /dev/null
+++ b/nixos/maintainers/scripts/ec2/amazon-image.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  imports =
+    [ ../../../modules/installer/cd-dvd/channel.nix
+      ../../../modules/virtualisation/amazon-image.nix
+    ];
+
+  system.build.amazonImage = import ../../../lib/make-disk-image.nix {
+    inherit pkgs lib config;
+    partitioned = config.ec2.hvm;
+    diskSize = if config.ec2.hvm then 2048 else 8192;
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        {
+          imports = [ <nixpkgs/nixos/modules/virtualisation/amazon-image.nix> ];
+          ${optionalString config.ec2.hvm ''
+            ec2.hvm = true;
+          ''}
+        }
+      '';
+  };
+
+}
diff --git a/nixos/maintainers/scripts/ec2/create-amis.sh b/nixos/maintainers/scripts/ec2/create-amis.sh
new file mode 100755
index 000000000000..8604091dbcdb
--- /dev/null
+++ b/nixos/maintainers/scripts/ec2/create-amis.sh
@@ -0,0 +1,217 @@
+#! /bin/sh -e
+
+set -o pipefail
+#set -x
+
+stateDir=${TMPDIR:-/tmp}/ec2-image
+echo "keeping state in $stateDir"
+mkdir -p $stateDir
+
+version=$(nix-instantiate --eval --strict '<nixpkgs>' -A lib.nixpkgsVersion | sed s/'"'//g)
+echo "NixOS version is $version"
+
+rm -f ec2-amis.nix
+
+
+for type in hvm pv; do
+    link=$stateDir/$type
+    imageFile=$link/nixos.img
+    system=x86_64-linux
+    arch=x86_64
+
+    # Build the image.
+    if ! [ -L $link ]; then
+        if [ $type = pv ]; then hvmFlag=false; else hvmFlag=true; fi
+
+        echo "building image type '$type'..."
+        nix-build -o $link \
+            '<nixpkgs/nixos>' \
+            -A config.system.build.amazonImage \
+            --arg configuration "{ imports = [ <nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix> ]; ec2.hvm = $hvmFlag; }"
+    fi
+
+    for store in ebs s3; do
+
+        bucket=nixos-amis
+        bucketDir="$version-$type-$store"
+
+        prevAmi=
+        prevRegion=
+
+        for region in eu-west-1 eu-central-1 us-east-1 us-west-1 us-west-2 ap-southeast-1 ap-southeast-2 ap-northeast-1 sa-east-1; do
+
+            name=nixos-$version-$arch-$type-$store
+            description="NixOS $system $version ($type-$store)"
+
+            amiFile=$stateDir/$region.$type.$store.ami-id
+
+            if ! [ -e $amiFile ]; then
+
+                echo "doing $name in $region..."
+
+                if [ -n "$prevAmi" ]; then
+                    ami=$(ec2-copy-image \
+                        --region "$region" \
+                        --source-region "$prevRegion" --source-ami-id "$prevAmi" \
+                        --name "$name" --description "$description" | cut -f 2)
+                else
+
+                    if [ $store = s3 ]; then
+
+                        # Bundle the image.
+                        imageDir=$stateDir/$type-bundled
+
+                        if ! [ -d $imageDir ]; then
+                            rm -rf $imageDir.tmp
+                            mkdir -p $imageDir.tmp
+                            ec2-bundle-image \
+                                -d $imageDir.tmp \
+                                -i $imageFile --arch $arch \
+                                --user "$AWS_ACCOUNT" -c "$EC2_CERT" -k "$EC2_PRIVATE_KEY"
+                            mv $imageDir.tmp $imageDir
+                        fi
+
+                        # Upload the bundle to S3.
+                        if ! [ -e $imageDir/uploaded ]; then
+                            echo "uploading bundle to S3..."
+                            ec2-upload-bundle \
+                                -m $imageDir/nixos.img.manifest.xml \
+                                -b "$bucket/$bucketDir" \
+                                -a "$EC2_ACCESS_KEY" -s "$EC2_SECRET_KEY" \
+                                --location EU
+                            touch $imageDir/uploaded
+                        fi
+
+                        extraFlags="$bucket/$bucketDir/nixos.img.manifest.xml"
+
+                    else
+
+                        # Convert the image to vhd format so we don't have
+                        # to upload a huge raw image.
+                        vhdFile=$stateDir/$type.vhd
+                        if ! [ -e $vhdFile ]; then
+                            qemu-img convert -O vpc $imageFile $vhdFile.tmp
+                            mv $vhdFile.tmp $vhdFile
+                        fi
+
+                        taskId=$(cat $stateDir/$region.$type.task-id 2> /dev/null || true)
+                        volId=$(cat $stateDir/$region.$type.vol-id 2> /dev/null || true)
+                        snapId=$(cat $stateDir/$region.$type.snap-id 2> /dev/null || true)
+
+                        # Import the VHD file.
+                        if [ -z "$snapId" -a -z "$volId" -a -z "$taskId" ]; then
+                            echo "importing $vhdFile..."
+                            taskId=$(ec2-import-volume $vhdFile --no-upload -f vhd \
+                                -o "$EC2_ACCESS_KEY" -w "$EC2_SECRET_KEY" \
+                                --region "$region" -z "${region}a" \
+                                --bucket "$bucket" --prefix "$bucketDir/" \
+                                | tee /dev/stderr \
+                                | sed 's/.*\(import-vol-[0-9a-z]\+\).*/\1/ ; t ; d')
+                            echo -n "$taskId" > $stateDir/$region.$type.task-id
+                        fi
+
+                        if [ -z "$snapId" -a -z "$volId" ]; then
+                            ec2-resume-import  $vhdFile -t "$taskId" --region "$region" \
+                                -o "$EC2_ACCESS_KEY" -w "$EC2_SECRET_KEY"
+                        fi
+
+                        # Wait for the volume creation to finish.
+                        if [ -z "$snapId" -a -z "$volId" ]; then
+                            echo "waiting for import to finish..."
+                            while true; do
+                                volId=$(ec2-describe-conversion-tasks "$taskId" --region "$region" | sed 's/.*VolumeId.*\(vol-[0-9a-f]\+\).*/\1/ ; t ; d')
+                                if [ -n "$volId" ]; then break; fi
+                                sleep 10
+                            done
+
+                            echo -n "$volId" > $stateDir/$region.$type.vol-id
+                        fi
+
+                        # Delete the import task.
+                        if [ -n "$volId" -a -n "$taskId" ]; then
+                            echo "removing import task..."
+                            ec2-delete-disk-image -t "$taskId" --region "$region" -o "$EC2_ACCESS_KEY" -w "$EC2_SECRET_KEY" || true
+                            rm -f $stateDir/$region.$type.task-id
+                        fi
+
+                        # Create a snapshot.
+                        if [ -z "$snapId" ]; then
+                            echo "creating snapshot..."
+                            snapId=$(ec2-create-snapshot "$volId" --region "$region" | cut -f 2)
+                            echo -n "$snapId" > $stateDir/$region.$type.snap-id
+                            ec2-create-tags "$snapId" -t "Name=$description" --region "$region"
+                        fi
+
+                        # Wait for the snapshot to finish.
+                        echo "waiting for snapshot to finish..."
+                        while true; do
+                            status=$(ec2-describe-snapshots "$snapId" --region "$region" | head -n1 | cut -f 4)
+                            if [ "$status" = completed ]; then break; fi
+                            sleep 10
+                        done
+
+                        # Delete the volume.
+                        if [ -n "$volId" ]; then
+                            echo "deleting volume..."
+                            ec2-delete-volume "$volId" --region "$region" || true
+                            rm -f $stateDir/$region.$type.vol-id
+                        fi
+
+                        extraFlags="-b /dev/sda1=$snapId:20:true:gp2"
+
+                        if [ $type = pv ]; then
+                            extraFlags+=" --root-device-name=/dev/sda1"
+                        fi
+
+                        extraFlags+=" -b /dev/sdb=ephemeral0 -b /dev/sdc=ephemeral1 -b /dev/sdd=ephemeral2 -b /dev/sde=ephemeral3"
+                    fi
+
+                    # Register the AMI.
+                    if [ $type = pv ]; then
+                        kernel=$(ec2-describe-images -o amazon --filter "manifest-location=*pv-grub-hd0_1.04-$arch*" --region "$region" | cut -f 2)
+                        [ -n "$kernel" ]
+                        echo "using PV-GRUB kernel $kernel"
+                        extraFlags+=" --virtualization-type paravirtual --kernel $kernel"
+                    else
+                        extraFlags+=" --virtualization-type hvm"
+                    fi
+
+                    ami=$(ec2-register \
+                        -n "$name" \
+                        -d "$description" \
+                        --region "$region" \
+                        --architecture "$arch" \
+                        $extraFlags | cut -f 2)
+                fi
+
+                echo -n "$ami" > $amiFile
+                echo "created AMI $ami of type '$type' in $region..."
+
+            else
+                ami=$(cat $amiFile)
+            fi
+
+            if [ -z "$NO_WAIT" -o -z "$prevAmi" ]; then
+                echo "waiting for AMI..."
+                while true; do
+                    status=$(ec2-describe-images "$ami" --region "$region" | head -n1 | cut -f 5)
+                    if [ "$status" = available ]; then break; fi
+                    sleep 10
+                done
+
+                ec2-modify-image-attribute \
+                    --region "$region" "$ami" -l -a all
+            fi
+
+            echo "region = $region, type = $type, store = $store, ami = $ami"
+            if [ -z "$prevAmi" ]; then
+                prevAmi="$ami"
+                prevRegion="$region"
+            fi
+
+            echo "  \"15.09\".$region.$type-$store = \"$ami\";" >> ec2-amis.nix
+        done
+
+    done
+
+done
diff --git a/nixos/maintainers/scripts/ec2/create-ebs-amis.py b/nixos/maintainers/scripts/ec2/create-ebs-amis.py
deleted file mode 100755
index 44af56c4091b..000000000000
--- a/nixos/maintainers/scripts/ec2/create-ebs-amis.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#! /usr/bin/env python
-
-import os
-import sys
-import time
-import argparse
-import nixops.util
-from nixops import deployment
-from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
-import boto.ec2
-from nixops.statefile import StateFile, get_default_state_file
-
-parser = argparse.ArgumentParser(description='Create an EBS-backed NixOS AMI')
-parser.add_argument('--region', dest='region', required=True, help='EC2 region to create the image in')
-parser.add_argument('--channel', dest='channel', default="14.12", help='Channel to use')
-parser.add_argument('--keep', dest='keep', action='store_true', help='Keep NixOps machine after use')
-parser.add_argument('--hvm', dest='hvm', action='store_true', help='Create HVM image')
-parser.add_argument('--key', dest='key_name', action='store_true', help='Keypair used for HVM instance creation', default="rob")
-args = parser.parse_args()
-
-instance_type = "m3.medium" if args.hvm else "m1.small"
-
-if args.hvm:
-    virtualization_type = "hvm"
-    root_block = "/dev/sda1"
-    image_type = 'hvm'
-else:
-    virtualization_type = "paravirtual"
-    root_block = "/dev/sda"
-    image_type = 'ebs'
-
-ebs_size = 20
-
-# Start a NixOS machine in the given region.
-f = open("ebs-creator-config.nix", "w")
-f.write('''{{
-  resources.ec2KeyPairs.keypair.accessKeyId = "lb-nixos";
-  resources.ec2KeyPairs.keypair.region = "{0}";
-
-  machine =
-    {{ pkgs, ... }}:
-    {{
-      deployment.ec2.accessKeyId = "lb-nixos";
-      deployment.ec2.region = "{0}";
-      deployment.ec2.blockDeviceMapping."/dev/xvdg".size = pkgs.lib.mkOverride 10 {1};
-    }};
-}}
-'''.format(args.region, ebs_size))
-f.close()
-
-db = StateFile(get_default_state_file())
-try:
-    depl = db.open_deployment("ebs-creator")
-except Exception:
-    depl = db.create_deployment()
-    depl.name = "ebs-creator"
-depl.logger.set_autoresponse("y")
-depl.nix_exprs = [os.path.abspath("./ebs-creator.nix"), os.path.abspath("./ebs-creator-config.nix")]
-if not args.keep: depl.destroy_resources()
-depl.deploy(allow_reboot=True)
-
-m = depl.machines['machine']
-
-# Do the installation.
-device="/dev/xvdg"
-if args.hvm:
-    m.run_command('parted -s /dev/xvdg -- mklabel msdos')
-    m.run_command('parted -s /dev/xvdg -- mkpart primary ext2 1M -1s')
-    device="/dev/xvdg1"
-
-m.run_command("if mountpoint -q /mnt; then umount /mnt; fi")
-m.run_command("mkfs.ext4 -L nixos {0}".format(device))
-m.run_command("mkdir -p /mnt")
-m.run_command("mount {0} /mnt".format(device))
-m.run_command("touch /mnt/.ebs")
-m.run_command("mkdir -p /mnt/etc/nixos")
-
-m.run_command("nix-channel --add https://nixos.org/channels/nixos-{} nixos".format(args.channel))
-m.run_command("nix-channel --update")
-
-version = m.run_command("nix-instantiate --eval-only -A lib.nixpkgsVersion '<nixpkgs>'", capture_stdout=True).split(' ')[0].replace('"','').strip()
-print >> sys.stderr, "NixOS version is {0}".format(version)
-if args.hvm:
-    m.upload_file("./amazon-base-config.nix", "/mnt/etc/nixos/amazon-base-config.nix")
-    m.upload_file("./amazon-hvm-config.nix", "/mnt/etc/nixos/configuration.nix")
-    m.upload_file("./amazon-hvm-install-config.nix", "/mnt/etc/nixos/amazon-hvm-install-config.nix")
-    m.run_command("NIXOS_CONFIG=/etc/nixos/amazon-hvm-install-config.nix nixos-install")
-else:
-    m.upload_file("./amazon-base-config.nix", "/mnt/etc/nixos/configuration.nix")
-    m.run_command("nixos-install")
-
-m.run_command("umount /mnt")
-
-if args.hvm:
-    ami_name = "nixos-{0}-x86_64-hvm".format(version)
-    description = "NixOS {0} (x86_64; EBS root; hvm)".format(version)
-else:
-    ami_name = "nixos-{0}-x86_64-ebs".format(version)
-    description = "NixOS {0} (x86_64; EBS root)".format(version)
-
-
-# Wait for the snapshot to finish.
-def check():
-    status = snapshot.update()
-    print >> sys.stderr, "snapshot status is {0}".format(status)
-    return status == '100%'
-
-m.connect()
-volume = m._conn.get_all_volumes([], filters={'attachment.instance-id': m.resource_id, 'attachment.device': "/dev/sdg"})[0]
-
-# Create a snapshot.
-snapshot = volume.create_snapshot(description=description)
-print >> sys.stderr, "created snapshot {0}".format(snapshot.id)
-
-nixops.util.check_wait(check, max_tries=120)
-
-m._conn.create_tags([snapshot.id], {'Name': ami_name})
-
-if not args.keep: depl.destroy_resources()
-
-# Register the image.
-aki = m._conn.get_all_images(filters={'manifest-location': 'ec2*pv-grub-hd0_1.03-x86_64*'})[0]
-print >> sys.stderr, "using kernel image {0} - {1}".format(aki.id, aki.location)
-
-block_map = BlockDeviceMapping()
-block_map[root_block] = BlockDeviceType(snapshot_id=snapshot.id, delete_on_termination=True, size=ebs_size, volume_type="gp2")
-block_map['/dev/sdb'] = BlockDeviceType(ephemeral_name="ephemeral0")
-block_map['/dev/sdc'] = BlockDeviceType(ephemeral_name="ephemeral1")
-block_map['/dev/sdd'] = BlockDeviceType(ephemeral_name="ephemeral2")
-block_map['/dev/sde'] = BlockDeviceType(ephemeral_name="ephemeral3")
-
-common_args = dict(
-        name=ami_name,
-        description=description,
-        architecture="x86_64",
-        root_device_name=root_block,
-        block_device_map=block_map,
-        virtualization_type=virtualization_type,
-        delete_root_volume_on_termination=True
-        )
-if not args.hvm:
-    common_args['kernel_id']=aki.id
-
-ami_id = m._conn.register_image(**common_args)
-
-print >> sys.stderr, "registered AMI {0}".format(ami_id)
-
-print >> sys.stderr, "sleeping a bit..."
-time.sleep(30)
-
-print >> sys.stderr, "setting image name..."
-m._conn.create_tags([ami_id], {'Name': ami_name})
-
-print >> sys.stderr, "making image public..."
-image = m._conn.get_all_images(image_ids=[ami_id])[0]
-image.set_launch_permissions(user_ids=[], group_names=["all"])
-
-# Do a test deployment to make sure that the AMI works.
-f = open("ebs-test.nix", "w")
-f.write(
-    '''
-    {{
-      network.description = "NixOS EBS test";
-
-      resources.ec2KeyPairs.keypair.accessKeyId = "lb-nixos";
-      resources.ec2KeyPairs.keypair.region = "{0}";
-
-      machine = {{ config, pkgs, resources, ... }}: {{
-        deployment.targetEnv = "ec2";
-        deployment.ec2.accessKeyId = "lb-nixos";
-        deployment.ec2.region = "{0}";
-        deployment.ec2.instanceType = "{2}";
-        deployment.ec2.keyPair = resources.ec2KeyPairs.keypair.name;
-        deployment.ec2.securityGroups = [ "public-ssh" ];
-        deployment.ec2.ami = "{1}";
-      }};
-    }}
-    '''.format(args.region, ami_id, instance_type))
-f.close()
-
-test_depl = db.create_deployment()
-test_depl.auto_response = "y"
-test_depl.name = "ebs-creator-test"
-test_depl.nix_exprs = [os.path.abspath("./ebs-test.nix")]
-test_depl.deploy(create_only=True)
-test_depl.machines['machine'].run_command("nixos-version")
-
-# Log the AMI ID.
-f = open("ec2-amis.nix".format(args.region, image_type), "w")
-f.write("{\n")
-
-for dest in [ 'us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1', 'eu-central-1', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'sa-east-1']:
-    copy_image = None
-    if args.region != dest:
-        try:
-            print >> sys.stderr, "copying image from region {0} to {1}".format(args.region, dest)
-            conn = boto.ec2.connect_to_region(dest)
-            copy_image = conn.copy_image(args.region, ami_id, ami_name, description=None, client_token=None)
-        except :
-            print >> sys.stderr, "FAILED!"
-
-        # Log the AMI ID.
-        if copy_image != None:
-            f.write('  "{0}"."{1}".{2} = "{3}";\n'.format(args.channel,dest,"hvm" if args.hvm else "ebs",copy_image.image_id))
-    else:
-        f.write('  "{0}"."{1}".{2} = "{3}";\n'.format(args.channel,args.region,"hvm" if args.hvm else "ebs",ami_id))
-
-
-f.write("}\n")
-f.close()
-
-if not args.keep:
-    test_depl.logger.set_autoresponse("y")
-    test_depl.destroy_resources()
-    test_depl.delete()
-
diff --git a/nixos/maintainers/scripts/ec2/create-s3-amis.sh b/nixos/maintainers/scripts/ec2/create-s3-amis.sh
deleted file mode 100755
index ed861a3944ac..000000000000
--- a/nixos/maintainers/scripts/ec2/create-s3-amis.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#! /bin/sh -e
-
-export NIXOS_CONFIG=$(dirname $(readlink -f $0))/amazon-base-config.nix
-
-version=$(nix-instantiate --eval-only '<nixpkgs/nixos>' -A config.system.nixosVersion | sed s/'"'//g)
-echo "NixOS version is $version"
-
-buildAndUploadFor() {
-    system="$1"
-    arch="$2"
-
-    echo "building $system image..."
-    nix-build '<nixpkgs/nixos>' \
-        -A config.system.build.amazonImage --argstr system "$system" -o ec2-ami
-
-    ec2-bundle-image -i ./ec2-ami/nixos.img --user "$AWS_ACCOUNT" --arch "$arch" \
-        -c "$EC2_CERT" -k "$EC2_PRIVATE_KEY"
-
-    for region in eu-west-1; do
-        echo "uploading $system image for $region..."
-
-        name=nixos-$version-$arch-s3
-        bucket="$(echo $name-$region | tr '[A-Z]_' '[a-z]-')"
-
-        if [ "$region" = eu-west-1 ]; then s3location=EU;
-        elif [ "$region" = us-east-1 ]; then s3location=US;
-        else s3location="$region"
-        fi
-
-        ec2-upload-bundle -b "$bucket" -m /tmp/nixos.img.manifest.xml \
-            -a "$EC2_ACCESS_KEY" -s "$EC2_SECRET_KEY" --location "$s3location" \
-            --url http://s3.amazonaws.com
-
-        kernel=$(ec2-describe-images -o amazon --filter "manifest-location=*pv-grub-hd0_1.04-$arch*" --region "$region" | cut -f 2)
-        echo "using PV-GRUB kernel $kernel"
-
-        ami=$(ec2-register "$bucket/nixos.img.manifest.xml" -n "$name" -d "NixOS $system r$revision" -O "$EC2_ACCESS_KEY" -W "$EC2_SECRET_KEY" \
-            --region "$region" --kernel "$kernel" | cut -f 2)
-
-        echo "AMI ID is $ami"
-
-        echo "  \"14.12\".\"$region\".s3 = \"$ami\";" >> ec2-amis.nix
-
-        ec2-modify-image-attribute --region "$region" "$ami" -l -a all -O "$EC2_ACCESS_KEY" -W "$EC2_SECRET_KEY"
-
-        for cp_region in us-east-1 us-west-1 us-west-2 eu-central-1 ap-southeast-1 ap-southeast-2 ap-northeast-1 sa-east-1; do
-          new_ami=$(aws ec2 copy-image --source-image-id $ami --source-region $region --region $cp_region --name "$name" | json ImageId)
-          echo "  \"14.12\".\"$cp_region\".s3 = \"$new_ami\";" >> ec2-amis.nix  
-        done
-    done
-}
-
-buildAndUploadFor x86_64-linux x86_64
diff --git a/nixos/maintainers/scripts/ec2/ebs-creator.nix b/nixos/maintainers/scripts/ec2/ebs-creator.nix
deleted file mode 100644
index 7bb13695fa78..000000000000
--- a/nixos/maintainers/scripts/ec2/ebs-creator.nix
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  network.description = "NixOS EBS creator";
-
-  machine =
-    { config, pkgs, resources, ... }:
-    { deployment.targetEnv = "ec2";
-      deployment.ec2.instanceType = "c3.large";
-      deployment.ec2.securityGroups = [ "public-ssh" ];
-      deployment.ec2.ebsBoot = false;
-      deployment.ec2.keyPair = resources.ec2KeyPairs.keypair.name;
-      environment.systemPackages = [ pkgs.parted ];
-    };
-}
diff --git a/nixos/modules/config/fonts/fonts.nix b/nixos/modules/config/fonts/fonts.nix
index a3fa4bd97783..ea0a67038572 100644
--- a/nixos/modules/config/fonts/fonts.nix
+++ b/nixos/modules/config/fonts/fonts.nix
@@ -31,6 +31,7 @@ with lib;
         pkgs.xorg.fontbh100dpi
         pkgs.xorg.fontmiscmisc
         pkgs.xorg.fontcursormisc
+        pkgs.unifont
       ];
 
   };
diff --git a/nixos/modules/config/networking.nix b/nixos/modules/config/networking.nix
index f99cea7d17b1..b49f8a156d1d 100644
--- a/nixos/modules/config/networking.nix
+++ b/nixos/modules/config/networking.nix
@@ -39,6 +39,16 @@ in
       '';
     };
 
+    networking.extraResolvconfConf = lib.mkOption {
+      type = types.lines;
+      default = "";
+      example = "libc=NO";
+      description = ''
+        Extra configuration to append to <filename>resolvconf.conf</filename>.
+      '';
+    };
+
+
     networking.proxy = {
 
       default = lib.mkOption {
@@ -150,6 +160,7 @@ in
             '' + optionalString dnsmasqResolve ''
               dnsmasq_conf=/etc/dnsmasq-conf.conf
               dnsmasq_resolv=/etc/dnsmasq-resolv.conf
+            '' + cfg.extraResolvconfConf + ''
             '';
 
       } // (optionalAttrs config.services.resolved.enable (
diff --git a/nixos/modules/config/shells-environment.nix b/nixos/modules/config/shells-environment.nix
index 533280890a70..d0243f9775c5 100644
--- a/nixos/modules/config/shells-environment.nix
+++ b/nixos/modules/config/shells-environment.nix
@@ -57,8 +57,8 @@ in
       type = types.attrsOf (types.listOf types.str);
       example = { PATH = [ "/bin" "/sbin" ]; MANPATH = [ "/man" "/share/man" ]; };
       description = ''
-	Attribute set of environment variable.  Each attribute maps to a list
-	of relative paths.  Each relative path is appended to the each profile
+        Attribute set of environment variable.  Each attribute maps to a list
+        of relative paths.  Each relative path is appended to the each profile
         of <option>environment.profiles</option> to form the content of the
         corresponding environment variable.
       '';
@@ -123,6 +123,7 @@ in
         "''${pkgs.dash}/bin/dash"
       '';
       type = types.path;
+      visible = false;
       description = ''
         The shell executable that is linked system-wide to
         <literal>/bin/sh</literal>. Please note that NixOS assumes all
diff --git a/nixos/modules/installer/cd-dvd/channel.nix b/nixos/modules/installer/cd-dvd/channel.nix
index eccb19da5cb3..ea7e3e16b8df 100644
--- a/nixos/modules/installer/cd-dvd/channel.nix
+++ b/nixos/modules/installer/cd-dvd/channel.nix
@@ -33,7 +33,7 @@ in
         echo "unpacking the NixOS/Nixpkgs sources..."
         mkdir -p /nix/var/nix/profiles/per-user/root
         ${config.nix.package}/bin/nix-env -p /nix/var/nix/profiles/per-user/root/channels \
-          -i ${channelSources} --quiet --option use-substitutes false
+          -i ${channelSources} --quiet --option build-use-substitutes false
         mkdir -m 0700 -p /root/.nix-defexpr
         ln -s /nix/var/nix/profiles/per-user/root/channels /root/.nix-defexpr/channels
         mkdir -m 0755 -p /var/lib/nixos
diff --git a/nixos/modules/installer/tools/nixos-install.sh b/nixos/modules/installer/tools/nixos-install.sh
index f7fe6245d6d9..4e10615f902f 100644
--- a/nixos/modules/installer/tools/nixos-install.sh
+++ b/nixos/modules/installer/tools/nixos-install.sh
@@ -188,6 +188,9 @@ mkdir -m 0755 -p $mountPoint/bin
 ln -sf @shell@ $mountPoint/bin/sh
 
 
+# Build hooks likely won't function correctly in the minimal chroot; just disable them.
+unset NIX_BUILD_HOOK
+
 # Make the build below copy paths from the CD if possible.  Note that
 # /tmp/root in the chroot is the root of the CD.
 export NIX_OTHER_STORES=/tmp/root/nix:$NIX_OTHER_STORES
diff --git a/nixos/modules/installer/tools/nixos-rebuild.sh b/nixos/modules/installer/tools/nixos-rebuild.sh
index 7d0e5913cfb1..af19004cbddb 100644
--- a/nixos/modules/installer/tools/nixos-rebuild.sh
+++ b/nixos/modules/installer/tools/nixos-rebuild.sh
@@ -157,9 +157,9 @@ if [ -n "$buildNix" ]; then
             if ! nix-build '<nixpkgs>' -A nix -o $tmpDir/nix "${extraBuildFlags[@]}" > /dev/null; then
                 machine="$(uname -m)"
                 if [ "$machine" = x86_64 ]; then
-                    nixStorePath=/nix/store/664kxr14kfgx4dl095crvmr7pbh9xlh5-nix-1.9
+                    nixStorePath=/nix/store/xryr9g56h8yjddp89d6dw12anyb4ch7c-nix-1.10
                 elif [[ "$machine" =~ i.86 ]]; then
-                    nixStorePath=/nix/store/p7xdvz72xx3rhm121jclsbdmmcds7xh6-nix-1.9
+                    nixStorePath=/nix/store/2w92k5wlpspf0q2k9mnf2z42prx3bwmv-nix-1.10
                 else
                     echo "$0: unsupported platform"
                     exit 1
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index b0e9ceea10b3..0d2700a126f6 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -233,6 +233,7 @@
       dnschain = 209;
       #lxd = 210; # unused
       kibana = 211;
+      xtreemfs = 212;
 
       # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
 
@@ -444,6 +445,7 @@
       #dnschain = 209; #unused
       lxd = 210; # unused
       #kibana = 211;
+      xtreemfs = 212;
 
       # When adding a gid, make sure it doesn't match an existing
       # uid. Users and groups with the same name should have equal
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index b03f4494522b..c890eac49910 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -257,6 +257,7 @@
   ./services/network-filesystems/diod.nix
   ./services/network-filesystems/u9fs.nix
   ./services/network-filesystems/yandex-disk.nix
+  ./services/network-filesystems/xtreemfs.nix
   ./services/networking/aiccu.nix
   ./services/networking/amuled.nix
   ./services/networking/asterisk.nix
@@ -472,6 +473,7 @@
   ./tasks/filesystems/ntfs.nix
   ./tasks/filesystems/reiserfs.nix
   ./tasks/filesystems/unionfs-fuse.nix
+  ./tasks/filesystems/vboxsf.nix
   ./tasks/filesystems/vfat.nix
   ./tasks/filesystems/xfs.nix
   ./tasks/filesystems/zfs.nix
diff --git a/nixos/modules/programs/ssh.nix b/nixos/modules/programs/ssh.nix
index cf7ef455eb85..87a7bac208b7 100644
--- a/nixos/modules/programs/ssh.nix
+++ b/nixos/modules/programs/ssh.nix
@@ -36,7 +36,6 @@ in
 
       askPassword = mkOption {
         type = types.str;
-        default = "${pkgs.x11_ssh_askpass}/libexec/x11-ssh-askpass";
         description = ''Program used by SSH to ask for passwords.'';
       };
 
@@ -223,5 +222,7 @@ in
         export SSH_ASKPASS=${askPassword}
       '';
 
+    programs.ssh.askPassword = mkDefault "${pkgs.x11_ssh_askpass}/libexec/x11-ssh-askpass";
+
   };
 }
diff --git a/nixos/modules/programs/venus.nix b/nixos/modules/programs/venus.nix
index ca3188b18199..8f85b602fe2c 100644
--- a/nixos/modules/programs/venus.nix
+++ b/nixos/modules/programs/venus.nix
@@ -99,7 +99,6 @@ in
       };
 
       outputTheme = mkOption {
-        default = "${pkgs.venus}/themes/classic_fancy";
         type = types.path;
         description = ''
           Directory containing a config.ini file which is merged with this one.
@@ -170,5 +169,7 @@ in
         startAt = cfg.dates;
       };
 
+    services.venus.outputTheme = mkDefault "${pkgs.venus}/themes/classic_fancy";
+
   };
 }
diff --git a/nixos/modules/services/amqp/activemq/default.nix b/nixos/modules/services/amqp/activemq/default.nix
index 261f97617664..56ff388f8a9e 100644
--- a/nixos/modules/services/amqp/activemq/default.nix
+++ b/nixos/modules/services/amqp/activemq/default.nix
@@ -32,7 +32,6 @@ in {
         '';
       };
       configurationDir = mkOption {
-        default = "${activemq}/conf";
         description = ''
           The base directory for ActiveMQ's configuration.
           By default, this directory is searched for a file named activemq.xml,
@@ -126,6 +125,8 @@ in {
       '';
     };
 
+    services.activemq.configurationDir = mkDefault "${activemq}/conf";
+
   };
 
 }
diff --git a/nixos/modules/services/databases/opentsdb.nix b/nixos/modules/services/databases/opentsdb.nix
index 9c9738570e3f..0e73d4aca0e6 100644
--- a/nixos/modules/services/databases/opentsdb.nix
+++ b/nixos/modules/services/databases/opentsdb.nix
@@ -5,10 +5,7 @@ with lib;
 let
   cfg = config.services.opentsdb;
 
-  configFile = pkgs.writeText "opentsdb.conf" ''
-    tsd.core.auto_create_metrics = true
-    tsd.http.request.enable_chunked  = true
-  '';
+  configFile = pkgs.writeText "opentsdb.conf" cfg.config;
 
 in {
 
@@ -59,6 +56,17 @@ in {
         '';
       };
 
+      config = mkOption {
+        type = types.lines;
+        default = ''
+          tsd.core.auto_create_metrics = true
+          tsd.http.request.enable_chunked  = true
+        '';
+        description = ''
+          The contents of OpenTSDB's configuration file
+        '';
+      };
+
     };
 
   };
diff --git a/nixos/modules/services/hardware/sane.nix b/nixos/modules/services/hardware/sane.nix
index 3bf765c6f991..0428602688dd 100644
--- a/nixos/modules/services/hardware/sane.nix
+++ b/nixos/modules/services/hardware/sane.nix
@@ -36,7 +36,6 @@ in
 
     hardware.sane.configDir = mkOption {
       type = types.string;
-      default = "${saneConfig}/etc/sane.d";
       description = "The value of SANE_CONFIG_DIR.";
     };
 
@@ -47,6 +46,8 @@ in
 
   config = mkIf config.hardware.sane.enable {
 
+    hardware.sane.configDir = mkDefault "${saneConfig}/etc/sane.d";
+
     environment.systemPackages = backends;
     environment.sessionVariables = {
       SANE_CONFIG_DIR = config.hardware.sane.configDir;
diff --git a/nixos/modules/services/logging/logstash.nix b/nixos/modules/services/logging/logstash.nix
index aec45d9286d8..3a798c6f3724 100644
--- a/nixos/modules/services/logging/logstash.nix
+++ b/nixos/modules/services/logging/logstash.nix
@@ -84,10 +84,10 @@ in
         type = types.lines;
         default = ''stdin { type => "example" }'';
         description = "Logstash input configuration.";
-        example = ''
+        example = literalExample ''
           # Read from journal
           pipe {
-            command => "${pkgs.systemd}/bin/journalctl -f -o json"
+            command => "''${pkgs.systemd}/bin/journalctl -f -o json"
             type => "syslog" codec => json {}
           }
         '';
diff --git a/nixos/modules/services/mail/postfix.nix b/nixos/modules/services/mail/postfix.nix
index 29e0cf7a8829..e8beba4b3586 100644
--- a/nixos/modules/services/mail/postfix.nix
+++ b/nixos/modules/services/mail/postfix.nix
@@ -77,7 +77,8 @@ let
       smtpd_tls_key_file = ${cfg.sslKey}
 
       smtpd_use_tls = yes
-
+    ''
+    + optionalString (cfg.recipientDelimiter != "") ''
       recipient_delimiter = ${cfg.recipientDelimiter}
     ''
     + optionalString (cfg.virtual != "") ''
diff --git a/nixos/modules/services/misc/nixos-manual.nix b/nixos/modules/services/misc/nixos-manual.nix
index c570a01fb3b2..c10d8197686f 100644
--- a/nixos/modules/services/misc/nixos-manual.nix
+++ b/nixos/modules/services/misc/nixos-manual.nix
@@ -80,7 +80,6 @@ in
 
     services.nixosManual.browser = mkOption {
       type = types.path;
-      default = "${pkgs.w3m}/bin/w3m";
       description = ''
         Browser used to show the manual.
       '';
@@ -93,7 +92,7 @@ in
 
     system.build.manual = manual;
 
-    environment.systemPackages = [ manual.manpages help ];
+    environment.systemPackages = [ manual.manpages manual.manual help ];
 
     boot.extraTTYs = mkIf cfg.showManual ["tty${cfg.ttyNumber}"];
 
@@ -116,6 +115,8 @@ in
     services.mingetty.helpLine = mkIf cfg.showManual
       "\nPress <Alt-F${toString cfg.ttyNumber}> for the NixOS manual.";
 
+    services.nixosManual.browser = mkDefault "${pkgs.w3m}/bin/w3m";
+
   };
 
 }
diff --git a/nixos/modules/services/misc/subsonic.nix b/nixos/modules/services/misc/subsonic.nix
index 3e1a2e8fbb51..4d164ad8d65f 100644
--- a/nixos/modules/services/misc/subsonic.nix
+++ b/nixos/modules/services/misc/subsonic.nix
@@ -97,7 +97,6 @@ in
 
       transcoders = mkOption {
         type = types.listOf types.path;
-        default = [ "${pkgs.ffmpeg}/bin/ffmpeg" ];
         description = ''
           List of paths to transcoder executables that should be accessible
           from Subsonic. Symlinks will be created to each executable inside
@@ -153,5 +152,8 @@ in
     };
 
     users.extraGroups.subsonic.gid = config.ids.gids.subsonic;
+
+    services.subsonic.transcoders = mkDefault [ "${pkgs.ffmpeg}/bin/ffmpeg" ];
+
   };
 }
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 0393d01054d1..fa653565a67f 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -200,7 +200,6 @@ in {
 
     staticRootPath = mkOption {
       description = "Root path for static assets.";
-      default = "${cfg.package.out}/share/go/src/github.com/grafana/grafana/public";
       type = types.str;
     };
 
@@ -311,7 +310,7 @@ in {
 
   config = mkIf cfg.enable {
     warnings = [
-      "Grafana passwords will be stored as plaintext in nix store!"
+      "Grafana passwords will be stored as plaintext in the Nix store!"
     ];
 
     systemd.services.grafana = {
@@ -331,5 +330,8 @@ in {
       home = cfg.dataDir;
       createHome = true;
     };
+
+    services.grafana.staticRootPath = mkDefault "${cfg.package.out}/share/go/src/github.com/grafana/grafana/public";
+
   };
 }
diff --git a/nixos/modules/services/network-filesystems/xtreemfs.nix b/nixos/modules/services/network-filesystems/xtreemfs.nix
new file mode 100644
index 000000000000..b051214e1d08
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/xtreemfs.nix
@@ -0,0 +1,469 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xtreemfs;
+
+  xtreemfs = pkgs.xtreemfs;
+
+  home = cfg.homeDir;
+
+  startupScript = class: configPath: pkgs.writeScript "xtreemfs-osd.sh" ''
+    #! ${pkgs.stdenv.shell}
+    JAVA_HOME="${pkgs.jdk}"
+    JAVADIR="${xtreemfs}/share/java"
+    JAVA_CALL="$JAVA_HOME/bin/java -ea -cp $JAVADIR/XtreemFS.jar:$JAVADIR/BabuDB.jar:$JAVADIR/Flease.jar:$JAVADIR/protobuf-java-2.5.0.jar:$JAVADIR/Foundation.jar:$JAVADIR/jdmkrt.jar:$JAVADIR/jdmktk.jar:$JAVADIR/commons-codec-1.3.jar"
+    $JAVA_CALL ${class} ${configPath}
+  '';
+
+  dirReplicationConfig = pkgs.writeText "xtreemfs-dir-replication-plugin.properties" ''
+    babudb.repl.backupDir = ${home}/server-repl-dir
+    plugin.jar = ${xtreemfs}/share/java/BabuDB_replication_plugin.jar
+    babudb.repl.dependency.0 = ${xtreemfs}/share/java/Flease.jar
+
+    ${cfg.dir.replication.extraConfig}
+  '';
+
+  dirConfig = pkgs.writeText "xtreemfs-dir-config.properties" ''
+    uuid = ${cfg.dir.uuid}
+    listen.port = ${toString cfg.dir.port}
+    ${optionalString (cfg.dir.address != "") "listen.address = ${cfg.dir.address}"}
+    http_port = ${toString cfg.dir.httpPort}
+    babudb.baseDir = ${home}/dir/database
+    babudb.logDir = ${home}/dir/db-log
+    babudb.sync = ${if cfg.dir.replication.enable then "FDATASYNC" else cfg.dir.syncMode}
+
+    ${optionalString cfg.dir.replication.enable "babudb.plugin.0 = ${dirReplicationConfig}"}
+
+    ${cfg.dir.extraConfig}
+  '';
+
+  mrcReplicationConfig = pkgs.writeText "xtreemfs-mrc-replication-plugin.properties" ''
+    babudb.repl.backupDir = ${home}/server-repl-mrc
+    plugin.jar = ${xtreemfs}/share/java/BabuDB_replication_plugin.jar
+    babudb.repl.dependency.0 = ${xtreemfs}/share/java/Flease.jar
+
+    ${cfg.mrc.replication.extraConfig}
+  '';
+
+  mrcConfig = pkgs.writeText "xtreemfs-mrc-config.properties" ''
+    uuid = ${cfg.mrc.uuid}
+    listen.port = ${toString cfg.mrc.port}
+    ${optionalString (cfg.mrc.address != "") "listen.address = ${cfg.mrc.address}"}
+    http_port = ${toString cfg.mrc.httpPort}
+    babudb.baseDir = ${home}/mrc/database
+    babudb.logDir = ${home}/mrc/db-log
+    babudb.sync = ${if cfg.mrc.replication.enable then "FDATASYNC" else cfg.mrc.syncMode}
+
+    ${optionalString cfg.mrc.replication.enable "babudb.plugin.0 = ${mrcReplicationConfig}"}
+
+    ${cfg.mrc.extraConfig}
+  '';
+
+  osdConfig = pkgs.writeText "xtreemfs-osd-config.properties" ''
+    uuid = ${cfg.osd.uuid}
+    listen.port = ${toString cfg.osd.port}
+    ${optionalString (cfg.osd.address != "") "listen.address = ${cfg.osd.address}"}
+    http_port = ${toString cfg.osd.httpPort}
+    object_dir = ${home}/osd/
+
+    ${cfg.osd.extraConfig}
+  '';
+
+  optionalDir = optionals cfg.dir.enable ["xtreemfs-dir.service"];
+
+  systemdOptionalDependencies = {
+    after = [ "network.target" ] ++ optionalDir;
+    wantedBy = [ "multi-user.target" ] ++ optionalDir;
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.xtreemfs = {
+
+      enable = mkEnableOption "XtreemFS";
+
+      homeDir = mkOption {
+        default = "/var/lib/xtreemfs";
+        description = ''
+          XtreemFS home dir for the xtreemfs user.
+        '';
+      };
+
+      dir = {
+        enable = mkOption {
+          default = true;
+          description = ''
+            Whether to enable XtreemFS DIR service.
+          '';
+        };
+        uuid = mkOption {
+          example = "eacb6bab-f444-4ebf-a06a-3f72d7465e40";
+          description = ''
+            Must be set to a unique identifier, preferably a UUID according to
+            RFC 4122. UUIDs can be generated with `uuidgen` command, found in
+            the `utillinux` package.
+          '';
+        };
+        port = mkOption {
+          default = 32638;
+          description = ''
+            The port to listen on for incoming connections (TCP).
+          '';
+        };
+        address = mkOption {
+          example = "127.0.0.1";
+          default = "";
+          description = ''
+            If specified, it defines the interface to listen on. If not
+            specified, the service will listen on all interfaces (any).
+          '';
+        };
+        httpPort = mkOption {
+          default = 30638;
+          description = ''
+            Specifies the listen port for the HTTP service that returns the
+            status page.
+          '';
+        };
+        syncMode = mkOption {
+          default = "FSYNC";
+          example = "FDATASYNC";
+          description = ''
+            The sync mode influences how operations are committed to the disk
+            log before the operation is acknowledged to the caller.
+
+            -ASYNC mode the writes to the disk log are buffered in memory by the operating system. This is the fastest mode but will lead to data loss in case of a crash, kernel panic or power failure.
+            -SYNC_WRITE_METADATA opens the file with O_SYNC, the system will not buffer any writes. The operation will be acknowledged when data has been safely written to disk. This mode is slow but offers maximum data safety. However, BabuDB cannot influence the disk drive caches, this depends on the OS and hard disk model.
 +            -SYNC_WRITE similar to SYNC_WRITE_METADATA but opens file with O_DSYNC which means that only the data is committed to disk. This can lead to some data loss depending on the implementation of the underlying file system. Linux does not implement this mode.
+            -FDATASYNC is similar to SYNC_WRITE but opens the file in asynchronous mode and calls fdatasync() after writing the data to disk.
+            -FSYNC is similar to SYNC_WRITE_METADATA but opens the file in asynchronous mode and calls fsync() after writing the data to disk.
+
+            For best throughput use ASYNC, for maximum data safety use FSYNC.
+
+            (If xtreemfs.dir.replication.enable is true then FDATASYNC is forced)
+          '';
+        };
+        extraConfig = mkOption {
+          default = "";
+          example = ''
+            # specify whether SSL is required
+            ssl.enabled = true
+            ssl.service_creds.pw = passphrase
+            ssl.service_creds.container = pkcs12
+            ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/dir.p12
+            ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+            ssl.trusted_certs.pw = jks_passphrase
+            ssl.trusted_certs.container = jks
+          '';
+          description = ''
+            Configuration of XtreemFS DIR service.
+            WARNING: configuration is saved as plaintext inside nix store.
+            For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+          '';
+        };
+        replication = {
+          enable = mkEnableOption "XtreemFS DIR replication plugin";
+          extraConfig = mkOption {
+            example = ''
+              # participants of the replication including this replica
+              babudb.repl.participant.0 = 192.168.0.10
+              babudb.repl.participant.0.port = 35676
+              babudb.repl.participant.1 = 192.168.0.11
+              babudb.repl.participant.1.port = 35676
+              babudb.repl.participant.2 = 192.168.0.12
+              babudb.repl.participant.2.port = 35676
+
+              # number of servers that at least have to be up to date
+              # To have a fault-tolerant system, this value has to be set to the
+              # majority of nodes i.e., if you have three replicas, set this to 2
+              # Please note that a setup with two nodes provides no fault-tolerance.
+              babudb.repl.sync.n = 2
+
+              # specify whether SSL is required
+              babudb.ssl.enabled = true
+
+              babudb.ssl.protocol = tlsv12
+
+              # server credentials for SSL handshakes
+              babudb.ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+              babudb.ssl.service_creds.pw = passphrase
+              babudb.ssl.service_creds.container = pkcs12
+
+              # trusted certificates for SSL handshakes
+              babudb.ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+              babudb.ssl.trusted_certs.pw = jks_passphrase
+              babudb.ssl.trusted_certs.container = jks
+
+              babudb.ssl.authenticationWithoutEncryption = false
+            '';
+            description = ''
+              Configuration of XtreemFS DIR replication plugin.
+              WARNING: configuration is saved as plaintext inside nix store.
+              For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+            '';
+          };
+        };
+      };
+
+      mrc = {
+        enable = mkOption {
+          default = true;
+          description = ''
+            Whether to enable XtreemFS MRC service.
+          '';
+        };
+        uuid = mkOption {
+          example = "eacb6bab-f444-4ebf-a06a-3f72d7465e41";
+          description = ''
+            Must be set to a unique identifier, preferably a UUID according to
+            RFC 4122. UUIDs can be generated with `uuidgen` command, found in
+            the `utillinux` package.
+          '';
+        };
+        port = mkOption {
+          default = 32636;
+          description = ''
+            The port to listen on for incoming connections (TCP).
+          '';
+        };
+        address = mkOption {
+          example = "127.0.0.1";
+          default = "";
+          description = ''
+            If specified, it defines the interface to listen on. If not
+            specified, the service will listen on all interfaces (any).
+          '';
+        };
+        httpPort = mkOption {
+          default = 30636;
+          description = ''
+            Specifies the listen port for the HTTP service that returns the
+            status page.
+          '';
+        };
+        syncMode = mkOption {
+          default = "FSYNC";
+          example = "FDATASYNC";
+          description = ''
+            The sync mode influences how operations are committed to the disk
+            log before the operation is acknowledged to the caller.
+
+            -ASYNC mode the writes to the disk log are buffered in memory by the operating system. This is the fastest mode but will lead to data loss in case of a crash, kernel panic or power failure.
+            -SYNC_WRITE_METADATA opens the file with O_SYNC, the system will not buffer any writes. The operation will be acknowledged when data has been safely written to disk. This mode is slow but offers maximum data safety. However, BabuDB cannot influence the disk drive caches, this depends on the OS and hard disk model.
 +            -SYNC_WRITE similar to SYNC_WRITE_METADATA but opens file with O_DSYNC which means that only the data is committed to disk. This can lead to some data loss depending on the implementation of the underlying file system. Linux does not implement this mode.
+            -FDATASYNC is similar to SYNC_WRITE but opens the file in asynchronous mode and calls fdatasync() after writing the data to disk.
+            -FSYNC is similar to SYNC_WRITE_METADATA but opens the file in asynchronous mode and calls fsync() after writing the data to disk.
+
+            For best throughput use ASYNC, for maximum data safety use FSYNC.
+
+            (If xtreemfs.mrc.replication.enable is true then FDATASYNC is forced)
+          '';
+        };
+        extraConfig = mkOption {
+          example = ''
+            osd_check_interval = 300
+            no_atime = true
+            local_clock_renewal = 0
+            remote_time_sync = 30000
+            authentication_provider = org.xtreemfs.common.auth.NullAuthProvider
+
+            # shared secret between the MRC and all OSDs
+            capability_secret = iNG8UuQJrJ6XVDTe
+
+            dir_service.host = 192.168.0.10
+            dir_service.port = 32638
+
+            # if replication is enabled
+            dir_service.1.host = 192.168.0.11
+            dir_service.1.port = 32638
+            dir_service.2.host = 192.168.0.12
+            dir_service.2.port = 32638
+
+            # specify whether SSL is required
+            ssl.enabled = true
+            ssl.protocol = tlsv12
+            ssl.service_creds.pw = passphrase
+            ssl.service_creds.container = pkcs12
+            ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/mrc.p12
+            ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+            ssl.trusted_certs.pw = jks_passphrase
+            ssl.trusted_certs.container = jks
+          '';
+          description = ''
+            Configuration of XtreemFS MRC service.
+            WARNING: configuration is saved as plaintext inside nix store.
+            For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+          '';
+        };
+        replication = {
+          enable = mkEnableOption "XtreemFS MRC replication plugin";
+          extraConfig = mkOption {
+            example = ''
+              # participants of the replication including this replica
+              babudb.repl.participant.0 = 192.168.0.10
+              babudb.repl.participant.0.port = 35678
+              babudb.repl.participant.1 = 192.168.0.11
+              babudb.repl.participant.1.port = 35678
+              babudb.repl.participant.2 = 192.168.0.12
+              babudb.repl.participant.2.port = 35678
+
+              # number of servers that at least have to be up to date
+              # To have a fault-tolerant system, this value has to be set to the
+              # majority of nodes i.e., if you have three replicas, set this to 2
+              # Please note that a setup with two nodes provides no fault-tolerance.
+              babudb.repl.sync.n = 2
+
+              # specify whether SSL is required
+              babudb.ssl.enabled = true
+
+              babudb.ssl.protocol = tlsv12
+
+              # server credentials for SSL handshakes
+              babudb.ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+              babudb.ssl.service_creds.pw = passphrase
+              babudb.ssl.service_creds.container = pkcs12
+
+              # trusted certificates for SSL handshakes
+              babudb.ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+              babudb.ssl.trusted_certs.pw = jks_passphrase
+              babudb.ssl.trusted_certs.container = jks
+
+              babudb.ssl.authenticationWithoutEncryption = false
+            '';
+            description = ''
+              Configuration of XtreemFS MRC replication plugin.
+              WARNING: configuration is saved as plaintext inside nix store.
+              For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+            '';
+          };
+        };
+      };
+
+      osd = {
+        enable = mkOption {
+          default = true;
+          description = ''
+            Whether to enable XtreemFS OSD service.
+          '';
+        };
+        uuid = mkOption {
+          example = "eacb6bab-f444-4ebf-a06a-3f72d7465e42";
+          description = ''
+            Must be set to a unique identifier, preferably a UUID according to
+            RFC 4122. UUIDs can be generated with `uuidgen` command, found in
+            the `utillinux` package.
+          '';
+        };
+        port = mkOption {
+          default = 32640;
+          description = ''
+            The port to listen on for incoming connections (TCP and UDP).
+          '';
+        };
+        address = mkOption {
+          example = "127.0.0.1";
+          default = "";
+          description = ''
+            If specified, it defines the interface to listen on. If not
+            specified, the service will listen on all interfaces (any).
+          '';
+        };
+        httpPort = mkOption {
+          default = 30640;
+          description = ''
+            Specifies the listen port for the HTTP service that returns the
+            status page.
+          '';
+        };
+        extraConfig = mkOption {
+          example = ''
+            local_clock_renewal = 0
+            remote_time_sync = 30000
+            report_free_space = true
+            capability_secret = iNG8UuQJrJ6XVDTe
+
+            dir_service.host = 192.168.0.10
+            dir_service.port = 32638
+
+            # if replication is used
+            dir_service.1.host = 192.168.0.11
+            dir_service.1.port = 32638
+            dir_service.2.host = 192.168.0.12
+            dir_service.2.port = 32638
+
+            # specify whether SSL is required
+            ssl.enabled = true
+            ssl.service_creds.pw = passphrase
+            ssl.service_creds.container = pkcs12
+            ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+            ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+            ssl.trusted_certs.pw = jks_passphrase
+            ssl.trusted_certs.container = jks
+          '';
+          description = ''
+            Configuration of XtreemFS OSD service.
+            WARNING: configuration is saved as plaintext inside nix store.
+            For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+          '';
+        };
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+
+    environment.systemPackages = [ xtreemfs ];
+
+    users.extraUsers.xtreemfs =
+      { uid = config.ids.uids.xtreemfs;
+        description = "XtreemFS user";
+        createHome = true;
+        home = home;
+      };
+
+    users.extraGroups.xtreemfs =
+      { gid = config.ids.gids.xtreemfs;
+      };
+
+    systemd.services.xtreemfs-dir = mkIf cfg.dir.enable {
+      description = "XtreemFS-DIR Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "xtreemfs";
+        ExecStart = "${startupScript "org.xtreemfs.dir.DIR" dirConfig}";
+      };
+    };
+
+    systemd.services.xtreemfs-mrc = mkIf cfg.mrc.enable ({
+      description = "XtreemFS-MRC Server";
+      serviceConfig = {
+        User = "xtreemfs";
+        ExecStart = "${startupScript "org.xtreemfs.mrc.MRC" mrcConfig}";
+      };
+    } // systemdOptionalDependencies);
+
+    systemd.services.xtreemfs-osd = mkIf cfg.osd.enable ({
+      description = "XtreemFS-OSD Server";
+      serviceConfig = {
+        User = "xtreemfs";
+        ExecStart = "${startupScript "org.xtreemfs.osd.OSD" osdConfig}";
+      };
+    } // systemdOptionalDependencies);
+
+  };
+
+}
diff --git a/nixos/modules/services/networking/bind.nix b/nixos/modules/services/networking/bind.nix
index 57547da10067..34e7470dfc6f 100644
--- a/nixos/modules/services/networking/bind.nix
+++ b/nixos/modules/services/networking/bind.nix
@@ -24,6 +24,8 @@ let
         pid-file "/var/run/named/named.pid";
       };
 
+      ${cfg.extraConfig}
+
       ${ concatMapStrings
           ({ name, file, master ? true, slaves ? [], masters ? [] }:
             ''
@@ -110,6 +112,13 @@ in
         }];
       };
 
+      extraConfig = mkOption {
+        default = "";
+        description = "
+          Extra lines to be added verbatim to the generated named configuration file.
+        ";
+      };
+
       configFile = mkOption {
         default = confFile;
         description = "
diff --git a/nixos/modules/services/networking/dhcpcd.nix b/nixos/modules/services/networking/dhcpcd.nix
index 8552395fdb16..b31d479ab4fd 100644
--- a/nixos/modules/services/networking/dhcpcd.nix
+++ b/nixos/modules/services/networking/dhcpcd.nix
@@ -18,6 +18,7 @@ let
     map (i: i.name) (filter (i: if i.useDHCP != null then !i.useDHCP else i.ip4 != [ ] || i.ipAddress != null) interfaces)
     ++ mapAttrsToList (i: _: i) config.networking.sits
     ++ concatLists (attrValues (mapAttrs (n: v: v.interfaces) config.networking.bridges))
+    ++ concatLists (attrValues (mapAttrs (n: v: v.interfaces) config.networking.vswitches))
     ++ concatLists (attrValues (mapAttrs (n: v: v.interfaces) config.networking.bonds))
     ++ config.networking.dhcpcd.denyInterfaces;
 
diff --git a/nixos/modules/services/networking/dnscrypt-proxy.nix b/nixos/modules/services/networking/dnscrypt-proxy.nix
index c724ee979c2d..218bce2dbb31 100644
--- a/nixos/modules/services/networking/dnscrypt-proxy.nix
+++ b/nixos/modules/services/networking/dnscrypt-proxy.nix
@@ -52,10 +52,7 @@ in
         default = "opendns";
         type = types.nullOr types.string;
         description = ''
-          The name of the upstream DNSCrypt resolver to use. See
-          <literal>${resolverListFile}</literal> for alternative resolvers
-          (e.g., if you are concerned about logging and/or server
-          location).
+          The name of the upstream DNSCrypt resolver to use.
         '';
       };
       customResolver = mkOption {
diff --git a/nixos/modules/services/networking/seeks.nix b/nixos/modules/services/networking/seeks.nix
index 155ecbb98ef3..f5bc60be3457 100644
--- a/nixos/modules/services/networking/seeks.nix
+++ b/nixos/modules/services/networking/seeks.nix
@@ -33,7 +33,7 @@ in
         type = types.str;
         description = "
           The Seeks server configuration. If it is not specified,
-          a default configuration is used (${seeks}/etc/seeks).
+          a default configuration is used.
         ";
       };
 
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index d5accfef1cb5..4eb32b1cf306 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -54,12 +54,15 @@ in
         description = "Syncthing service";
         after = [ "network.target" ];
         wantedBy = [ "multi-user.target" ];
-        environment.STNORESTART = "placeholder";  # do not self-restart
+        environment.STNORESTART = "yes";  # do not self-restart
+        environment.STNOUPGRADE = "yes";
         serviceConfig = {
           User = "${cfg.user}";
           PermissionsStartOnly = true;
-          Restart = "always";
+          Restart = "on-failure";
           ExecStart = "${pkgs.syncthing}/bin/syncthing -no-browser -home=${cfg.dataDir}";
+          SuccessExitStatus = "2 3 4";
+          RestartForceExitStatus="3 4";
         };
       };
 
diff --git a/nixos/modules/services/web-servers/phpfpm.nix b/nixos/modules/services/web-servers/phpfpm.nix
index 41dbfff41cfe..82398948bfaa 100644
--- a/nixos/modules/services/web-servers/phpfpm.nix
+++ b/nixos/modules/services/web-servers/phpfpm.nix
@@ -44,8 +44,7 @@ in {
 
       phpIni = mkOption {
         type = types.path;
-        default = "${cfg.phpPackage}/etc/php-recommended.ini";
-        description = "php.ini file to use.";
+        description = "PHP configuration file to use.";
       };
 
       poolConfigs = mkOption {
@@ -86,5 +85,7 @@ in {
       };
     };
 
+    services.phpfpm.phpIni = mkDefault "${cfg.phpPackage}/etc/php-recommended.ini";
+
   };
 }
diff --git a/nixos/modules/services/x11/display-managers/default.nix b/nixos/modules/services/x11/display-managers/default.nix
index fc0803f2acaf..ca0832e5b0c8 100644
--- a/nixos/modules/services/x11/display-managers/default.nix
+++ b/nixos/modules/services/x11/display-managers/default.nix
@@ -114,6 +114,10 @@ let
       rm -rf $HOME/.compose-cache
       mkdir $HOME/.compose-cache
 
+      # Work around KDE errors when a user first logs in and
+      # .local/share doesn't exist yet.
+      mkdir -p $HOME/.local/share
+
       ${cfg.displayManager.sessionCommands}
 
       # Allow the user to execute commands at the beginning of the X session.
diff --git a/nixos/modules/services/x11/display-managers/gdm.nix b/nixos/modules/services/x11/display-managers/gdm.nix
index 887b6f88a741..c9a563768323 100644
--- a/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixos/modules/services/x11/display-managers/gdm.nix
@@ -106,7 +106,7 @@ in
     systemd.services.display-manager.wants = [ "systemd-machined.service" ];
     systemd.services.display-manager.after = [ "systemd-machined.service" ];
 
-    systemd.services.display-manager.path = [ gnome3.gnome_shell gnome3.caribou pkgs.xlibs.xhost pkgs.dbus_tools ];
+    systemd.services.display-manager.path = [ gnome3.gnome_shell gnome3.caribou pkgs.xorg.xhost pkgs.dbus_tools ];
 
     services.dbus.packages = [ gdm ];
 
diff --git a/nixos/modules/services/x11/display-managers/kdm.nix b/nixos/modules/services/x11/display-managers/kdm.nix
index d0b69c5452c2..558f5e8cfc7e 100644
--- a/nixos/modules/services/x11/display-managers/kdm.nix
+++ b/nixos/modules/services/x11/display-managers/kdm.nix
@@ -19,7 +19,7 @@ let
       ''}
 
       [X-*-Core]
-      Xrdb=${pkgs.xlibs.xrdb}/bin/xrdb
+      Xrdb=${pkgs.xorg.xrdb}/bin/xrdb
       SessionsDirs=${dmcfg.session.desktops}
       Session=${dmcfg.session.script}
       FailsafeClient=${pkgs.xterm}/bin/xterm
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index bc8f478c7d83..11e21c9d917f 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -104,7 +104,6 @@ in
       };
 
       background = mkOption {
-        default = "${pkgs.nixos-artwork}/share/artwork/gnome/Gnome_Dark.png";
         description = ''
           The background image or color to use.
         '';
@@ -172,5 +171,8 @@ in
     };
 
     users.extraGroups.lightdm.gid = config.ids.gids.lightdm;
+
+    services.xserver.displayManager.lightdm.background = mkDefault "${pkgs.nixos-artwork}/share/artwork/gnome/Gnome_Dark.png";
+
   };
 }
diff --git a/nixos/modules/services/x11/hardware/synaptics.nix b/nixos/modules/services/x11/hardware/synaptics.nix
index e967dc911411..e50ed08a218a 100644
--- a/nixos/modules/services/x11/hardware/synaptics.nix
+++ b/nixos/modules/services/x11/hardware/synaptics.nix
@@ -18,6 +18,8 @@ let cfg = config.services.xserver.synaptics;
       Option "TapButton2" "0"
       Option "TapButton3" "0"
     '';
+  pkg = pkgs.xorg.xf86inputsynaptics;
+  etcFile = "X11/xorg.conf.d/50-synaptics.conf";
 in {
 
   options = {
@@ -146,9 +148,12 @@ in {
 
   config = mkIf cfg.enable {
 
-    services.xserver.modules = [ pkgs.xorg.xf86inputsynaptics ];
+    services.xserver.modules = [ pkg ];
 
-    environment.systemPackages = [ pkgs.xorg.xf86inputsynaptics ];
+    environment.etc."${etcFile}".source =
+      "${pkg}/share/X11/xorg.conf.d/50-synaptics.conf";
+
+    environment.systemPackages = [ pkg ];
 
     services.xserver.config =
       ''
diff --git a/nixos/modules/system/boot/stage-1-init.sh b/nixos/modules/system/boot/stage-1-init.sh
index 480bbfa2b07b..51828c5c090b 100644
--- a/nixos/modules/system/boot/stage-1-init.sh
+++ b/nixos/modules/system/boot/stage-1-init.sh
@@ -135,7 +135,7 @@ ln -s @modulesClosure@/lib/modules /lib/modules
 echo @extraUtils@/bin/modprobe > /proc/sys/kernel/modprobe
 for i in @kernelModules@; do
     echo "loading module $(basename $i)..."
-    modprobe $i || true
+    modprobe $i
 done
 
 
@@ -146,7 +146,7 @@ ln -sfn @udevRules@ /etc/udev/rules.d
 mkdir -p /dev/.mdadm
 systemd-udevd --daemon
 udevadm trigger --action=add
-udevadm settle || true
+udevadm settle
 
 
 # Load boot-time keymap before any LVM/LUKS initialization
@@ -290,10 +290,23 @@ mountFS() {
         if [ -z "$fsType" ]; then fsType=auto; fi
     fi
 
-    echo "$device /mnt-root$mountPoint $fsType $options" >> /etc/fstab
+    # Filter out x- options, which busybox doesn't do yet.
+    local optionsFiltered="$(IFS=,; for i in $options; do if [ "${i:0:2}" != "x-" ]; then echo -n $i,; fi; done)"
+
+    echo "$device /mnt-root$mountPoint $fsType $optionsFiltered" >> /etc/fstab
 
     checkFS "$device" "$fsType"
 
+    # Optionally resize the filesystem.
+    case $options in
+        *x-nixos.autoresize*)
+            if [ "$fsType" = ext2 -o "$fsType" = ext3 -o "$fsType" = ext4 ]; then
+                echo "resizing $device..."
+                resize2fs "$device"
+            fi
+            ;;
+    esac
+
     # Create backing directories for unionfs-fuse.
     if [ "$fsType" = unionfs-fuse ]; then
         for i in $(IFS=:; echo ${options##*,dirs=}); do
@@ -303,7 +316,7 @@ mountFS() {
 
     echo "mounting $device on $mountPoint..."
 
-    mkdir -p "/mnt-root$mountPoint" || true
+    mkdir -p "/mnt-root$mountPoint"
 
     # For CIFS mounts, retry a few times before giving up.
     local n=0
@@ -375,7 +388,7 @@ while read -u 3 mountPoint; do
 
     # Wait once more for the udev queue to empty, just in case it's
     # doing something with $device right now.
-    udevadm settle || true
+    udevadm settle
 
     mountFS "$device" "$mountPoint" "$options" "$fsType"
 done
@@ -388,9 +401,9 @@ exec 3>&-
 
 # Emit a udev rule for /dev/root to prevent systemd from complaining.
 if [ -e /mnt-root/iso ]; then
-    eval $(udevadm info --export --export-prefix=ROOT_ --device-id-of-file=/mnt-root/iso || true)
+    eval $(udevadm info --export --export-prefix=ROOT_ --device-id-of-file=/mnt-root/iso)
 else
-    eval $(udevadm info --export --export-prefix=ROOT_ --device-id-of-file=$targetRoot || true)
+    eval $(udevadm info --export --export-prefix=ROOT_ --device-id-of-file=$targetRoot)
 fi
 if [ "$ROOT_MAJOR" -a "$ROOT_MINOR" -a "$ROOT_MAJOR" != 0 ]; then
     mkdir -p /run/udev/rules.d
@@ -399,7 +412,7 @@ fi
 
 
 # Stop udevd.
-udevadm control --exit || true
+udevadm control --exit
 
 # Kill any remaining processes, just to be sure we're not taking any
 # with us into stage 2. But keep storage daemons like unionfs-fuse.
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index f782eca3f647..ace2d10ec9c1 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -70,6 +70,12 @@ let
       copy_bin_and_libs ${pkgs.kmod}/bin/kmod
       ln -sf kmod $out/bin/modprobe
 
+      # Copy resize2fs if needed.
+      ${optionalString (any (fs: fs.autoResize) (attrValues config.fileSystems)) ''
+        # We need mke2fs in the initrd.
+        copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/resize2fs
+      ''}
+
       ${config.boot.initrd.extraUtilsCommands}
 
       # Copy ld manually since it isn't detected correctly
@@ -393,7 +399,6 @@ in
       }
     ];
 
-
     system.build.bootStage1 = bootStage1;
     system.build.initialRamdisk = initialRamdisk;
     system.build.extraUtils = extraUtils;
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 366bec7187ba..34eda7247cdd 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -444,6 +444,17 @@ in
       '';
     };
 
+    systemd.generators = mkOption {
+      type = types.attrsOf types.path;
+      default = {};
+      example = { "systemd-gpt-auto-generator" = "/dev/null"; };
+      description = ''
+        Definition of systemd generators.
+        For each <literal>NAME = VALUE</literal> pair of the attrSet, a link is generated from
+        <literal>/etc/systemd/system-generators/NAME</literal> to <literal>VALUE</literal>.
+      '';
+    };
+
     systemd.defaultUnit = mkOption {
       default = "multi-user.target";
       type = types.str;
@@ -600,20 +611,17 @@ in
 
     environment.systemPackages = [ systemd ];
 
-    environment.etc."systemd/system".source =
-      generateUnits "system" cfg.units upstreamSystemUnits upstreamSystemWants;
+    environment.etc = {
+      "systemd/system".source = generateUnits "system" cfg.units upstreamSystemUnits upstreamSystemWants;
 
-    environment.etc."systemd/user".source =
-      generateUnits "user" cfg.user.units upstreamUserUnits [];
+      "systemd/user".source = generateUnits "user" cfg.user.units upstreamUserUnits [];
 
-    environment.etc."systemd/system.conf".text =
-      ''
+      "systemd/system.conf".text = ''
         [Manager]
         ${config.systemd.extraConfig}
       '';
 
-    environment.etc."systemd/journald.conf".text =
-      ''
+      "systemd/journald.conf".text = ''
         [Journal]
         RateLimitInterval=${config.services.journald.rateLimitInterval}
         RateLimitBurst=${toString config.services.journald.rateLimitBurst}
@@ -624,17 +632,26 @@ in
         ${config.services.journald.extraConfig}
       '';
 
-    environment.etc."systemd/logind.conf".text =
-      ''
+      "systemd/logind.conf".text = ''
         [Login]
         ${config.services.logind.extraConfig}
       '';
 
-    environment.etc."systemd/sleep.conf".text =
-      ''
+      "systemd/sleep.conf".text = ''
         [Sleep]
       '';
 
+      "tmpfiles.d/systemd.conf".source = "${systemd}/example/tmpfiles.d/systemd.conf";
+      "tmpfiles.d/x11.conf".source = "${systemd}/example/tmpfiles.d/x11.conf";
+
+      "tmpfiles.d/nixos.conf".text = ''
+        # This file is created automatically and should not be modified.
+        # Please change the option ‘systemd.tmpfiles.rules’ instead.
+
+        ${concatStringsSep "\n" cfg.tmpfiles.rules}
+      '';
+    } // mapAttrs' (n: v: nameValuePair "systemd/system-generators/${n}" {"source"=v;}) cfg.generators;
+
     system.activationScripts.systemd = stringAfter [ "groups" ]
       ''
         mkdir -m 0755 -p /var/lib/udev
@@ -735,17 +752,6 @@ in
         startSession = true;
       };
 
-    environment.etc."tmpfiles.d/systemd.conf".source = "${systemd}/example/tmpfiles.d/systemd.conf";
-    environment.etc."tmpfiles.d/x11.conf".source = "${systemd}/example/tmpfiles.d/x11.conf";
-
-    environment.etc."tmpfiles.d/nixos.conf".text =
-      ''
-        # This file is created automatically and should not be modified.
-        # Please change the option ‘systemd.tmpfiles.rules’ instead.
-
-        ${concatStringsSep "\n" cfg.tmpfiles.rules}
-      '';
-
     # Some overrides to upstream units.
     systemd.services."systemd-backlight@".restartIfChanged = false;
     systemd.services."systemd-rfkill@".restartIfChanged = false;
diff --git a/nixos/modules/tasks/encrypted-devices.nix b/nixos/modules/tasks/encrypted-devices.nix
index 8b5dd22fd380..331531cee151 100644
--- a/nixos/modules/tasks/encrypted-devices.nix
+++ b/nixos/modules/tasks/encrypted-devices.nix
@@ -6,6 +6,7 @@ let
   fileSystems = attrValues config.fileSystems ++ config.swapDevices;
   encDevs = filter (dev: dev.encrypted.enable) fileSystems;
   keyedEncDevs = filter (dev: dev.encrypted.keyFile != null) encDevs;
+  keylessEncDevs = filter (dev: dev.encrypted.keyFile == null) encDevs;
   isIn = needle: haystack: filter (p: p == needle) haystack != [];
   anyEncrypted =
     fold (j: v: v || j.encrypted.enable) false encDevs;
@@ -29,15 +30,15 @@ let
       label = mkOption {
         default = null;
         example = "rootfs";
-        type = types.nullOr types.str;
-        description = "Label of the backing encrypted device.";
+        type = types.uniq (types.nullOr types.str);
+        description = "Label of the unlocked encrypted device. Set <literal>fileSystems.&lt;name?&gt;.device</literal> to <literal>/dev/mapper/&lt;label&gt;</literal> to mount the unlocked device.";
       };
 
       keyFile = mkOption {
         default = null;
         example = "/root/.swapkey";
         type = types.nullOr types.str;
-        description = "File system location of keyfile.";
+        description = "File system location of keyfile. This unlocks the drive after the root has been mounted to <literal>/mnt-root</literal>.";
       };
     };
   };
@@ -58,11 +59,11 @@ in
     boot.initrd = {
       luks = {
         devices =
-          map (dev: { name = dev.encrypted.label; device = dev.encrypted.blkDev; } ) encDevs;
+          map (dev: { name = dev.encrypted.label; device = dev.encrypted.blkDev; } ) keylessEncDevs;
         cryptoModules = [ "aes" "sha256" "sha1" "xts" ];
       };
       postMountCommands =
-        concatMapStrings (dev: "cryptsetup luksOpen --key-file ${dev.encrypted.keyFile} ${dev.encrypted.label};\n") keyedEncDevs;
+        concatMapStrings (dev: "cryptsetup luksOpen --key-file ${dev.encrypted.keyFile} ${dev.encrypted.blkDev} ${dev.encrypted.label};\n") keyedEncDevs;
     };
   };
 }
diff --git a/nixos/modules/tasks/filesystems.nix b/nixos/modules/tasks/filesystems.nix
index ce9e3555b6cd..9dd250f140ce 100644
--- a/nixos/modules/tasks/filesystems.nix
+++ b/nixos/modules/tasks/filesystems.nix
@@ -7,7 +7,7 @@ let
 
   fileSystems = attrValues config.fileSystems;
 
-  prioOption = prio: optionalString (prio !=null) " pri=${toString prio}";
+  prioOption = prio: optionalString (prio != null) " pri=${toString prio}";
 
   fileSystemOpts = { name, config, ... }: {
 
@@ -41,9 +41,9 @@ let
       };
 
       options = mkOption {
-        default = "defaults,relatime";
+        default = "defaults";
         example = "data=journal";
-        type = types.commas;
+        type = types.commas; # FIXME: should be a list
         description = "Options used to mount the file system.";
       };
 
@@ -58,6 +58,17 @@ let
         '';
       };
 
+      autoResize = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          If set, the filesystem is grown to its maximum size before
+          being mounted. (This is typically the size of the containing
+          partition.) This is currently only supported for ext2/3/4
+          filesystems that are mounted during early boot.
+        '';
+      };
+
       noCheck = mkOption {
         default = false;
         type = types.bool;
@@ -69,6 +80,7 @@ let
     config = {
       mountPoint = mkDefault name;
       device = mkIf (config.fsType == "tmpfs") (mkDefault config.fsType);
+      options = mkIf config.autoResize "x-nixos.autoresize";
     };
 
   };
@@ -141,7 +153,7 @@ in
 
     environment.etc.fstab.text =
       let
-        fsToSkipCheck = [ "none" "btrfs" "zfs" "tmpfs" "nfs" ];
+        fsToSkipCheck = [ "none" "btrfs" "zfs" "tmpfs" "nfs" "vboxsf" ];
         skipCheck = fs: fs.noCheck || fs.device == "none" || builtins.elem fs.fsType fsToSkipCheck;
       in ''
         # This is a generated file.  Do not edit!
diff --git a/nixos/modules/tasks/filesystems/vboxsf.nix b/nixos/modules/tasks/filesystems/vboxsf.nix
new file mode 100644
index 000000000000..87f1984f084f
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/vboxsf.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "vboxsf") config.boot.initrd.supportedFilesystems;
+
+  package = pkgs.runCommand "mount.vboxsf" {} ''
+    mkdir -p $out/bin
+    cp ${pkgs.linuxPackages.virtualboxGuestAdditions}/bin/mount.vboxsf $out/bin
+  '';
+in
+
+{
+  config = mkIf (any (fs: fs == "vboxsf") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ package ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "vboxsf" ];
+
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index d4b10e9ed09e..675bd3d232a6 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -21,9 +21,9 @@ let
 
   kernel = config.boot.kernelPackages;
 
-  splKernelPkg = if cfgZfs.useGit then kernel.spl_git else kernel.spl;
-  zfsKernelPkg = if cfgZfs.useGit then kernel.zfs_git else kernel.zfs;
-  zfsUserPkg = if cfgZfs.useGit then pkgs.zfs_git else pkgs.zfs;
+  splKernelPkg = kernel.spl;
+  zfsKernelPkg = kernel.zfs;
+  zfsUserPkg = pkgs.zfs;
 
   autosnapPkg = pkgs.zfstools.override {
     zfs = zfsUserPkg;
@@ -53,16 +53,6 @@ in
 
   options = {
     boot.zfs = {
-      useGit = mkOption {
-        type = types.bool;
-        default = false;
-        example = true;
-        description = ''
-          Use the git version of the SPL and ZFS packages.
-          Note that these are unreleased versions, with less testing, and therefore
-          may be more unstable.
-        '';
-      };
 
       extraPools = mkOption {
         type = types.listOf types.str;
diff --git a/nixos/modules/tasks/network-interfaces-scripted.nix b/nixos/modules/tasks/network-interfaces-scripted.nix
index 328d94cbb05c..d8b1592c36bb 100644
--- a/nixos/modules/tasks/network-interfaces-scripted.nix
+++ b/nixos/modules/tasks/network-interfaces-scripted.nix
@@ -220,6 +220,45 @@ in
             '';
           });
 
+        createVswitchDevice = n: v: nameValuePair "${n}-netdev"
+          (let
+            managedInterfaces = filter (x: hasAttr x cfg.interfaces) v.interfaces;
+            managedInterfaceServices = concatMap (i: [ "network-addresses-${i}.service" "network-link-${i}.service" ]) managedInterfaces;
+            virtualInterfaces = filter (x: (hasAttr x cfg.interfaces) && cfg.interfaces.${x}.virtual) v.interfaces;
+            virtualInterfaceServices = concatMap (i: [ "${i}-netdev.service" ]) virtualInterfaces;
+            deps = map subsystemDevice v.interfaces;
+            ofRules = pkgs.writeText "vswitch-${n}-openFlowRules" v.openFlowRules;
+          in
+          { description = "Open vSwitch Interface ${n}";
+            wantedBy = [ "network.target" "vswitchd.service" (subsystemDevice n) ];
+            requires = optionals v.bindInterfaces (deps ++ managedInterfaceServices ++ virtualInterfaceServices);
+            requiredBy = optionals v.bindInterfaces (managedInterfaceServices ++ virtualInterfaceServices);
+            bindsTo = deps ++ [ "vswitchd.service" ];
+            partOf = [ "vswitchd.service" ];
+            after = [ "network-pre.target" "vswitchd.service" ] ++ deps ++ managedInterfaceServices ++ virtualInterfaceServices;
+            before = [ "network-interfaces.target" (subsystemDevice n) ];
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute config.virtualisation.vswitch.package ];
+            script = ''
+              echo "Removing old Open vSwitch ${n}..."
+              ovs-vsctl --if-exists del-br ${n}
+
+              echo "Adding Open vSwitch ${n}..."
+              ovs-vsctl -- add-br ${n} ${concatMapStrings (i: " -- add-port ${n} ${i}") v.interfaces} \
+                ${concatMapStrings (x: " -- set-controller ${n} " + x)  v.controllers} \
+                ${concatMapStrings (x: " -- " + x) (splitString "\n" v.extraOvsctlCmds)}
+
+              echo "Adding OpenFlow rules for Open vSwitch ${n}..."
+              ovs-ofctl add-flows ${n} ${ofRules}
+            '';
+            postStop = ''
+              ip link set ${n} down || true
+              ovs-ofctl del-flows ${n} || true
+              ovs-vsctl --if-exists del-br ${n}
+            '';
+          });
+
         createBondDevice = n: v: nameValuePair "${n}-netdev"
           (let
             deps = map subsystemDevice v.interfaces;
@@ -335,6 +374,7 @@ in
            map configureAddrs interfaces ++
            map createTunDevice (filter (i: i.virtual) interfaces))
          // mapAttrs' createBridgeDevice cfg.bridges
+         // mapAttrs' createVswitchDevice cfg.vswitches
          // mapAttrs' createBondDevice cfg.bonds
          // mapAttrs' createMacvlanDevice cfg.macvlans
          // mapAttrs' createSitDevice cfg.sits
diff --git a/nixos/modules/tasks/network-interfaces-systemd.nix b/nixos/modules/tasks/network-interfaces-systemd.nix
index 8223c5a4941e..301ee43fd0e5 100644
--- a/nixos/modules/tasks/network-interfaces-systemd.nix
+++ b/nixos/modules/tasks/network-interfaces-systemd.nix
@@ -35,6 +35,9 @@ in
     assertions = [ {
       assertion = cfg.defaultGatewayWindowSize == null;
       message = "networking.defaultGatewayWindowSize is not supported by networkd.";
+    } {
+      assertion = cfg.vswitches == {};
+      message = "networking.vswitches are not supported by networkd.";
     } ] ++ flip mapAttrsToList cfg.bridges (n: { rstp, ... }: {
       assertion = !rstp;
       message = "networking.bridges.${n}.rstp is not supported by networkd.";
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index 9931c977e8f0..7af3160e2d42 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -12,7 +12,8 @@ let
   hasBonds = cfg.bonds != { };
 
   slaves = concatMap (i: i.interfaces) (attrValues cfg.bonds)
-    ++ concatMap (i: i.interfaces) (attrValues cfg.bridges);
+    ++ concatMap (i: i.interfaces) (attrValues cfg.bridges)
+    ++ concatMap (i: i.interfaces) (attrValues cfg.vswitches);
 
   slaveIfs = map (i: cfg.interfaces.${i}) (filter (i: cfg.interfaces ? ${i}) slaves);
 
@@ -371,6 +372,81 @@ in
       options = [ interfaceOpts ];
     };
 
+    networking.vswitches = mkOption {
+      default = { };
+      example =
+        { vs0.interfaces = [ "eth0" "eth1" ];
+          vs1.interfaces = [ "eth2" "wlan0" ];
+        };
+      description =
+        ''
+          This option allows you to define Open vSwitches that connect
+          physical networks together.  The value of this option is an
+          attribute set.  Each attribute specifies a vswitch, with the
+          attribute name specifying the name of the vswitch's network
+          interface.
+        '';
+
+      type = types.attrsOf types.optionSet;
+
+      options = {
+
+        interfaces = mkOption {
+          example = [ "eth0" "eth1" ];
+          type = types.listOf types.str;
+          description =
+            "The physical network interfaces connected by the vSwitch.";
+        };
+
+        bindInterfaces = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            If true, then the interfaces of the vSwitch are brought 'up' and especially
+            also 'down' together with the vSwitch. That requires that every interface
+            is configured as a systemd network service.
+          '';
+        };
+
+        controllers = mkOption {
+          type = types.listOf types.str;
+          default = [];
+          example = [ "ptcp:6653:[::1]" ];
+          description = ''
+            Specify the controller targets. For the allowed options see <literal>man 8 ovs-vsctl</literal>.
+          '';
+        };
+
+        openFlowRules = mkOption {
+          type = types.lines;
+          default = "";
+          example = ''
+            actions=normal
+          '';
+          description = ''
+            OpenFlow rules to insert into the Open vSwitch. All <literal>openFlowRules</literal> are
+            loaded with <literal>ovs-ofctl</literal> within one atomic operation.
+          '';
+        };
+
+        extraOvsctlCmds = mkOption {
+          type = types.lines;
+          default = "";
+          example = ''
+            set-fail-mode <switch_name> secure
+            set Bridge <switch_name> stp_enable=true
+          '';
+          description = ''
+            Commands to manipulate the Open vSwitch database. Every line is executed with <literal>ovs-vsctl</literal>.
+            All commands are bundled together with the operations for adding the interfaces
+            into one atomic operation.
+          '';
+        };
+
+      };
+
+    };
+
     networking.bridges = mkOption {
       default = { };
       example =
@@ -766,6 +842,8 @@ in
 
     services.mstpd = mkIf needsMstpd { enable = true; };
 
+    virtualisation.vswitch = mkIf (cfg.vswitches != { }) { enable = true; };
+
   };
 
 }
diff --git a/nixos/modules/virtualisation/amazon-config.nix b/nixos/modules/virtualisation/amazon-config.nix
deleted file mode 100644
index 809cdb4d108e..000000000000
--- a/nixos/modules/virtualisation/amazon-config.nix
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  imports = [ <nixpkgs/nixos/modules/virtualisation/amazon-image.nix> ];
-}
diff --git a/nixos/modules/virtualisation/amazon-grow-partition.nix b/nixos/modules/virtualisation/amazon-grow-partition.nix
new file mode 100644
index 000000000000..44a9fa93e7ec
--- /dev/null
+++ b/nixos/modules/virtualisation/amazon-grow-partition.nix
@@ -0,0 +1,50 @@
+# This module automatically grows the root partition on Amazon EC2 HVM
+# instances. This allows an instance to be created with a bigger root
+# filesystem than provided by the AMI.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  growpart = pkgs.stdenv.mkDerivation {
+    name = "growpart";
+    src = pkgs.fetchurl {
+      url = "https://launchpad.net/cloud-utils/trunk/0.27/+download/cloud-utils-0.27.tar.gz";
+      sha256 = "16shlmg36lidp614km41y6qk3xccil02f5n3r4wf6d1zr5n4v8vd";
+    };
+    patches = [ ./growpart-util-linux-2.26.patch ];
+    buildPhase = ''
+      cp bin/growpart $out
+      sed -i 's|awk|gawk|' $out
+      sed -i 's|sed|gnused|' $out
+    '';
+    dontInstall = true;
+    dontPatchShebangs = true;
+  };
+
+in
+
+{
+
+  config = mkIf config.ec2.hvm {
+
+    boot.initrd.extraUtilsCommands = ''
+      copy_bin_and_libs ${pkgs.gawk}/bin/gawk
+      copy_bin_and_libs ${pkgs.gnused}/bin/sed
+      copy_bin_and_libs ${pkgs.utillinux}/sbin/sfdisk
+      cp -v ${growpart} $out/bin/growpart
+      ln -s sed $out/bin/gnused
+    '';
+
+    boot.initrd.postDeviceCommands = ''
+      if [ -e /dev/xvda ] && [ -e /dev/xvda1 ]; then
+        TMPDIR=/run sh $(type -P growpart) /dev/xvda 1
+        udevadm settle
+      fi
+    '';
+
+  };
+
+}
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index 600a29f31bc5..bf2364a0459d 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -1,105 +1,40 @@
+# Configuration for Amazon EC2 instances. (Note that this file is a
+# misnomer - it should be "amazon-config.nix" or so, not
+# "amazon-image.nix", since it's used not only to build images but
+# also to reconfigure instances. However, we can't rename it because
+# existing "configuration.nix" files on EC2 instances refer to it.)
+
 { config, lib, pkgs, ... }:
 
 with lib;
-let
-  cfg = config.ec2;
-in
+
+let cfg = config.ec2; in
+
 {
-  imports = [ ../profiles/headless.nix ./ec2-data.nix ];
+  imports = [ ../profiles/headless.nix ./ec2-data.nix ./amazon-grow-partition.nix ];
 
   config = {
-    system.build.amazonImage =
-      pkgs.vmTools.runInLinuxVM (
-        pkgs.runCommand "amazon-image"
-          { preVM =
-              ''
-                mkdir $out
-                diskImage=$out/nixos.img
-                ${pkgs.vmTools.qemu}/bin/qemu-img create -f raw $diskImage "8G"
-                mv closure xchg/
-              '';
-            buildInputs = [ pkgs.utillinux pkgs.perl ];
-            exportReferencesGraph =
-              [ "closure" config.system.build.toplevel ];
-          }
-          ''
-            ${if cfg.hvm then ''
-              # Create a single / partition.
-              ${pkgs.parted}/sbin/parted /dev/vda mklabel msdos
-              ${pkgs.parted}/sbin/parted /dev/vda -- mkpart primary ext2 1M -1s
-              . /sys/class/block/vda1/uevent
-              mknod /dev/vda1 b $MAJOR $MINOR
-
-              # Create an empty filesystem and mount it.
-              ${pkgs.e2fsprogs}/sbin/mkfs.ext4 -L nixos /dev/vda1
-              ${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
-              mkdir /mnt
-              mount /dev/vda1 /mnt
-            '' else ''
-              # Create an empty filesystem and mount it.
-              ${pkgs.e2fsprogs}/sbin/mkfs.ext4 -L nixos /dev/vda
-              ${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda
-              mkdir /mnt
-              mount /dev/vda /mnt
-            ''}
-
-            # The initrd expects these directories to exist.
-            mkdir /mnt/dev /mnt/proc /mnt/sys
-
-            mount -o bind /proc /mnt/proc
-            mount -o bind /dev /mnt/dev
-            mount -o bind /sys /mnt/sys
-
-            # Copy all paths in the closure to the filesystem.
-            storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
-
-            mkdir -p /mnt/nix/store
-            echo "copying everything (will take a while)..."
-            cp -prd $storePaths /mnt/nix/store/
-
-            # Register the paths in the Nix database.
-            printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
-                chroot /mnt ${config.nix.package}/bin/nix-store --load-db --option build-users-group ""
-
-            # Create the system profile to allow nixos-rebuild to work.
-            chroot /mnt ${config.nix.package}/bin/nix-env --option build-users-group "" \
-                -p /nix/var/nix/profiles/system --set ${config.system.build.toplevel}
-
-            # `nixos-rebuild' requires an /etc/NIXOS.
-            mkdir -p /mnt/etc
-            touch /mnt/etc/NIXOS
-
-            # `switch-to-configuration' requires a /bin/sh
-            mkdir -p /mnt/bin
-            ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
-
-            # Install a configuration.nix.
-            mkdir -p /mnt/etc/nixos
-            cp ${./amazon-config.nix} /mnt/etc/nixos/configuration.nix
-
-            # Generate the GRUB menu.
-            ln -s vda /dev/xvda
-            chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
-
-            umount /mnt/proc /mnt/dev /mnt/sys
-            umount /mnt
-          ''
-      );
-
-    fileSystems."/".device = "/dev/disk/by-label/nixos";
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+    };
 
     boot.initrd.kernelModules = [ "xen-blkfront" ];
     boot.kernelModules = [ "xen-netfront" ];
+    boot.kernelParams = mkIf cfg.hvm [ "console=ttyS0" ];
 
     # Prevent the nouveau kernel module from being loaded, as it
     # interferes with the nvidia/nvidia-uvm modules needed for CUDA.
-    boot.blacklistedKernelModules = [ "nouveau" ];
+    # Also blacklist xen_fbfront to prevent a 30 second delay during
+    # boot.
+    boot.blacklistedKernelModules = [ "nouveau" "xen_fbfront" ];
 
     # Generate a GRUB menu.  Amazon's pv-grub uses this to boot our kernel/initrd.
     boot.loader.grub.version = if cfg.hvm then 2 else 1;
     boot.loader.grub.device = if cfg.hvm then "/dev/xvda" else "nodev";
     boot.loader.grub.timeout = 0;
-    boot.loader.grub.extraPerEntryConfig = "root (hd0${lib.optionalString cfg.hvm ",0"})";
+    boot.loader.grub.extraPerEntryConfig = mkIf (!cfg.hvm) "root (hd0)";
 
     boot.initrd.postDeviceCommands =
       ''
diff --git a/nixos/modules/virtualisation/ec2-data.nix b/nixos/modules/virtualisation/ec2-data.nix
index 44a582ba7666..383750520ab7 100644
--- a/nixos/modules/virtualisation/ec2-data.nix
+++ b/nixos/modules/virtualisation/ec2-data.nix
@@ -9,7 +9,7 @@ with lib;
 {
   config = {
 
-    systemd.services."fetch-ec2-data" =
+    systemd.services.fetch-ec2-data =
       { description = "Fetch EC2 Data";
 
         wantedBy = [ "multi-user.target" "sshd.service" ];
@@ -35,10 +35,8 @@ with lib;
                 mkdir -m 0700 -p /root/.ssh
                 $wget http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key > /root/key.pub
                 if [ $? -eq 0 -a -e /root/key.pub ]; then
-                    if ! grep -q -f /root/key.pub /root/.ssh/authorized_keys; then
-                        cat /root/key.pub >> /root/.ssh/authorized_keys
-                        echo "new key added to authorized_keys"
-                    fi
+                    cat /root/key.pub >> /root/.ssh/authorized_keys
+                    echo "new key added to authorized_keys"
                     chmod 600 /root/.ssh/authorized_keys
                     rm -f /root/key.pub
                 fi
@@ -48,13 +46,22 @@ with lib;
             # the supplied user data, if available.  Otherwise sshd will
             # generate one normally.
             $wget http://169.254.169.254/2011-01-01/user-data > /root/user-data || true
+
+            mkdir -m 0755 -p /etc/ssh
+
             key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' /root/user-data)"
             key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' /root/user-data)"
             if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
-                mkdir -m 0755 -p /etc/ssh
                 (umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
                 echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
             fi
+
+            key="$(sed 's/|/\n/g; s/SSH_HOST_ED25519_KEY://; t; d' /root/user-data)"
+            key_pub="$(sed 's/SSH_HOST_ED25519_KEY_PUB://; t; d' /root/user-data)"
+            if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_ed25519_key ]; then
+                (umask 077; echo "$key" > /etc/ssh/ssh_host_ed25519_key)
+                echo "$key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
+            fi
           '';
 
         serviceConfig.Type = "oneshot";
@@ -71,7 +78,9 @@ with lib;
             # can obtain it securely by parsing the output of
             # ec2-get-console-output.
             echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----" > /dev/console
-            ${config.programs.ssh.package}/bin/ssh-keygen -l -f /etc/ssh/ssh_host_dsa_key.pub > /dev/console
+            for i in /etc/ssh/ssh_host_*_key.pub; do
+                ${config.programs.ssh.package}/bin/ssh-keygen -l -f $i > /dev/console
+            done
             echo "-----END SSH HOST KEY FINGERPRINTS-----" > /dev/console
           '';
         serviceConfig.Type = "oneshot";
diff --git a/nixos/modules/virtualisation/growpart-util-linux-2.26.patch b/nixos/modules/virtualisation/growpart-util-linux-2.26.patch
new file mode 100644
index 000000000000..c782c2d7e4bd
--- /dev/null
+++ b/nixos/modules/virtualisation/growpart-util-linux-2.26.patch
@@ -0,0 +1,88 @@
+From 1895d10a7539d055a4e0206af1e7a9e5ea32a4f7 Mon Sep 17 00:00:00 2001
+From: Juerg Haefliger <juerg.haefliger@hp.com>
+Date: Wed, 25 Mar 2015 13:59:20 +0100
+Subject: [PATCH] Support new sfdisk version 2.26
+
+The sfdisk usage with version 2.26 changed. Specifically, the option
+--show-pt-geometry and functionality for CHS have been removed.
+Also, restoring a backup MBR now needs to be done using dd.
+---
+ bin/growpart | 28 ++++++++++------------------
+ 1 file changed, 10 insertions(+), 18 deletions(-)
+
+diff --git a/bin/growpart b/bin/growpart
+index 595c40b..d4c995b 100755
+--- a/bin/growpart
++++ b/bin/growpart
+@@ -28,7 +28,6 @@ PART=""
+ PT_UPDATE=false
+ DRY_RUN=0
+ 
+-MBR_CHS=""
+ MBR_BACKUP=""
+ GPT_BACKUP=""
+ _capture=""
+@@ -133,7 +132,8 @@ bad_Usage() {
+ }
+ 
+ mbr_restore() {
+-	sfdisk --no-reread "${DISK}" ${MBR_CHS} -I "${MBR_BACKUP}"
++	dd if="${MBR_BACKUP}-${DISK#/dev/}-0x00000000.bak" of="${DISK}" bs=1 \
++		conv=notrunc
+ }
+ 
+ sfdisk_worked_but_blkrrpart_failed() {
+@@ -148,34 +148,26 @@ sfdisk_worked_but_blkrrpart_failed() {
+ 
+ mbr_resize() {
+ 	RESTORE_HUMAN="${TEMP_D}/recovery"
+-	MBR_BACKUP="${TEMP_D}/orig.save"
++	MBR_BACKUP="${TEMP_D}/backup"
+ 
+ 	local change_out=${TEMP_D}/change.out
+ 	local dump_out=${TEMP_D}/dump.out
+ 	local new_out=${TEMP_D}/new.out
+ 	local dump_mod=${TEMP_D}/dump.mod
+-	local tmp="${TEMP_D}/tmp.out"
+-	local err="${TEMP_D}/err.out"
+ 
+-	local _devc cyl _w1 heads _w2 sectors _w3 tot dpart
++	local tot dpart
+ 	local pt_start pt_size pt_end max_end new_size change_info
+ 
+-	# --show-pt-geometry outputs something like
+-	#     /dev/sda: 164352 cylinders, 4 heads, 32 sectors/track
+-	rqe sfd_geom sfdisk "${DISK}" --show-pt-geometry >"${tmp}" &&
+-		read _devc cyl _w1 heads _w2 sectors _w3 <"${tmp}" &&
+-		MBR_CHS="-C ${cyl} -H ${heads} -S ${sectors}" ||
+-		fail "failed to get CHS from ${DISK}"
++	tot=$(sfdisk --list "${DISK}" | awk '{ print $(NF-1) ; exit }') ||
++		fail "failed to get total number of sectors from ${DISK}"
+ 
+-	tot=$((${cyl}*${heads}*${sectors}))
++	debug 1 "total number of sectors of ${DISK} is ${tot}"
+ 
+-	debug 1 "geometry is ${MBR_CHS}. total size=${tot}"
+-	rqe sfd_dump sfdisk ${MBR_CHS} --unit=S --dump "${DISK}" \
++	rqe sfd_dump sfdisk --dump "${DISK}" \
+ 		>"${dump_out}" ||
+ 		fail "failed to dump sfdisk info for ${DISK}"
+-
+ 	{
+-		echo "## sfdisk ${MBR_CHS} --unit=S --dump ${DISK}"
++		echo "## sfdisk --dump ${DISK}"
+ 		cat "${dump_out}"
+ 	}  >"${RESTORE_HUMAN}"
+ 	[ $? -eq 0 ] || fail "failed to save sfdisk -d output"
+@@ -237,7 +229,7 @@ mbr_resize() {
+ 		exit 0
+ 	fi
+ 
+-	LANG=C sfdisk --no-reread "${DISK}" ${MBR_CHS} --force \
++	LANG=C sfdisk --no-reread "${DISK}" --force \
+ 		-O "${MBR_BACKUP}" <"${new_out}" >"${change_out}" 2>&1
+ 	ret=$?
+ 	[ $ret -eq 0 ] || RESTORE_FUNC="mbr_restore"
+-- 
+2.1.4
+
diff --git a/nixos/modules/virtualisation/openvswitch.nix b/nixos/modules/virtualisation/openvswitch.nix
index b5155246fdad..a0231315236c 100644
--- a/nixos/modules/virtualisation/openvswitch.nix
+++ b/nixos/modules/virtualisation/openvswitch.nix
@@ -19,6 +19,15 @@ in {
         '';
     };
 
+    resetOnStart = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to reset the Open vSwitch configuration database to a default
+        configuration on every start of the systemd <literal>ovsdb.service</literal>.
+        '';
+    };
+
     package = mkOption {
       type = types.package;
       default = pkgs.openvswitch;
@@ -75,6 +84,7 @@ in {
         mkdir -p ${runDir}
         mkdir -p /var/db/openvswitch
         chmod +w /var/db/openvswitch
+        ${optionalString cfg.resetOnStart "rm -f /var/db/openvswitch/conf.db"}
         if [[ ! -e /var/db/openvswitch/conf.db ]]; then
           ${cfg.package}/bin/ovsdb-tool create \
             "/var/db/openvswitch/conf.db" \
@@ -98,6 +108,7 @@ in {
         Restart = "always";
         RestartSec = 3;
         PIDFile = "/var/run/openvswitch/ovsdb.pid";
+        # Use service type 'forking' to correctly determine when ovsdb-server is ready.
         Type = "forking";
       };
       postStart = ''
@@ -118,6 +129,7 @@ in {
           --detach
         '';
         PIDFile = "/var/run/openvswitch/ovs-vswitchd.pid";
+        # Use service type 'forking' to correctly determine when vswitchd is ready.
         Type = "forking";
       };
     };
@@ -147,6 +159,7 @@ in {
             unix:/var/run/openvswitch/db.sock
         '';
         PIDFile = "/var/run/openvswitch/ovs-monitor-ipsec.pid";
+        # Use service type 'forking' to correctly determine when ovs-monitor-ipsec is ready.
         Type = "forking";
       };
 
diff --git a/nixos/modules/virtualisation/virtualbox-guest.nix b/nixos/modules/virtualisation/virtualbox-guest.nix
index 642ea3154b1b..a025aee7cfeb 100644
--- a/nixos/modules/virtualisation/virtualbox-guest.nix
+++ b/nixos/modules/virtualisation/virtualbox-guest.nix
@@ -32,7 +32,8 @@ in
 
     boot.extraModulePackages = [ kernel.virtualboxGuestAdditions ];
 
-    boot.kernelModules = [ "vboxsf" ];
+    boot.supportedFilesystems = [ "vboxsf" ];
+    boot.initrd.supportedFilesystems = [ "vboxsf" ];
 
     users.extraGroups.vboxsf.gid = config.ids.gids.vboxsf;
 
diff --git a/nixos/modules/virtualisation/virtualbox-image.nix b/nixos/modules/virtualisation/virtualbox-image.nix
index 2d3b4834fc5b..425726333c40 100644
--- a/nixos/modules/virtualisation/virtualbox-image.nix
+++ b/nixos/modules/virtualisation/virtualbox-image.nix
@@ -11,93 +11,37 @@ in {
   options = {
     virtualbox = {
       baseImageSize = mkOption {
-        type = types.str;
-        default = "10G";
+        type = types.int;
+        default = 10 * 1024;
         description = ''
-          The size of the VirtualBox base image. The size string should be on
-          a format the qemu-img command accepts.
+          The size of the VirtualBox base image in MiB.
         '';
       };
     };
   };
 
   config = {
-    system.build.virtualBoxImage =
-      pkgs.vmTools.runInLinuxVM (
-        pkgs.runCommand "virtualbox-image"
-          { memSize = 768;
-            preVM =
-              ''
-                mkdir $out
-                diskImage=$out/image
-                ${pkgs.vmTools.qemu}/bin/qemu-img create -f raw $diskImage "${cfg.baseImageSize}"
-                mv closure xchg/
-              '';
-            postVM =
-              ''
-                echo "creating VirtualBox disk image..."
-                ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -O vdi $diskImage $out/disk.vdi
-                rm $diskImage
-              '';
-            buildInputs = [ pkgs.utillinux pkgs.perl ];
-            exportReferencesGraph =
-              [ "closure" config.system.build.toplevel ];
+
+    system.build.virtualBoxImage = import ../../lib/make-disk-image.nix {
+      inherit pkgs lib config;
+      partitioned = true;
+      diskSize = cfg.baseImageSize;
+
+      configFile = pkgs.writeText "configuration.nix"
+        ''
+          {
+            imports = [ <nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix> ];
           }
-          ''
-            # Create a single / partition.
-            ${pkgs.parted}/sbin/parted /dev/vda mklabel msdos
-            ${pkgs.parted}/sbin/parted /dev/vda -- mkpart primary ext2 1M -1s
-            . /sys/class/block/vda1/uevent
-            mknod /dev/vda1 b $MAJOR $MINOR
-  
-            # Create an empty filesystem and mount it.
-            ${pkgs.e2fsprogs}/sbin/mkfs.ext4 -L nixos /dev/vda1
-            ${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
-            mkdir /mnt
-            mount /dev/vda1 /mnt
-  
-            # The initrd expects these directories to exist.
-            mkdir /mnt/dev /mnt/proc /mnt/sys
-            mount --bind /proc /mnt/proc
-            mount --bind /dev /mnt/dev
-            mount --bind /sys /mnt/sys
-  
-            # Copy all paths in the closure to the filesystem.
-            storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
-  
-            echo "filling Nix store..."
-            mkdir -p /mnt/nix/store
-            set -f
-            cp -prd $storePaths /mnt/nix/store/
-  
-            mkdir -p /mnt/etc/nix
-            echo 'build-users-group = ' > /mnt/etc/nix/nix.conf
-  
-            # Register the paths in the Nix database.
-            printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
-                chroot /mnt ${config.nix.package}/bin/nix-store --load-db
-  
-            # Create the system profile to allow nixos-rebuild to work.
-            chroot /mnt ${config.nix.package}/bin/nix-env \
-                -p /nix/var/nix/profiles/system --set ${config.system.build.toplevel}
-  
-            # `nixos-rebuild' requires an /etc/NIXOS.
-            mkdir -p /mnt/etc/nixos
-            touch /mnt/etc/NIXOS
-  
-            # `switch-to-configuration' requires a /bin/sh
-            mkdir -p /mnt/bin
-            ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
-  
-            # Generate the GRUB menu.
-            ln -s vda /dev/sda
-            chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
-  
-            umount /mnt/proc /mnt/dev /mnt/sys
-            umount /mnt
-          ''
-      );
-  
+        '';
+
+      postVM =
+        ''
+          echo "creating VirtualBox disk image..."
+          ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -O vdi $diskImage $out/disk.vdi
+          rm $diskImage
+        '';
+    };
+
     system.build.virtualBoxOVA = pkgs.runCommand "virtualbox-ova"
       { buildInputs = [ pkgs.linuxPackages.virtualbox ];
         vmName = "NixOS ${config.system.nixosVersion} (${pkgs.stdenv.system})";
@@ -118,17 +62,17 @@ in {
         VBoxManage storagectl "$vmName" --name SATA --add sata --portcount 4 --bootable on --hostiocache on
         VBoxManage storageattach "$vmName" --storagectl SATA --port 0 --device 0 --type hdd \
           --medium ${config.system.build.virtualBoxImage}/disk.vdi
-  
+
         echo "exporting VirtualBox VM..."
         mkdir -p $out
         VBoxManage export "$vmName" --output "$out/$fileName"
       '';
-  
+
     fileSystems."/".device = "/dev/disk/by-label/nixos";
-  
-    boot.loader.grub.version = 2;
+
     boot.loader.grub.device = "/dev/sda";
-  
+
     virtualisation.virtualbox.guest.enable = true;
+
   };
 }
diff --git a/nixos/release.nix b/nixos/release.nix
index 4492ee4046ea..1a1ed4bca410 100644
--- a/nixos/release.nix
+++ b/nixos/release.nix
@@ -220,7 +220,7 @@ in rec {
   tests.dockerRegistry = hydraJob (import tests/docker-registry.nix { system = "x86_64-linux"; });
   tests.etcd = hydraJob (import tests/etcd.nix { system = "x86_64-linux"; });
   tests.ec2-nixops = hydraJob (import tests/ec2.nix { system = "x86_64-linux"; }).boot-ec2-nixops;
-  tests.ec2-config = hydraJob (import tests/ec2.nix { system = "x86_64-linux"; }).boot-ec2-config;
+  #tests.ec2-config = hydraJob (import tests/ec2.nix { system = "x86_64-linux"; }).boot-ec2-config;
   tests.firefox = callTest tests/firefox.nix {};
   tests.firewall = callTest tests/firewall.nix {};
   tests.fleet = hydraJob (import tests/fleet.nix { system = "x86_64-linux"; });
diff --git a/nixos/tests/ec2.nix b/nixos/tests/ec2.nix
index 1296ff4e8e30..b12d498e3a09 100644
--- a/nixos/tests/ec2.nix
+++ b/nixos/tests/ec2.nix
@@ -9,9 +9,18 @@ let
     (import ../lib/eval-config.nix {
       inherit system;
       modules = [
-        ../maintainers/scripts/ec2/amazon-hvm-config.nix
+        ../maintainers/scripts/ec2/amazon-image.nix
         ../../nixos/modules/testing/test-instrumentation.nix
-        { boot.initrd.kernelModules = [ "virtio" "virtio_blk" "virtio_pci" "virtio_ring" ]; }
+        { boot.initrd.kernelModules = [ "virtio" "virtio_blk" "virtio_pci" "virtio_ring" ];
+          ec2.hvm = true;
+
+          # Hack to make the partition resizing work in QEMU.
+          boot.initrd.postDeviceCommands = mkBefore
+            ''
+              ln -s vda /dev/xvda
+              ln -s vda1 /dev/xvda1
+            '';
+        }
       ];
     }).config.system.build.amazonImage;
 
@@ -34,41 +43,49 @@ let
       nodes = {};
       testScript =
         ''
-          use File::Temp qw/ tempfile /;
-          my ($fh, $filename) = tempfile();
-
-          `qemu-img create -f qcow2 -o backing_file=${image}/nixos.img $filename`;
-
-          my $startCommand = "qemu-kvm -m 768 -net nic -net 'user,net=169.254.0.0/16,guestfwd=tcp:169.254.169.254:80-cmd:${pkgs.micro-httpd}/bin/micro_httpd ${metaData}'";
-          $startCommand .= " -drive file=" . Cwd::abs_path($filename) . ",if=virtio,werror=report";
+          my $imageDir = ($ENV{'TMPDIR'} // "/tmp") . "/vm-state-machine";
+          mkdir $imageDir, 0700;
+          my $diskImage = "$imageDir/machine.qcow2";
+          system("qemu-img create -f qcow2 -o backing_file=${image}/nixos.img $diskImage") == 0 or die;
+          system("qemu-img resize $diskImage 10G") == 0 or die;
+
+          # Note: we use net=169.0.0.0/8 rather than
+          # net=169.254.0.0/16 to prevent dhcpcd from getting horribly
+          # confused. (It would get a DHCP lease in the 169.254.*
+          # range, which it would then configure and prompty delete
+          # again when it deletes link-local addresses.) Ideally we'd
+          # turn off the DHCP server, but qemu does not have an option
+          # to do that.
+          my $startCommand = "qemu-kvm -m 768 -net nic -net 'user,net=169.0.0.0/8,guestfwd=tcp:169.254.169.254:80-cmd:${pkgs.micro-httpd}/bin/micro_httpd ${metaData}'";
+          $startCommand .= " -drive file=$diskImage,if=virtio,werror=report";
           $startCommand .= " \$QEMU_OPTS";
 
           my $machine = createMachine({ startCommand => $startCommand });
+
           ${script}
         '';
     };
 
-  snakeOilPrivateKey = [
-    "-----BEGIN EC PRIVATE KEY-----"
-    "MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49"
-    "AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN"
-    "r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA=="
-    "-----END EC PRIVATE KEY-----"
-  ];
-
-  snakeOilPublicKey = pkgs.lib.concatStrings [
-    "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
-    "yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
-    "9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= snakeoil"
-  ];
+  snakeOilPrivateKey = ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACDEPmwZv5dDPrMUaq0dDP+6eBTTe+QNrz14KBEIdhHd1QAAAJDufJ4S7nye
+    EgAAAAtzc2gtZWQyNTUxOQAAACDEPmwZv5dDPrMUaq0dDP+6eBTTe+QNrz14KBEIdhHd1Q
+    AAAECgwbDlYATM5/jypuptb0GF/+zWZcJfoVIFBG3LQeRyGsQ+bBm/l0M+sxRqrR0M/7p4
+    FNN75A2vPXgoEQh2Ed3VAAAADEVDMiB0ZXN0IGtleQE=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+
+  snakeOilPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMQ+bBm/l0M+sxRqrR0M/7p4FNN75A2vPXgoEQh2Ed3V EC2 test key";
+
 in {
   boot-ec2-nixops = makeEc2Test {
     name         = "nixops-userdata";
     sshPublicKey = snakeOilPublicKey; # That's right folks! My user's key is also the host key!
 
     userData = ''
-      SSH_HOST_DSA_KEY_PUB:${snakeOilPublicKey}
-      SSH_HOST_DSA_KEY:${pkgs.lib.concatStringsSep "|" snakeOilPrivateKey}
+      SSH_HOST_ED25519_KEY_PUB:${snakeOilPublicKey}
+      SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey}
     '';
     script = ''
       $machine->start;
@@ -80,8 +97,9 @@ in {
 
       # Let's install our client private key
       $machine->succeed("mkdir -p ~/.ssh");
-      ${concatMapStrings (s: "$machine->succeed('echo ${s} >> ~/.ssh/id_ecdsa');") snakeOilPrivateKey}
-      $machine->succeed("chmod 600 ~/.ssh/id_ecdsa");
+
+      $machine->succeed("echo '${snakeOilPrivateKey}' > ~/.ssh/id_ed25519");
+      $machine->succeed("chmod 600 ~/.ssh/id_ed25519");
 
       # We haven't configured the host key yet, so this should still fail
       $machine->fail("ssh -o BatchMode=yes localhost exit");
@@ -90,7 +108,16 @@ in {
       $machine->succeed("echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts");
       $machine->succeed("ssh -o BatchMode=yes localhost exit");
 
+      # Test whether the root disk was resized.
+      my $blocks = $machine->succeed("stat -c %b -f /");
+      my $bsize = $machine->succeed("stat -c %S -f /");
+      my $size = $blocks * $bsize;
+      die "wrong free space $size" if $size < 9.7 * 1024 * 1024 * 1024 || $size > 10 * 1024 * 1024 * 1024;
+
+      # Just to make sure resizing is idempotent.
       $machine->shutdown;
+      $machine->start;
+      $machine->waitForFile("/root/user-data");
     '';
   };
 
diff --git a/nixos/tests/gnome3.nix b/nixos/tests/gnome3.nix
index f5e0159f1c7d..7662efe1b350 100644
--- a/nixos/tests/gnome3.nix
+++ b/nixos/tests/gnome3.nix
@@ -28,7 +28,8 @@ import ./make-test.nix ({ pkgs, ...} : {
 
       $machine->succeed("su - alice -c 'DISPLAY=:0.0 gnome-terminal &'");
       $machine->waitForWindow(qr/Terminal/);
-      $machine->sleep(20);
+      $machine->mustSucceed("timeout 60 bash -c 'journalctl -f|grep -m 1 \"GNOME Shell started\"'");
+      $machine->sleep(10);
       $machine->screenshot("screen");
     '';
 })
diff --git a/nixos/tests/make-test.nix b/nixos/tests/make-test.nix
index 285ca5b71d6e..f3e26aa7e74d 100644
--- a/nixos/tests/make-test.nix
+++ b/nixos/tests/make-test.nix
@@ -2,4 +2,4 @@ f: { system ? builtins.currentSystem, ... } @ args:
 
 with import ../lib/testing.nix { inherit system; };
 
-makeTest (if builtins.isFunction f then f (args // { inherit pkgs; }) else f)
+makeTest (if builtins.isFunction f then f (args // { inherit pkgs; inherit (pkgs) lib; }) else f)
diff --git a/nixos/tests/virtualbox.nix b/nixos/tests/virtualbox.nix
index 1a5a6f7b5bbc..01fcd15fd8bb 100644
--- a/nixos/tests/virtualbox.nix
+++ b/nixos/tests/virtualbox.nix
@@ -141,6 +141,7 @@ import ./make-test.nix ({ pkgs, ... }: with pkgs.lib; let
     vmFlags = mkFlags ([
       "--uart1 0x3F8 4"
       "--uartmode1 client /run/virtualbox-log-${name}.sock"
+      "--memory 768"
     ] ++ (attrs.vmFlags or []));
 
     controllerFlags = mkFlags [
@@ -324,7 +325,7 @@ in {
       mkVMConf = name: val: val.machine // { key = "${name}-config"; };
       vmConfigs = mapAttrsToList mkVMConf vboxVMs;
     in [ ./common/user-account.nix ./common/x11.nix ] ++ vmConfigs;
-    virtualisation.memorySize = 1024;
+    virtualisation.memorySize = 2048;
     virtualisation.virtualbox.host.enable = true;
     users.extraUsers.alice.extraGroups = let
       inherit (config.virtualisation.virtualbox.host) enableHardening;
@@ -389,6 +390,21 @@ in {
 
     destroyVM_simple;
 
+    sub removeUUIDs {
+      return join("\n", grep { $_ !~ /^UUID:/ } split(/\n/, $_[0]))."\n";
+    }
+
+    subtest "host-usb-permissions", sub {
+      my $userUSB = removeUUIDs vbm("list usbhost");
+      print STDERR $userUSB;
+      my $rootUSB = removeUUIDs $machine->succeed("VBoxManage list usbhost");
+      print STDERR $rootUSB;
+
+      die "USB host devices differ for root and normal user"
+        if $userUSB ne $rootUSB;
+      die "No USB host devices found" if $userUSB =~ /<none>/;
+    };
+
     subtest "systemd-detect-virt", sub {
       createVM_detectvirt;
       vbm("startvm detectvirt");
@@ -397,6 +413,7 @@ in {
       shutdownVM_detectvirt;
       my $result = $machine->succeed("cat '$detectvirt_sharepath/result'");
       chomp $result;
+      destroyVM_detectvirt;
       die "systemd-detect-virt returned \"$result\" instead of \"oracle\""
         if $result ne "oracle";
     };
@@ -407,11 +424,10 @@ in {
 
       vbm("startvm test1");
       waitForStartup_test1;
+      waitForVMBoot_test1;
 
       vbm("startvm test2");
       waitForStartup_test2;
-
-      waitForVMBoot_test1;
       waitForVMBoot_test2;
 
       $machine->screenshot("net_booted");