Diffstat (limited to 'nixpkgs/nixos/modules/virtualisation')
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/amazon-image.nix | 143
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/amazon-init.nix | 61
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/amazon-options.nix | 14
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/anbox.nix | 139
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-agent-entropy.patch | 17
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-agent.nix | 199
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-common.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-config-user.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-config.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-image.nix | 60
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-images.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/brightbox-config.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/brightbox-image.nix | 166
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix | 40
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/container-config.nix | 29
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/containers.nix | 783
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker-containers.nix | 230
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker-image.nix | 57
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker-preloader.nix | 134
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker.nix | 223
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/ec2-amis.nix | 295
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/ec2-data.nix | 87
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.nix | 23
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/ecs-agent.nix | 46
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/gce-images.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/google-compute-config.nix | 235
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/google-compute-image.nix | 61
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/grow-partition.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/kvmgt.nix | 82
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/libvirtd.nix | 251
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxc-container.nix | 26
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxc.nix | 82
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxcfs.nix | 45
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxd.nix | 83
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/openstack-config.nix | 58
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/openvswitch.nix | 176
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/parallels-guest.nix | 156
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix | 36
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/qemu-vm.nix | 585
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/rkt.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix | 93
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix | 153
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix | 111
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/vmware-guest.nix | 56
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix | 52
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/xen-dom0.nix | 461
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/xen-domU.nix | 19
49 files changed, 5801 insertions, 0 deletions
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-image.nix b/nixpkgs/nixos/modules/virtualisation/amazon-image.nix
new file mode 100644
index 000000000000..0c4ad90b4eb6
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-image.nix
@@ -0,0 +1,143 @@
+# Configuration for Amazon EC2 instances. (Note that this file is a
+# misnomer - it should be "amazon-config.nix" or so, not
+# "amazon-image.nix", since it's used not only to build images but
+# also to reconfigure instances. However, we can't rename it because
+# existing "configuration.nix" files on EC2 instances refer to it.)
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.ec2;
+  metadataFetcher = import ./ec2-metadata-fetcher.nix {
+    targetRoot = "$targetRoot/";
+    wgetExtraOptions = "-q";
+  };
+in
+
+{
+  imports = [ ../profiles/headless.nix ./ec2-data.nix ./amazon-init.nix ];
+
+  config = {
+
+    assertions = [
+      { assertion = cfg.hvm;
+        message = "Paravirtualized EC2 instances are no longer supported.";
+      }
+    ];
+
+    boot.growPartition = cfg.hvm;
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    boot.extraModulePackages = [
+      config.boot.kernelPackages.ena
+    ];
+    boot.initrd.kernelModules = [ "xen-blkfront" "xen-netfront" ];
+    boot.initrd.availableKernelModules = [ "ixgbevf" "ena" "nvme" ];
+    boot.kernelParams = mkIf cfg.hvm [ "console=ttyS0" ];
+
+    # Prevent the nouveau kernel module from being loaded, as it
+    # interferes with the nvidia/nvidia-uvm modules needed for CUDA.
+    # Also blacklist xen_fbfront to prevent a 30 second delay during
+    # boot.
+    boot.blacklistedKernelModules = [ "nouveau" "xen_fbfront" ];
+
+    # Generate a GRUB menu.  Amazon's pv-grub uses this to boot our kernel/initrd.
+    boot.loader.grub.version = if cfg.hvm then 2 else 1;
+    boot.loader.grub.device = if cfg.hvm then "/dev/xvda" else "nodev";
+    boot.loader.grub.extraPerEntryConfig = mkIf (!cfg.hvm) "root (hd0)";
+    boot.loader.timeout = 0;
+
+    boot.initrd.network.enable = true;
+
+    # Mount all formatted ephemeral disks and activate all swap devices.
+    # We cannot do this with the ‘fileSystems’ and ‘swapDevices’ options
+    # because the set of devices is dependent on the instance type
+    # (e.g. "m1.small" has one ephemeral filesystem and one swap device,
+    # while "m1.large" has two ephemeral filesystems and no swap
+    # devices).  Also, put /tmp and /var on /disk0, since it has a lot
+    # more space than the root device.  Similarly, "move" /nix to /disk0
+    # by layering a unionfs-fuse mount on top of it so we have a lot more space for
+    # Nix operations.
+    boot.initrd.postMountCommands =
+      ''
+        ${metadataFetcher}
+
+        diskNr=0
+        diskForUnionfs=
+        for device in /dev/xvd[abcde]*; do
+            if [ "$device" = /dev/xvda -o "$device" = /dev/xvda1 ]; then continue; fi
+            fsType=$(blkid -o value -s TYPE "$device" || true)
+            if [ "$fsType" = swap ]; then
+                echo "activating swap device $device..."
+                swapon "$device" || true
+            elif [ "$fsType" = ext3 ]; then
+                mp="/disk$diskNr"
+                diskNr=$((diskNr + 1))
+                if mountFS "$device" "$mp" "" ext3; then
+                    if [ -z "$diskForUnionfs" ]; then diskForUnionfs="$mp"; fi
+                fi
+            else
+                echo "skipping unknown device type $device"
+            fi
+        done
+
+        if [ -n "$diskForUnionfs" ]; then
+            mkdir -m 755 -p $targetRoot/$diskForUnionfs/root
+
+            mkdir -m 1777 -p $targetRoot/$diskForUnionfs/root/tmp $targetRoot/tmp
+            mount --bind $targetRoot/$diskForUnionfs/root/tmp $targetRoot/tmp
+
+            if [ "$(cat "$metaDir/ami-manifest-path")" != "(unknown)" ]; then
+                mkdir -m 755 -p $targetRoot/$diskForUnionfs/root/var $targetRoot/var
+                mount --bind $targetRoot/$diskForUnionfs/root/var $targetRoot/var
+
+                mkdir -p /unionfs-chroot/ro-nix
+                mount --rbind $targetRoot/nix /unionfs-chroot/ro-nix
+
+                mkdir -m 755 -p $targetRoot/$diskForUnionfs/root/nix
+                mkdir -p /unionfs-chroot/rw-nix
+                mount --rbind $targetRoot/$diskForUnionfs/root/nix /unionfs-chroot/rw-nix
+
+                unionfs -o allow_other,cow,nonempty,chroot=/unionfs-chroot,max_files=32768 /rw-nix=RW:/ro-nix=RO $targetRoot/nix
+            fi
+        fi
+      '';
+
+    boot.initrd.extraUtilsCommands =
+      ''
+        # We need swapon in the initrd.
+        copy_bin_and_libs ${pkgs.utillinux}/sbin/swapon
+      '';
+
+    # Don't put old configurations in the GRUB menu.  The user has no
+    # way to select them anyway.
+    boot.loader.grub.configurationLimit = 0;
+
+    # Allow root logins only using the SSH key that the user specified
+    # at instance creation time.
+    services.openssh.enable = true;
+    services.openssh.permitRootLogin = "prohibit-password";
+
+    # Force getting the hostname from EC2.
+    networking.hostName = mkDefault "";
+
+    # Always include cryptsetup so that Charon can use it.
+    environment.systemPackages = [ pkgs.cryptsetup ];
+
+    boot.initrd.supportedFilesystems = [ "unionfs-fuse" ];
+
+    # EC2 has its own NTP server provided by the hypervisor
+    networking.timeServers = [ "169.254.169.123" ];
+
+    # udisks has become too bloated to have in a headless system
+    # (e.g. it depends on GTK+).
+    services.udisks2.enable = false;
+  };
+}
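
[Editor's note] For reference, a minimal sketch of an instance-side /etc/nixos/configuration.nix that pulls this module in; the official AMIs already import it, and the commented service line is only a placeholder for site-specific settings:

    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/amazon-image.nix" ];
      ec2.hvm = true;
      # site-specific configuration goes here, for example:
      # services.nginx.enable = true;
    }
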
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-init.nix b/nixpkgs/nixos/modules/virtualisation/amazon-init.nix
new file mode 100644
index 000000000000..8032b2c6d7ca
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-init.nix
@@ -0,0 +1,61 @@
+{ config, pkgs, ... }:
+
+let
+  script = ''
+    #!${pkgs.runtimeShell} -eu
+
+    echo "attempting to fetch configuration from EC2 user data..."
+
+    export HOME=/root
+    export PATH=${pkgs.lib.makeBinPath [ config.nix.package pkgs.systemd pkgs.gnugrep pkgs.gnused config.system.build.nixos-rebuild]}:$PATH
+    export NIX_PATH=/nix/var/nix/profiles/per-user/root/channels/nixos:nixos-config=/etc/nixos/configuration.nix:/nix/var/nix/profiles/per-user/root/channels
+
+    userData=/etc/ec2-metadata/user-data
+
+    if [ -s "$userData" ]; then
+      # If the user-data looks like it could be a nix expression,
+      # copy it over. Also, look for a magic three-hash comment and set
+      # that as the channel.
+      if sed '/^\(#\|SSH_HOST_.*\)/d' < "$userData" | grep -q '\S'; then
+        channels="$(grep '^###' "$userData" | sed 's|###\s*||')"
+        printf "%s" "$channels" | while read channel; do
+          echo "writing channel: $channel"
+        done
+
+        if [[ -n "$channels" ]]; then
+          printf "%s" "$channels" > /root/.nix-channels
+          nix-channel --update
+        fi
+
+        echo "setting configuration from EC2 user data"
+        cp "$userData" /etc/nixos/configuration.nix
+      else
+        echo "user data does not appear to be a Nix expression; ignoring"
+        exit
+      fi
+    else
+      echo "no user data is available"
+      exit
+    fi
+
+    nixos-rebuild switch
+  '';
+in {
+  systemd.services.amazon-init = {
+    inherit script;
+    description = "Reconfigure the system from EC2 userdata on startup";
+
+    wantedBy = [ "multi-user.target" ];
+    after = [ "multi-user.target" ];
+    requires = [ "network-online.target" ];
+ 
+    restartIfChanged = false;
+    unitConfig.X-StopOnRemoval = false;
+
+    serviceConfig = {
+      Type = "oneshot";
+      RemainAfterExit = true;
+    };
+  };
+}
+
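[Editor's note] amazon-init reads /etc/ec2-metadata/user-data, treats leading ### lines as entries for /root/.nix-channels (URL followed by channel name, the usual .nix-channels layout), copies the whole file to /etc/nixos/configuration.nix, and runs nixos-rebuild switch. A sketch of user data in that shape, with the channel URL and services as placeholders:

    ### https://nixos.org/channels/nixos-19.03 nixos
    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/amazon-image.nix" ];
      services.openssh.enable = true;
    }
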
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-options.nix b/nixpkgs/nixos/modules/virtualisation/amazon-options.nix
new file mode 100644
index 000000000000..15de8638bbab
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-options.nix
@@ -0,0 +1,14 @@
+{ config, lib, ... }:
+{
+  options = {
+    ec2 = {
+      hvm = lib.mkOption {
+        default = lib.versionAtLeast config.system.stateVersion "17.03";
+        internal = true;
+        description = ''
+          Whether the EC2 instance is a HVM instance.
+        '';
+      };
+    };
+  };
+}
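
[Editor's note] The hvm default tracks system.stateVersion, so a system installed before 17.03 but running on an HVM instance would have to set it explicitly; a sketch with an illustrative stateVersion:

    {
      system.stateVersion = "16.09";  # hypothetical pre-17.03 install
      ec2.hvm = true;                 # override the stateVersion-derived default
    }
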
diff --git a/nixpkgs/nixos/modules/virtualisation/anbox.nix b/nixpkgs/nixos/modules/virtualisation/anbox.nix
new file mode 100644
index 000000000000..c63b971ead02
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/anbox.nix
@@ -0,0 +1,139 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.anbox;
+  kernelPackages = config.boot.kernelPackages;
+  addrOpts = v: addr: pref: name: {
+    address = mkOption {
+      default = addr;
+      type = types.str;
+      description = ''
+        IPv${toString v} ${name} address.
+      '';
+    };
+
+    prefixLength = mkOption {
+      default = pref;
+      type = types.addCheck types.int (n: n >= 0 && n <= (if v == 4 then 32 else 128));
+      description = ''
+        Subnet mask of the ${name} address, specified as the number of
+        bits in the prefix (<literal>${if v == 4 then "24" else "64"}</literal>).
+      '';
+    };
+  };
+
+in
+
+{
+
+  options.virtualisation.anbox = {
+
+    enable = mkEnableOption "Anbox";
+
+    image = mkOption {
+      default = pkgs.anbox.image;
+      example = literalExample "pkgs.anbox.image";
+      type = types.package;
+      description = ''
+        Base android image for Anbox.
+      '';
+    };
+
+    extraInit = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Extra shell commands to be run inside the container image during init.
+      '';
+    };
+
+    ipv4 = {
+      container = addrOpts 4 "192.168.250.2" 24 "Container";
+      gateway = addrOpts 4 "192.168.250.1" 24 "Host";
+
+      dns = mkOption {
+        default = "1.1.1.1";
+        type = types.string;
+        description = ''
+          Container DNS server.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = singleton {
+      assertion = versionAtLeast (getVersion config.boot.kernelPackages.kernel) "4.18";
+      message = "Anbox needs user namespace support to work properly";
+    };
+
+    environment.systemPackages = with pkgs; [ anbox ];
+
+    boot.kernelModules = [ "ashmem_linux" "binder_linux" ];
+    boot.extraModulePackages = [ kernelPackages.anbox ];
+
+    services.udev.extraRules = ''
+      KERNEL=="ashmem", NAME="%k", MODE="0666"
+      KERNEL=="binder*", NAME="%k", MODE="0666"
+    '';
+
+    virtualisation.lxc.enable = true;
+    networking.bridges.anbox0.interfaces = [];
+    networking.interfaces.anbox0.ipv4.addresses = [ cfg.ipv4.gateway ];
+
+    networking.nat = {
+      enable = true;
+      internalInterfaces = [ "anbox0" ];
+    };
+
+    systemd.services.anbox-container-manager = let
+      anboxloc = "/var/lib/anbox";
+    in {
+      description = "Anbox Container Management Daemon";
+
+      environment.XDG_RUNTIME_DIR="${anboxloc}";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+      preStart = let
+        initsh = pkgs.writeText "nixos-init" (''
+          #!/system/bin/sh
+          setprop nixos.version ${config.system.nixos.version}
+
+          # we don't have radio
+          setprop ro.radio.noril yes
+          stop ril-daemon
+
+          # speed up boot
+          setprop debug.sf.nobootanimation 1
+        '' + cfg.extraInit);
+        initshloc = "${anboxloc}/rootfs-overlay/system/etc/init.goldfish.sh";
+      in ''
+        mkdir -p ${anboxloc}
+        mkdir -p $(dirname ${initshloc})
+        [ -f ${initshloc} ] && rm ${initshloc}
+        cp ${initsh} ${initshloc}
+        chown 100000:100000 ${initshloc}
+        chmod +x ${initshloc}
+      '';
+
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.anbox}/bin/anbox container-manager \
+            --data-path=${anboxloc} \
+            --android-image=${cfg.image} \
+            --container-network-address=${cfg.ipv4.container.address} \
+            --container-network-gateway=${cfg.ipv4.gateway.address} \
+            --container-network-dns-servers=${cfg.ipv4.dns} \
+            --use-rootfs-overlay \
+            --privileged
+        '';
+      };
+    };
+  };
+
+}
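
[Editor's note] A minimal sketch of enabling this module with its defaults, overriding only the container DNS server (the address is a placeholder):

    {
      virtualisation.anbox.enable = true;
      virtualisation.anbox.ipv4.dns = "9.9.9.9";
    }
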
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-agent-entropy.patch b/nixpkgs/nixos/modules/virtualisation/azure-agent-entropy.patch
new file mode 100644
index 000000000000..2a7ad08a4afc
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-agent-entropy.patch
@@ -0,0 +1,17 @@
+--- a/waagent	2016-03-12 09:58:15.728088851 +0200
++++ a/waagent	2016-03-12 09:58:43.572680025 +0200
+@@ -6173,10 +6173,10 @@
+             Log("MAC  address: " + ":".join(["%02X" % Ord(a) for a in mac]))
+         
+         # Consume Entropy in ACPI table provided by Hyper-V
+-        try:
+-            SetFileContents("/dev/random", GetFileContents("/sys/firmware/acpi/tables/OEM0"))
+-        except:
+-            pass
++        #try:
++        #    SetFileContents("/dev/random", GetFileContents("/sys/firmware/acpi/tables/OEM0"))
++        #except:
++        #    pass
+ 
+         Log("Probing for Azure environment.")
+         self.Endpoint = self.DoDhcpWork()
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-agent.nix b/nixpkgs/nixos/modules/virtualisation/azure-agent.nix
new file mode 100644
index 000000000000..770cefbcd511
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-agent.nix
@@ -0,0 +1,199 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.azure.agent;
+
+  waagent = with pkgs; stdenv.mkDerivation rec {
+    name = "waagent-2.0";
+    src = pkgs.fetchFromGitHub {
+      owner = "Azure";
+      repo = "WALinuxAgent";
+      rev = "1b3a8407a95344d9d12a2a377f64140975f1e8e4";
+      sha256 = "10byzvmpgrmr4d5mdn2kq04aapqb3sgr1admk13wjmy5cd6bwd2x";
+    };
+
+    patches = [ ./azure-agent-entropy.patch ];
+
+    buildInputs = [ makeWrapper python pythonPackages.wrapPython ];
+    runtimeDeps = [ findutils gnugrep gawk coreutils openssl openssh
+                    nettools # for hostname
+                    procps # for pidof
+                    shadow # for useradd, usermod
+                    utillinux # for (u)mount, fdisk, sfdisk, mkswap
+                    parted
+                  ];
+    pythonPath = [ pythonPackages.pyasn1 ];
+
+    configurePhase = false;
+    buildPhase = false;
+
+    installPhase = ''
+      substituteInPlace config/99-azure-product-uuid.rules \
+          --replace /bin/chmod "${coreutils}/bin/chmod"
+      mkdir -p $out/lib/udev/rules.d
+      cp config/*.rules $out/lib/udev/rules.d
+
+      mkdir -p $out/bin
+      cp waagent $out/bin/
+      chmod +x $out/bin/waagent
+
+      wrapProgram "$out/bin/waagent" \
+          --prefix PYTHONPATH : $PYTHONPATH \
+          --prefix PATH : "${makeBinPath runtimeDeps}"
+    '';
+  };
+
+  provisionedHook = pkgs.writeScript "provisioned-hook" ''
+    #!${pkgs.runtimeShell}
+    ${config.systemd.package}/bin/systemctl start provisioned.target
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options.virtualisation.azure.agent = {
+    enable = mkOption {
+      default = false;
+      description = "Whether to enable the Windows Azure Linux Agent.";
+    };
+    verboseLogging = mkOption {
+      default = false;
+      description = "Whether to enable verbose logging.";
+    };
+    mountResourceDisk = mkOption {
+      default = true;
+      description = "Whether the agent should format (ext4) and mount the resource disk to /mnt/resource.";
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64;
+      message = "Azure not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    } {
+      assertion = config.networking.networkmanager.enable == false;
+      message = "Windows Azure Linux Agent is not compatible with NetworkManager";
+    } ];
+
+    boot.initrd.kernelModules = [ "ata_piix" ];
+    networking.firewall.allowedUDPPorts = [ 68 ];
+
+
+    environment.etc."waagent.conf".text = ''
+        #
+        # Windows Azure Linux Agent Configuration
+        #
+
+        Role.StateConsumer=${provisionedHook}
+
+        # Enable instance creation
+        Provisioning.Enabled=y
+
+        # Password authentication for root account will be unavailable.
+        Provisioning.DeleteRootPassword=n
+
+        # Generate fresh host key pair.
+        Provisioning.RegenerateSshHostKeyPair=n
+
+        # Supported values are "rsa", "dsa" and "ecdsa".
+        Provisioning.SshHostKeyPairType=ed25519
+
+        # Monitor host name changes and publish changes via DHCP requests.
+        Provisioning.MonitorHostName=y
+
+        # Decode CustomData from Base64.
+        Provisioning.DecodeCustomData=n
+
+        # Execute CustomData after provisioning.
+        Provisioning.ExecuteCustomData=n
+
+        # Format if unformatted. If 'n', resource disk will not be mounted.
+        ResourceDisk.Format=${if cfg.mountResourceDisk then "y" else "n"}
+
+        # File system on the resource disk
+        # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here.
+        ResourceDisk.Filesystem=ext4
+
+        # Mount point for the resource disk
+        ResourceDisk.MountPoint=/mnt/resource
+
+        # Respond to load balancer probes if requested by Windows Azure.
+        LBProbeResponder=y
+
+        # Enable logging to serial console (y|n)
+        # When stdout is not enough...
+        # 'y' if not set
+        Logs.Console=y
+
+        # Enable verbose logging (y|n)
+        Logs.Verbose=${if cfg.verboseLogging then "y" else "n"}
+
+        # Root device timeout in seconds.
+        OS.RootDeviceScsiTimeout=300
+    '';
+
+    services.udev.packages = [ waagent ];
+
+    networking.dhcpcd.persistent = true;
+
+    services.logrotate = {
+      enable = true;
+      config = ''
+        /var/log/waagent.log {
+            compress
+            monthly
+            rotate 6
+            notifempty
+            missingok
+        }
+      '';
+    };
+
+    systemd.targets.provisioned = {
+      description = "Services Requiring Azure VM provisioning to have finished";
+    };
+
+  systemd.services.consume-hypervisor-entropy =
+    { description = "Consume entropy in ACPI table provided by Hyper-V";
+
+      wantedBy = [ "sshd.service" "waagent.service" ];
+      before = [ "sshd.service" "waagent.service" ];
+      after = [ "local-fs.target" ];
+
+      path  = [ pkgs.coreutils ];
+      script =
+        ''
+          echo "Fetching entropy..."
+          cat /sys/firmware/acpi/tables/OEM0 > /dev/random
+        '';
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      serviceConfig.StandardError = "journal+console";
+      serviceConfig.StandardOutput = "journal+console";
+     };
+
+    systemd.services.waagent = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" "sshd.service" ];
+      wants = [ "network-online.target" ];
+
+      path = [ pkgs.e2fsprogs pkgs.bash ];
+      description = "Windows Azure Agent Service";
+      unitConfig.ConditionPathExists = "/etc/waagent.conf";
+      serviceConfig = {
+        ExecStart = "${waagent}/bin/waagent -daemon";
+        Type = "simple";
+      };
+    };
+
+  };
+
+}
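
[Editor's note] A sketch of enabling the agent from a user configuration; azure-common.nix below already does this for images built from these modules, and the two overrides are purely illustrative:

    {
      virtualisation.azure.agent.enable = true;
      virtualisation.azure.agent.verboseLogging = true;      # more detail in /var/log/waagent.log
      virtualisation.azure.agent.mountResourceDisk = false;  # leave the ephemeral resource disk alone
    }
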
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix b/nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix
new file mode 100644
index 000000000000..281be9a12318
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix
@@ -0,0 +1,3 @@
+{
+    "16.03" = "https://nixos.blob.core.windows.net/images/nixos-image-16.03.847.8688c17-x86_64-linux.vhd";
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-common.nix b/nixpkgs/nixos/modules/virtualisation/azure-common.nix
new file mode 100644
index 000000000000..03239991b95a
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-common.nix
@@ -0,0 +1,64 @@
+{ lib, pkgs, ... }:
+
+with lib;
+{
+  imports = [ ../profiles/headless.nix ];
+
+  require = [ ./azure-agent.nix ];
+  virtualisation.azure.agent.enable = true;
+
+  boot.kernelParams = [ "console=ttyS0" "earlyprintk=ttyS0" "rootdelay=300" "panic=1" "boot.panic_on_fail" ];
+  boot.initrd.kernelModules = [ "hv_vmbus" "hv_netvsc" "hv_utils" "hv_storvsc" ];
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/sda";
+  boot.loader.grub.version = 2;
+  boot.loader.timeout = 0;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  fileSystems."/".device = "/dev/disk/by-label/nixos";
+
+  # Allow root logins only using the SSH key that the user specified
+  # at instance creation time, ping client connections to avoid timeouts
+  services.openssh.enable = true;
+  services.openssh.permitRootLogin = "prohibit-password";
+  services.openssh.extraConfig = ''
+    ClientAliveInterval 180
+  '';
+
+  # Force getting the hostname from Azure
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  # sg_scan is needed to finalize disk removal on older kernels
+  environment.systemPackages = [ pkgs.cryptsetup pkgs.sg3_utils ];
+
+  networking.usePredictableInterfaceNames = false;
+
+  services.udev.extraRules = ''
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:0", ATTR{removable}=="0", SYMLINK+="disk/by-lun/0",
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:1", ATTR{removable}=="0", SYMLINK+="disk/by-lun/1",
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:2", ATTR{removable}=="0", SYMLINK+="disk/by-lun/2"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:3", ATTR{removable}=="0", SYMLINK+="disk/by-lun/3"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:4", ATTR{removable}=="0", SYMLINK+="disk/by-lun/4"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:5", ATTR{removable}=="0", SYMLINK+="disk/by-lun/5"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:6", ATTR{removable}=="0", SYMLINK+="disk/by-lun/6"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:7", ATTR{removable}=="0", SYMLINK+="disk/by-lun/7"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:8", ATTR{removable}=="0", SYMLINK+="disk/by-lun/8"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:9", ATTR{removable}=="0", SYMLINK+="disk/by-lun/9"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:10", ATTR{removable}=="0", SYMLINK+="disk/by-lun/10"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:11", ATTR{removable}=="0", SYMLINK+="disk/by-lun/11"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:12", ATTR{removable}=="0", SYMLINK+="disk/by-lun/12"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:13", ATTR{removable}=="0", SYMLINK+="disk/by-lun/13"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:14", ATTR{removable}=="0", SYMLINK+="disk/by-lun/14"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:15", ATTR{removable}=="0", SYMLINK+="disk/by-lun/15"
+
+  '';
+
+}
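
[Editor's note] The udev rules above give attached data disks stable /dev/disk/by-lun/N names. A sketch of mounting a data disk attached at LUN 2, assuming it has already been formatted; the mount point and filesystem are assumptions:

    {
      fileSystems."/data" = {
        device = "/dev/disk/by-lun/2";  # symlink created by the udev rules above
        fsType = "ext4";
      };
    }
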
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-config-user.nix b/nixpkgs/nixos/modules/virtualisation/azure-config-user.nix
new file mode 100644
index 000000000000..267ba50ae025
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-config-user.nix
@@ -0,0 +1,12 @@
+{ modulesPath, ... }:
+
+{
+  # To build the configuration or use nix-env, you need to run
+  # either nixos-rebuild --upgrade or nix-channel --update
+  # to fetch the nixos channel.
+
+  # This configures everything but bootstrap services,
+  # which only need to be run once and have already finished
+  # if you are able to see this comment.
+  imports = [ "${modulesPath}/virtualisation/azure-common.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-config.nix b/nixpkgs/nixos/modules/virtualisation/azure-config.nix
new file mode 100644
index 000000000000..780bd1b78dce
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-config.nix
@@ -0,0 +1,5 @@
+{ modulesPath, ... }:
+
+{
+  imports = [ "${modulesPath}/virtualisation/azure-image.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-image.nix b/nixpkgs/nixos/modules/virtualisation/azure-image.nix
new file mode 100644
index 000000000000..dd2108ccc379
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-image.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  diskSize = 2048;
+in
+{
+  system.build.azureImage = import ../../lib/make-disk-image.nix {
+    name = "azure-image";
+    postVM = ''
+      ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o subformat=fixed,force_size -O vpc $diskImage $out/disk.vhd
+    '';
+    configFile = ./azure-config-user.nix;
+    format = "raw";
+    inherit diskSize;
+    inherit config lib pkgs;
+  };
+
+  imports = [ ./azure-common.nix ];
+
+  # Azure metadata is available as a CD-ROM drive.
+  fileSystems."/metadata".device = "/dev/sr0";
+
+  systemd.services.fetch-ssh-keys =
+    { description = "Fetch host keys and authorized_keys for root user";
+
+      wantedBy = [ "sshd.service" "waagent.service" ];
+      before = [ "sshd.service" "waagent.service" ];
+      after = [ "local-fs.target" ];
+
+      path  = [ pkgs.coreutils ];
+      script =
+        ''
+          eval "$(cat /metadata/CustomData.bin)"
+          if ! [ -z "$ssh_host_ecdsa_key" ]; then
+            echo "downloaded ssh_host_ecdsa_key"
+            echo "$ssh_host_ecdsa_key" > /etc/ssh/ssh_host_ed25519_key
+            chmod 600 /etc/ssh/ssh_host_ed25519_key
+          fi
+
+          if ! [ -z "$ssh_host_ecdsa_key_pub" ]; then
+            echo "downloaded ssh_host_ecdsa_key_pub"
+            echo "$ssh_host_ecdsa_key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
+            chmod 644 /etc/ssh/ssh_host_ed25519_key.pub
+          fi
+
+          if ! [ -z "$ssh_root_auth_key" ]; then
+            echo "downloaded ssh_root_auth_key"
+            mkdir -m 0700 -p /root/.ssh
+            echo "$ssh_root_auth_key" > /root/.ssh/authorized_keys
+            chmod 600 /root/.ssh/authorized_keys
+          fi
+        '';
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      serviceConfig.StandardError = "journal+console";
+      serviceConfig.StandardOutput = "journal+console";
+     };
+
+}
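
[Editor's note] A sketch of evaluating the VHD defined by system.build.azureImage on its own; the postVM hook above converts the raw image to $out/disk.vhd. The expression assumes a nixpkgs checkout or channel is on NIX_PATH:

    # azure-image-build.nix (sketch)
    let
      nixos = import <nixpkgs/nixos> {
        configuration = {
          imports = [ <nixpkgs/nixos/modules/virtualisation/azure-image.nix> ];
        };
      };
    in nixos.config.system.build.azureImage
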
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-images.nix b/nixpkgs/nixos/modules/virtualisation/azure-images.nix
new file mode 100644
index 000000000000..22c82fc14f65
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-images.nix
@@ -0,0 +1,5 @@
+let self = {
+  "16.09" = "https://nixos.blob.core.windows.net/images/nixos-image-16.09.1694.019dcc3-x86_64-linux.vhd";
+
+  latest = self."16.09";
+}; in self
diff --git a/nixpkgs/nixos/modules/virtualisation/brightbox-config.nix b/nixpkgs/nixos/modules/virtualisation/brightbox-config.nix
new file mode 100644
index 000000000000..0a018e4cd695
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/brightbox-config.nix
@@ -0,0 +1,5 @@
+{ modulesPath, ... }:
+
+{
+  imports = [ "${modulesPath}/virtualisation/brightbox-image.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/brightbox-image.nix b/nixpkgs/nixos/modules/virtualisation/brightbox-image.nix
new file mode 100644
index 000000000000..e716982c510a
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/brightbox-image.nix
@@ -0,0 +1,166 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  diskSize = "20G";
+in
+{
+  imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ];
+
+  system.build.brightboxImage =
+    pkgs.vmTools.runInLinuxVM (
+      pkgs.runCommand "brightbox-image"
+        { preVM =
+            ''
+              mkdir $out
+              diskImage=$out/$diskImageBase
+              truncate $diskImage --size ${diskSize}
+              mv closure xchg/
+            '';
+
+          postVM =
+            ''
+              PATH=$PATH:${lib.makeBinPath [ pkgs.gnutar pkgs.gzip ]}
+              pushd $out
+              ${pkgs.qemu_kvm}/bin/qemu-img convert -c -O qcow2 $diskImageBase nixos.qcow2
+              rm $diskImageBase
+              popd
+            '';
+          diskImageBase = "nixos-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.raw";
+          buildInputs = [ pkgs.utillinux pkgs.perl ];
+          exportReferencesGraph =
+            [ "closure" config.system.build.toplevel ];
+        }
+        ''
+          # Create partition table
+          ${pkgs.parted}/sbin/parted --script /dev/vda mklabel msdos
+          ${pkgs.parted}/sbin/parted --script /dev/vda mkpart primary ext4 1 ${diskSize}
+          ${pkgs.parted}/sbin/parted --script /dev/vda print
+          . /sys/class/block/vda1/uevent
+          mknod /dev/vda1 b $MAJOR $MINOR
+
+          # Create an empty filesystem and mount it.
+          ${pkgs.e2fsprogs}/sbin/mkfs.ext4 -L nixos /dev/vda1
+          ${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
+
+          mkdir /mnt
+          mount /dev/vda1 /mnt
+
+          # The initrd expects these directories to exist.
+          mkdir /mnt/dev /mnt/proc /mnt/sys
+
+          mount --bind /proc /mnt/proc
+          mount --bind /dev /mnt/dev
+          mount --bind /sys /mnt/sys
+
+          # Copy all paths in the closure to the filesystem.
+          storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
+
+          mkdir -p /mnt/nix/store
+          echo "copying everything (will take a while)..."
+          cp -prd $storePaths /mnt/nix/store/
+
+          # Register the paths in the Nix database.
+          printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
+              chroot /mnt ${config.nix.package.out}/bin/nix-store --load-db --option build-users-group ""
+
+          # Create the system profile to allow nixos-rebuild to work.
+          chroot /mnt ${config.nix.package.out}/bin/nix-env \
+              -p /nix/var/nix/profiles/system --set ${config.system.build.toplevel} \
+              --option build-users-group ""
+
+          # `nixos-rebuild' requires an /etc/NIXOS.
+          mkdir -p /mnt/etc
+          touch /mnt/etc/NIXOS
+
+          # `switch-to-configuration' requires a /bin/sh
+          mkdir -p /mnt/bin
+          ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
+
+          # Install a configuration.nix.
+          mkdir -p /mnt/etc/nixos /mnt/boot/grub
+          cp ${./brightbox-config.nix} /mnt/etc/nixos/configuration.nix
+
+          # Generate the GRUB menu.
+          ln -s vda /dev/sda
+          chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
+
+          umount /mnt/proc /mnt/dev /mnt/sys
+          umount /mnt
+        ''
+    );
+
+  fileSystems."/".label = "nixos";
+
+  # Generate a GRUB menu.  Amazon's pv-grub uses this to boot our kernel/initrd.
+  boot.loader.grub.device = "/dev/vda";
+  boot.loader.timeout = 0;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  # Allow root logins only using the SSH key that the user specified
+  # at instance creation time.
+  services.openssh.enable = true;
+  services.openssh.permitRootLogin = "prohibit-password";
+
+  # Force getting the hostname from Google Compute.
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  environment.systemPackages = [ pkgs.cryptsetup ];
+
+  systemd.services."fetch-ec2-data" =
+    { description = "Fetch EC2 Data";
+
+      wantedBy = [ "multi-user.target" "sshd.service" ];
+      before = [ "sshd.service" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      path = [ pkgs.wget pkgs.iproute ];
+
+      script =
+        ''
+          wget="wget -q --retry-connrefused -O -"
+
+          ${optionalString (config.networking.hostName == "") ''
+            echo "setting host name..."
+            ${pkgs.nettools}/bin/hostname $($wget http://169.254.169.254/latest/meta-data/hostname)
+          ''}
+
+          # Don't download the SSH key if it has already been injected
+          # into the image (a Nova feature).
+          if ! [ -e /root/.ssh/authorized_keys ]; then
+              echo "obtaining SSH key..."
+              mkdir -m 0700 -p /root/.ssh
+              $wget http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key > /root/key.pub
+              if [ $? -eq 0 -a -e /root/key.pub ]; then
+                  if ! grep -q -f /root/key.pub /root/.ssh/authorized_keys; then
+                      cat /root/key.pub >> /root/.ssh/authorized_keys
+                      echo "new key added to authorized_keys"
+                  fi
+                  chmod 600 /root/.ssh/authorized_keys
+                  rm -f /root/key.pub
+              fi
+          fi
+
+          # Extract the intended SSH host key for this machine from
+          # the supplied user data, if available.  Otherwise sshd will
+          # generate one normally.
+          $wget http://169.254.169.254/2011-01-01/user-data > /root/user-data || true
+          key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' /root/user-data)"
+          key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' /root/user-data)"
+          if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
+              mkdir -m 0755 -p /etc/ssh
+              (umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
+              echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
+          fi
+        '';
+
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+    };
+
+}
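
[Editor's note] As with the Azure image, the qcow2 defined here can be evaluated directly; a sketch under the same nixpkgs-on-NIX_PATH assumption:

    # brightbox-image-build.nix (sketch)
    let
      nixos = import <nixpkgs/nixos> {
        configuration = {
          imports = [ <nixpkgs/nixos/modules/virtualisation/brightbox-image.nix> ];
        };
      };
    in nixos.config.system.build.brightboxImage
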
diff --git a/nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix b/nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix
new file mode 100644
index 000000000000..78afebdc5dd3
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix
@@ -0,0 +1,40 @@
+{ lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=tty0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    # Allow root logins
+    services.openssh = {
+      enable = true;
+      permitRootLogin = "prohibit-password";
+    };
+
+    # Cloud-init configuration.
+    services.cloud-init.enable = true;
+    # Wget is needed for setting password. This is of little use as
+    # root password login is disabled above.
+    environment.systemPackages = [ pkgs.wget ];
+    # Only enable CloudStack datasource for faster boot speed.
+    environment.etc."cloud/cloud.cfg.d/99_cloudstack.cfg".text = ''
+      datasource:
+        CloudStack: {}
+        None: {}
+      datasource_list: ["CloudStack"]
+    '';
+  };
+}
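
[Editor's note] A sketch of a guest configuration that builds on this profile; the extra package is a placeholder for site-specific additions:

    { modulesPath, pkgs, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/cloudstack-config.nix" ];
      environment.systemPackages = [ pkgs.htop ];
    }
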
diff --git a/nixpkgs/nixos/modules/virtualisation/container-config.nix b/nixpkgs/nixos/modules/virtualisation/container-config.nix
new file mode 100644
index 000000000000..604fb8a75932
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/container-config.nix
@@ -0,0 +1,29 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  config = mkIf config.boot.isContainer {
+
+    # Disable some features that are not useful in a container.
+    services.udisks2.enable = mkDefault false;
+    powerManagement.enable = mkDefault false;
+
+    networking.useHostResolvConf = mkDefault true;
+
+    # Containers should be light-weight, so start sshd on demand.
+    services.openssh.startWhenNeeded = mkDefault true;
+
+    # Shut up warnings about not having a boot loader.
+    system.build.installBootLoader = "${pkgs.coreutils}/bin/true";
+
+    # Not supported in systemd-nspawn containers.
+    security.audit.enable = false;
+
+    # Use the host's nix-daemon.
+    environment.variables.NIX_REMOTE = "daemon";
+
+  };
+
+}
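
[Editor's note] container-config.nix adjusts the guest side; the containers.nix module that follows defines the host-side containers.<name> interface. A sketch of a declarative container using options visible in that module, with addresses and port numbers as placeholders:

    {
      containers.webserver = {
        privateNetwork = true;
        hostAddress = "10.231.136.1";
        localAddress = "10.231.136.2";
        forwardPorts = [ { protocol = "tcp"; hostPort = 8080; containerPort = 80; } ];
        config = { pkgs, ... }: {
          services.nginx.enable = true;
          networking.firewall.allowedTCPPorts = [ 80 ];
        };
      };
    }
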
diff --git a/nixpkgs/nixos/modules/virtualisation/containers.nix b/nixpkgs/nixos/modules/virtualisation/containers.nix
new file mode 100644
index 000000000000..c10e2b162ccc
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/containers.nix
@@ -0,0 +1,783 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  # The container's init script, a small wrapper around the regular
+  # NixOS stage-2 init script.
+  containerInit = (cfg:
+    let
+      renderExtraVeth = (name: cfg:
+        ''
+        echo "Bringing ${name} up"
+        ip link set dev ${name} up
+        ${optionalString (cfg.localAddress != null) ''
+          echo "Setting ip for ${name}"
+          ip addr add ${cfg.localAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.localAddress6 != null) ''
+          echo "Setting ip6 for ${name}"
+          ip -6 addr add ${cfg.localAddress6} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress != null) ''
+          echo "Setting route to host for ${name}"
+          ip route add ${cfg.hostAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress6 != null) ''
+          echo "Setting route6 to host for ${name}"
+          ip -6 route add ${cfg.hostAddress6} dev ${name}
+        ''}
+        ''
+        );
+    in
+      pkgs.writeScript "container-init"
+      ''
+        #! ${pkgs.runtimeShell} -e
+
+        # Initialise the container side of the veth pair.
+        if [ -n "$HOST_ADDRESS" ]   || [ -n "$HOST_ADDRESS6" ]  ||
+           [ -n "$LOCAL_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS6" ] ||
+           [ -n "$HOST_BRIDGE" ]; then
+          ip link set host0 name eth0
+          ip link set dev eth0 up
+
+          if [ -n "$LOCAL_ADDRESS" ]; then
+            ip addr add $LOCAL_ADDRESS dev eth0
+          fi
+          if [ -n "$LOCAL_ADDRESS6" ]; then
+            ip -6 addr add $LOCAL_ADDRESS6 dev eth0
+          fi
+          if [ -n "$HOST_ADDRESS" ]; then
+            ip route add $HOST_ADDRESS dev eth0
+            ip route add default via $HOST_ADDRESS
+          fi
+          if [ -n "$HOST_ADDRESS6" ]; then
+            ip -6 route add $HOST_ADDRESS6 dev eth0
+            ip -6 route add default via $HOST_ADDRESS6
+          fi
+
+          ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+        fi
+
+        # Start the regular stage 1 script.
+        exec "$1"
+      ''
+    );
+
+  nspawnExtraVethArgs = (name: cfg: "--network-veth-extra=${name}");
+
+  startScript = cfg:
+    ''
+      mkdir -p -m 0755 "$root/etc" "$root/var/lib"
+      mkdir -p -m 0700 "$root/var/lib/private" "$root/root" /run/containers
+      if ! [ -e "$root/etc/os-release" ]; then
+        touch "$root/etc/os-release"
+      fi
+
+      if ! [ -e "$root/etc/machine-id" ]; then
+        touch "$root/etc/machine-id"
+      fi
+
+      mkdir -p -m 0755 \
+        "/nix/var/nix/profiles/per-container/$INSTANCE" \
+        "/nix/var/nix/gcroots/per-container/$INSTANCE"
+
+      cp --remove-destination /etc/resolv.conf "$root/etc/resolv.conf"
+
+      if [ "$PRIVATE_NETWORK" = 1 ]; then
+        extraFlags+=" --private-network"
+      fi
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        extraFlags+=" --network-veth"
+      fi
+
+      if [ -n "$HOST_PORT" ]; then
+        OIFS=$IFS
+        IFS=","
+        for i in $HOST_PORT
+        do
+            extraFlags+=" --port=$i"
+        done
+        IFS=$OIFS
+      fi
+
+      if [ -n "$HOST_BRIDGE" ]; then
+        extraFlags+=" --network-bridge=$HOST_BRIDGE"
+      fi
+
+      extraFlags+=" ${concatStringsSep " " (mapAttrsToList nspawnExtraVethArgs cfg.extraVeths)}"
+
+      for iface in $INTERFACES; do
+        extraFlags+=" --network-interface=$iface"
+      done
+
+      for iface in $MACVLANS; do
+        extraFlags+=" --network-macvlan=$iface"
+      done
+
+      # If the host is 64-bit and the container is 32-bit, add a
+      # --personality flag.
+      ${optionalString (config.nixpkgs.localSystem.system == "x86_64-linux") ''
+        if [ "$(< ''${SYSTEM_PATH:-/nix/var/nix/profiles/per-container/$INSTANCE/system}/system)" = i686-linux ]; then
+          extraFlags+=" --personality=x86"
+        fi
+      ''}
+
+      # Run systemd-nspawn without startup notification (we'll
+      # wait for the container systemd to signal readiness).
+      exec ${config.systemd.package}/bin/systemd-nspawn \
+        --keep-unit \
+        -M "$INSTANCE" -D "$root" $extraFlags \
+        $EXTRA_NSPAWN_FLAGS \
+        --notify-ready=yes \
+        --bind-ro=/nix/store \
+        --bind-ro=/nix/var/nix/db \
+        --bind-ro=/nix/var/nix/daemon-socket \
+        --bind="/nix/var/nix/profiles/per-container/$INSTANCE:/nix/var/nix/profiles" \
+        --bind="/nix/var/nix/gcroots/per-container/$INSTANCE:/nix/var/nix/gcroots" \
+        --link-journal=try-guest \
+        --setenv PRIVATE_NETWORK="$PRIVATE_NETWORK" \
+        --setenv HOST_BRIDGE="$HOST_BRIDGE" \
+        --setenv HOST_ADDRESS="$HOST_ADDRESS" \
+        --setenv LOCAL_ADDRESS="$LOCAL_ADDRESS" \
+        --setenv HOST_ADDRESS6="$HOST_ADDRESS6" \
+        --setenv LOCAL_ADDRESS6="$LOCAL_ADDRESS6" \
+        --setenv HOST_PORT="$HOST_PORT" \
+        --setenv PATH="$PATH" \
+        ${if cfg.additionalCapabilities != null && cfg.additionalCapabilities != [] then
+          ''--capability="${concatStringsSep " " cfg.additionalCapabilities}"'' else ""
+        } \
+        ${if cfg.tmpfs != null && cfg.tmpfs != [] then
+          ''--tmpfs=${concatStringsSep " --tmpfs=" cfg.tmpfs}'' else ""
+        } \
+        ${containerInit cfg} "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/init"
+    '';
+
+  preStartScript = cfg:
+    ''
+      # Clean up existing machined registration and interfaces.
+      machinectl terminate "$INSTANCE" 2> /dev/null || true
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        ip link del dev "ve-$INSTANCE" 2> /dev/null || true
+        ip link del dev "vb-$INSTANCE" 2> /dev/null || true
+      fi
+
+      ${concatStringsSep "\n" (
+        mapAttrsToList (name: cfg:
+          ''ip link del dev ${name} 2> /dev/null || true ''
+        ) cfg.extraVeths
+      )}
+   '';
+
+  postStartScript = (cfg:
+    let
+      ipcall = cfg: ipcmd: variable: attribute:
+        if cfg.${attribute} == null then
+          ''
+            if [ -n "${variable}" ]; then
+              ${ipcmd} add ${variable} dev $ifaceHost
+            fi
+          ''
+        else
+          ''${ipcmd} add ${cfg.${attribute}} dev $ifaceHost'';
+      renderExtraVeth = name: cfg:
+        if cfg.hostBridge != null then
+          ''
+            # Add ${name} to bridge ${cfg.hostBridge}
+            ip link set dev ${name} master ${cfg.hostBridge} up
+          ''
+        else
+          ''
+            echo "Bring ${name} up"
+            ip link set dev ${name} up
+            # Set IPs and routes for ${name}
+            ${optionalString (cfg.hostAddress != null) ''
+              ip addr add ${cfg.hostAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.hostAddress6 != null) ''
+              ip -6 addr add ${cfg.hostAddress6} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress != null) ''
+              ip route add ${cfg.localAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress6 != null) ''
+              ip -6 route add ${cfg.localAddress6} dev ${name}
+            ''}
+          '';
+    in
+      ''
+        if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+           [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+          if [ -z "$HOST_BRIDGE" ]; then
+            ifaceHost=ve-$INSTANCE
+            ip link set dev $ifaceHost up
+
+            ${ipcall cfg "ip addr" "$HOST_ADDRESS" "hostAddress"}
+            ${ipcall cfg "ip -6 addr" "$HOST_ADDRESS6" "hostAddress6"}
+            ${ipcall cfg "ip route" "$LOCAL_ADDRESS" "localAddress"}
+            ${ipcall cfg "ip -6 route" "$LOCAL_ADDRESS6" "localAddress6"}
+          fi
+          ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+        fi
+
+        # Get the leader PID so that we can signal it in
+        # preStop. We can't use machinectl there because D-Bus
+        # might be shutting down. FIXME: in systemd 219 we can
+        # just signal systemd-nspawn to do a clean shutdown.
+        machinectl show "$INSTANCE" | sed 's/Leader=\(.*\)/\1/;t;d' > "/run/containers/$INSTANCE.pid"
+      ''
+  );
+
+  serviceDirectives = cfg: {
+    ExecReload = pkgs.writeScript "reload-container"
+      ''
+        #! ${pkgs.runtimeShell} -e
+        ${pkgs.nixos-container}/bin/nixos-container run "$INSTANCE" -- \
+          bash --login -c "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/bin/switch-to-configuration test"
+      '';
+
+    SyslogIdentifier = "container %i";
+
+    EnvironmentFile = "-/etc/containers/%i.conf";
+
+    Type = "notify";
+
+    # Note that on reboot, systemd-nspawn returns 133, so this
+    # unit will be restarted. On poweroff, it returns 0, so the
+    # unit won't be restarted.
+    RestartForceExitStatus = "133";
+    SuccessExitStatus = "133";
+
+    Restart = "on-failure";
+
+    Slice = "machine.slice";
+    Delegate = true;
+
+    # Hack: we don't want to kill systemd-nspawn, since we call
+    # "machinectl poweroff" in preStop to shut down the
+    # container cleanly. But systemd requires sending a signal
+    # (at least if we want remaining processes to be killed
+    # after the timeout). So send an ignored signal.
+    KillMode = "mixed";
+    KillSignal = "WINCH";
+
+    DevicePolicy = "closed";
+    DeviceAllow = map (d: "${d.node} ${d.modifier}") cfg.allowedDevices;
+  };
+
+
+  system = config.nixpkgs.localSystem.system;
+
+  bindMountOpts = { name, ... }: {
+
+    options = {
+      mountPoint = mkOption {
+        example = "/mnt/usb";
+        type = types.str;
+        description = "Mount point on the container file system.";
+      };
+      hostPath = mkOption {
+        default = null;
+        example = "/home/alice";
+        type = types.nullOr types.str;
+        description = "Location of the host path to be mounted.";
+      };
+      isReadOnly = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Determine whether the mounted path will be accessed in read-only mode.";
+      };
+    };
+
+    config = {
+      mountPoint = mkDefault name;
+    };
+
+  };
+
+  allowedDeviceOpts = { ... }: {
+    options = {
+      node = mkOption {
+        example = "/dev/net/tun";
+        type = types.str;
+        description = "Path to device node";
+      };
+      modifier = mkOption {
+        example = "rw";
+        type = types.str;
+        description = ''
+          Device node access modifier. Takes a combination
+          <literal>r</literal> (read), <literal>w</literal> (write), and
+          <literal>m</literal> (mknod). See the
+          <literal>systemd.resource-control(5)</literal> man page for more
+          information.'';
+      };
+    };
+  };
+
+
+  mkBindFlag = d:
+               let flagPrefix = if d.isReadOnly then " --bind-ro=" else " --bind=";
+                   mountstr = if d.hostPath != null then "${d.hostPath}:${d.mountPoint}" else "${d.mountPoint}";
+               in flagPrefix + mountstr ;
+
+  mkBindFlags = bs: concatMapStrings mkBindFlag (lib.attrValues bs);
+
+  networkOptions = {
+    hostBridge = mkOption {
+      type = types.nullOr types.string;
+      default = null;
+      example = "br0";
+      description = ''
+        Put the host-side of the veth-pair into the named bridge.
+        Only one of hostAddress* or hostBridge can be given.
+      '';
+    };
+
+    forwardPorts = mkOption {
+      type = types.listOf (types.submodule {
+        options = {
+          protocol = mkOption {
+            type = types.str;
+            default = "tcp";
+            description = "The protocol specifier for port forwarding between host and container";
+          };
+          hostPort = mkOption {
+            type = types.int;
+            description = "Source port of the external interface on host";
+          };
+          containerPort = mkOption {
+            type = types.nullOr types.int;
+            default = null;
+            description = "Target port of container";
+          };
+        };
+      });
+      default = [];
+      example = [ { protocol = "tcp"; hostPort = 8080; containerPort = 80; } ];
+      description = ''
+        List of forwarded ports from host to container. Each forwarded port
+        is specified by protocol, hostPort and containerPort. By default,
+        protocol is tcp and hostPort and containerPort are assumed to be
+        the same if containerPort is not explicitly given.
+      '';
+    };
+
+
+    hostAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.1";
+      description = ''
+        The IPv4 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    hostAddress6 = mkOption {
+      type = types.nullOr types.string;
+      default = null;
+      example = "fc00::1";
+      description = ''
+        The IPv6 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    localAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.2";
+      description = ''
+        The IPv4 address assigned to the interface in the container.
+        If a hostBridge is used, this should be given with netmask to access
+        the whole network. Otherwise the default netmask is /32 and routing is
+        set up from localAddress to hostAddress and back.
+      '';
+    };
+
+    localAddress6 = mkOption {
+      type = types.nullOr types.string;
+      default = null;
+      example = "fc00::2";
+      description = ''
+        The IPv6 address assigned to the interface in the container.
+        If a hostBridge is used, this should be given with netmask to access
+        the whole network. Otherwise the default netmask is /128 and routing is
+        set up from localAddress6 to hostAddress6 and back.
+      '';
+    };
+
+  };
+
+  dummyConfig =
+    {
+      extraVeths = {};
+      additionalCapabilities = [];
+      allowedDevices = [];
+      hostAddress = null;
+      hostAddress6 = null;
+      localAddress = null;
+      localAddress6 = null;
+      tmpfs = null;
+    };
+
+in
+
+{
+  options = {
+
+    boot.isContainer = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether this NixOS machine is a lightweight container running
+        in another NixOS system.
+      '';
+    };
+
+    boot.enableContainers = mkOption {
+      type = types.bool;
+      default = !config.boot.isContainer;
+      description = ''
+        Whether to enable support for NixOS containers.
+      '';
+    };
+
+    containers = mkOption {
+      type = types.attrsOf (types.submodule (
+        { config, options, name, ... }:
+        {
+          options = {
+
+            config = mkOption {
+              description = ''
+                A specification of the desired configuration of this
+                container, as a NixOS module.
+              '';
+              type = lib.mkOptionType {
+                name = "Toplevel NixOS config";
+                merge = loc: defs: (import ../../lib/eval-config.nix {
+                  inherit system;
+                  modules =
+                    let
+                      extraConfig = {
+                        _file = "module at ${__curPos.file}:${toString __curPos.line}";
+                        config = {
+                          boot.isContainer = true;
+                          networking.hostName = mkDefault name;
+                          networking.useDHCP = false;
+                          assertions = [
+                            {
+                              assertion = config.privateNetwork -> stringLength name < 12;
+                              message = ''
+                                Container name `${name}` is too long: when `privateNetwork` is enabled, container names
+                                cannot be longer than 11 characters, because the container's interface name is derived from it.
+                                This might be fixed in the future. See https://github.com/NixOS/nixpkgs/issues/38509
+                              '';
+                            }
+                          ];
+                        };
+                      };
+                    in [ extraConfig ] ++ (map (x: x.value) defs);
+                  prefix = [ "containers" name ];
+                }).config;
+              };
+            };
+
+            path = mkOption {
+              type = types.path;
+              example = "/nix/var/nix/profiles/containers/webserver";
+              description = ''
+                As an alternative to specifying
+                <option>config</option>, you can specify the path to
+                the evaluated NixOS system configuration, typically a
+                symlink to a system profile.
+              '';
+            };
+
+            additionalCapabilities = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "CAP_NET_ADMIN" "CAP_MKNOD" ];
+              description = ''
+                Grant additional capabilities to the container.  See the
+                capabilities(7) and systemd-nspawn(1) man pages for more
+                information.
+              '';
+            };
+            enableTun = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Allows the container to create and setup tunnel interfaces
+                by granting the <literal>NET_ADMIN</literal> capability and
+                enabling access to <literal>/dev/net/tun</literal>.
+              '';
+            };
+
+            privateNetwork = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Whether to give the container its own private virtual
+                Ethernet interface.  The interface is called
+                <literal>eth0</literal>, and is hooked up to the interface
+                <literal>ve-<replaceable>container-name</replaceable></literal>
+                on the host.  If this option is not set, then the
+                container shares the network interfaces of the host,
+                and can bind to any port on any interface.
+              '';
+            };
+
+            interfaces = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = ''
+                The list of interfaces to be moved into the container.
+              '';
+            };
+
+            macvlans = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = ''
+                The list of host interfaces from which macvlans will be
+                created. For each interface specified, a macvlan interface
+                will be created and moved to the container.
+              '';
+            };
+
+            extraVeths = mkOption {
+              type = with types; attrsOf (submodule { options = networkOptions; });
+              default = {};
+              description = ''
+                Extra veth-pairs to be created for the container
+              '';
+            };
+
+            autoStart = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Whether the container is automatically started at boot-time.
+              '';
+            };
+
+            bindMounts = mkOption {
+              type = with types; loaOf (submodule bindMountOpts);
+              default = {};
+              example = { "/home" = { hostPath = "/home/alice";
+                                      isReadOnly = false; };
+                        };
+
+              description =
+                ''
+                  Extra directories that are bound into the container.
+                '';
+            };
+
+            allowedDevices = mkOption {
+              type = with types; listOf (submodule allowedDeviceOpts);
+              default = [];
+              example = [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              description = ''
+                A list of device nodes to which the container has access.
+              '';
+            };
+
+            tmpfs = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "/var" ];
+              description = ''
+                Mounts a set of tmpfs file systems into the container.
+                Multiple paths can be specified.
+                Valid items must conform to the --tmpfs argument
+                of systemd-nspawn. See systemd-nspawn(1) for details.
+              '';
+            };
+
+            extraFlags = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "--drop-capability=CAP_SYS_CHROOT" ];
+              description = ''
+                Extra flags passed to the systemd-nspawn command.
+                See systemd-nspawn(1) for details.
+              '';
+            };
+
+          } // networkOptions;
+
+          config = mkMerge
+            [
+              (mkIf options.config.isDefined {
+                path = config.config.system.build.toplevel;
+              })
+            ];
+        }));
+
+      default = {};
+      example = literalExample
+        ''
+          { webserver =
+              { path = "/nix/var/nix/profiles/webserver";
+              };
+            database =
+              { config =
+                  { config, pkgs, ... }:
+                  { services.postgresql.enable = true;
+                    services.postgresql.package = pkgs.postgresql_9_6;
+
+                    system.stateVersion = "17.03";
+                  };
+              };
+          }
+        '';
+      description = ''
+        A set of NixOS system configurations to be run as lightweight
+        containers.  Each container appears as a service
+        <literal>container-<replaceable>name</replaceable></literal>
+        on the host system, allowing it to be started and stopped via
+        <command>systemctl</command>.
+      '';
+    };
+
+  };
+
+
+  config = mkIf (config.boot.enableContainers) (let
+
+    unit = {
+      description = "Container '%i'";
+
+      unitConfig.RequiresMountsFor = [ "/var/lib/containers/%i" ];
+
+      path = [ pkgs.iproute ];
+
+      environment.INSTANCE = "%i";
+      environment.root = "/var/lib/containers/%i";
+
+      preStart = preStartScript dummyConfig;
+
+      script = startScript dummyConfig;
+
+      postStart = postStartScript dummyConfig;
+
+      preStop =
+        ''
+          pid="$(cat /run/containers/$INSTANCE.pid)"
+          if [ -n "$pid" ]; then
+            kill -RTMIN+4 "$pid"
+          fi
+          rm -f "/run/containers/$INSTANCE.pid"
+        '';
+
+      restartIfChanged = false;
+
+      serviceConfig = serviceDirectives dummyConfig;
+    };
+  in {
+    systemd.targets."multi-user".wants = [ "machines.target" ];
+
+    systemd.services = listToAttrs (filter (x: x.value != null) (
+      # The generic container template used by imperative containers
+      [{ name = "container@"; value = unit; }]
+      # declarative containers
+      ++ (mapAttrsToList (name: cfg: nameValuePair "container@${name}" (let
+          containerConfig = cfg // (
+          if cfg.enableTun then
+            {
+              allowedDevices = cfg.allowedDevices
+                ++ [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              additionalCapabilities = cfg.additionalCapabilities
+                ++ [ "CAP_NET_ADMIN" ];
+            }
+          else {});
+        in
+          unit // {
+            preStart = preStartScript containerConfig;
+            script = startScript containerConfig;
+            postStart = postStartScript containerConfig;
+            serviceConfig = serviceDirectives containerConfig;
+          } // (
+          if containerConfig.autoStart then
+            {
+              wantedBy = [ "machines.target" ];
+              wants = [ "network.target" ];
+              after = [ "network.target" ];
+              restartTriggers = [
+                containerConfig.path
+                config.environment.etc."containers/${name}.conf".source
+              ];
+              restartIfChanged = true;
+            }
+          else {})
+      )) config.containers)
+    ));
+
+    # Generate a configuration file in /etc/containers for each
+    # container so that container@.target can get the container
+    # configuration.
+    environment.etc =
+      let mkPortStr = p: p.protocol + ":" + (toString p.hostPort) + ":" + (if p.containerPort == null then toString p.hostPort else toString p.containerPort);
+      in mapAttrs' (name: cfg: nameValuePair "containers/${name}.conf"
+      { text =
+          ''
+            SYSTEM_PATH=${cfg.path}
+            ${optionalString cfg.privateNetwork ''
+              PRIVATE_NETWORK=1
+              ${optionalString (cfg.hostBridge != null) ''
+                HOST_BRIDGE=${cfg.hostBridge}
+              ''}
+              ${optionalString (length cfg.forwardPorts > 0) ''
+                HOST_PORT=${concatStringsSep "," (map mkPortStr cfg.forwardPorts)}
+              ''}
+              ${optionalString (cfg.hostAddress != null) ''
+                HOST_ADDRESS=${cfg.hostAddress}
+              ''}
+              ${optionalString (cfg.hostAddress6 != null) ''
+                HOST_ADDRESS6=${cfg.hostAddress6}
+              ''}
+              ${optionalString (cfg.localAddress != null) ''
+                LOCAL_ADDRESS=${cfg.localAddress}
+              ''}
+              ${optionalString (cfg.localAddress6 != null) ''
+                LOCAL_ADDRESS6=${cfg.localAddress6}
+              ''}
+            ''}
+            INTERFACES="${toString cfg.interfaces}"
+            MACVLANS="${toString cfg.macvlans}"
+            ${optionalString cfg.autoStart ''
+              AUTO_START=1
+            ''}
+            EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts +
+              optionalString (cfg.extraFlags != [])
+                (" " + concatStringsSep " " cfg.extraFlags)}"
+          '';
+      }) config.containers;
+
+    # Generate /etc/hosts entries for the containers.
+    networking.extraHosts = concatStrings (mapAttrsToList (name: cfg: optionalString (cfg.localAddress != null)
+      ''
+        ${head (splitString "/" cfg.localAddress)} ${name}.containers
+      '') config.containers);
+
+    networking.dhcpcd.denyInterfaces = [ "ve-*" "vb-*" ];
+
+    services.udev.extraRules = optionalString config.networking.networkmanager.enable ''
+      # Don't manage interfaces created by nixos-container.
+      ENV{INTERFACE}=="v[eb]-*", ENV{NM_UNMANAGED}="1"
+    '';
+
+    environment.systemPackages = [ pkgs.nixos-container ];
+  });
+}
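+
+# Example usage (a sketch, not part of the module itself; the container name,
+# addresses, ports and mount paths below are illustrative placeholders):
+#
+# { ... }:
+# {
+#   containers.webserver = {
+#     autoStart = true;
+#     privateNetwork = true;
+#     hostAddress = "10.231.136.1";
+#     localAddress = "10.231.136.2";
+#     forwardPorts = [ { hostPort = 8080; containerPort = 80; } ];
+#     bindMounts."/srv/www" = { hostPath = "/var/www"; isReadOnly = true; };
+#     config = { config, pkgs, ... }: {
+#       services.nginx.enable = true;
+#       networking.firewall.allowedTCPPorts = [ 80 ];
+#     };
+#   };
+# }
+#
+# The container then shows up as `container@webserver.service` on the host and
+# can be started and stopped with systemctl.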
diff --git a/nixpkgs/nixos/modules/virtualisation/docker-containers.nix b/nixpkgs/nixos/modules/virtualisation/docker-containers.nix
new file mode 100644
index 000000000000..59b0943f591f
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker-containers.nix
@@ -0,0 +1,230 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.docker-containers;
+
+  dockerContainer =
+    { ... }: {
+
+      options = {
+
+        image = mkOption {
+          type = types.str;
+          description = "Docker image to run.";
+          example = "library/hello-world";
+        };
+
+        cmd = mkOption {
+          type =  with types; listOf str;
+          default = [];
+          description = "Commandline arguments to pass to the image's entrypoint.";
+          example = literalExample ''
+            ["--port=9000"]
+          '';
+        };
+
+        entrypoint = mkOption {
+          type = with types; nullOr str;
+          description = "Overwrite the default entrypoint of the image.";
+          default = null;
+          example = "/bin/my-app";
+        };
+
+        environment = mkOption {
+          type = with types; attrsOf str;
+          default = {};
+          description = "Environment variables to set for this container.";
+          example = literalExample ''
+            {
+              DATABASE_HOST = "db.example.com";
+              DATABASE_PORT = "3306";
+            }
+          '';
+        };
+
+        log-driver = mkOption {
+          type = types.str;
+          default = "none";
+          description = ''
+            Logging driver for the container.  The default of
+            <literal>"none"</literal> means that the container's logs will be
+            handled as part of the systemd unit.  Setting this to
+            <literal>"journald"</literal> will result in duplicate logging, but
+            the container's logs will be visible to the <command>docker
+            logs</command> command.
+
+            For more details and a full list of logging drivers, refer to the
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver">
+            Docker engine documentation</link>
+          '';
+        };
+
+        ports = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = ''
+            Network ports to publish from the container to the host.
+
+            Valid formats:
+
+            <itemizedlist>
+              <listitem>
+                <para>
+                  <literal>&lt;ip&gt;:&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <literal>&lt;ip&gt;::&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <literal>&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <literal>&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+            </itemizedlist>
+
+            Both <literal>hostPort</literal> and
+            <literal>containerPort</literal> can be specified as a range of
+            ports.  When specifying ranges for both, the number of container
+            ports in the range must match the number of host ports in the
+            range.  Example: <literal>1234-1236:1234-1236/tcp</literal>
+
+            When specifying a range for <literal>hostPort</literal> only, the
+            <literal>containerPort</literal> must <emphasis>not</emphasis> be a
+            range.  In this case, the container port is published somewhere
+            within the specified <literal>hostPort</literal> range.  Example:
+            <literal>1234-1236:1234/tcp</literal>
+
+            Refer to the
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#expose-incoming-ports">
+            Docker engine documentation</link> for full details.
+          '';
+          example = literalExample ''
+            [
+              "8080:9000"
+            ]
+          '';
+        };
+
+        user = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = ''
+            Override the username or UID (and optionally groupname or GID) used
+            in the container.
+          '';
+          example = "nobody:nogroup";
+        };
+
+        volumes = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = ''
+            List of volumes to attach to this container.
+
+            Note that this is a list of <literal>"src:dst"</literal> strings to
+            allow for <literal>src</literal> to refer to
+            <literal>/nix/store</literal> paths, which would be difficult with an
+            attribute set.  There are also a variety of mount options available
+            as a third field; please refer to the
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#volume-shared-filesystems">
+            docker engine documentation</link> for details.
+          '';
+          example = literalExample ''
+            [
+              "volume_name:/path/inside/container"
+              "/path/on/host:/path/inside/container"
+            ]
+          '';
+        };
+
+        workdir = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = "Override the default working directory for the container.";
+          example = "/var/lib/hello_world";
+        };
+
+        extraDockerOptions = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = "Extra options for <command>docker run</command>.";
+          example = literalExample ''
+            ["--network=host"]
+          '';
+        };
+      };
+    };
+
+  mkService = name: container: {
+    wantedBy = [ "multi-user.target" ];
+    after = [ "docker.service" "docker.socket" ];
+    requires = [ "docker.service" "docker.socket" ];
+    serviceConfig = {
+      ExecStart = concatStringsSep " \\\n  " ([
+        "${pkgs.docker}/bin/docker run"
+        "--rm"
+        "--name=%n"
+        "--log-driver=${container.log-driver}"
+      ] ++ optional (container.entrypoint != null)
+        "--entrypoint=${escapeShellArg container.entrypoint}"
+        ++ (mapAttrsToList (k: v: "-e ${escapeShellArg k}=${escapeShellArg v}") container.environment)
+        ++ map (p: "-p ${escapeShellArg p}") container.ports
+        ++ optional (container.user != null) "-u ${escapeShellArg container.user}"
+        ++ map (v: "-v ${escapeShellArg v}") container.volumes
+        ++ optional (container.workdir != null) "-w ${escapeShellArg container.workdir}"
+        ++ map escapeShellArg container.extraDockerOptions
+        ++ [container.image]
+        ++ map escapeShellArg container.cmd
+      );
+      ExecStartPre = "-${pkgs.docker}/bin/docker rm -f %n";
+      ExecStop = "${pkgs.docker}/bin/docker stop %n";
+      ExecStopPost = "-${pkgs.docker}/bin/docker rm -f %n";
+
+      ### There is no generalized way of supporting `reload` for docker
+      ### containers. Some containers may respond well to SIGHUP sent to their
+      ### init process, but it is not guaranteed; some apps have other reload
+      ### mechanisms, some don't have a reload signal at all, and some docker
+      ### images just have broken signal handling.  The best compromise in this
+      ### case is probably to leave ExecReload undefined, so `systemctl reload`
+      ### will at least result in an error instead of potentially undefined
+      ### behaviour.
+      ###
+      ### Advanced users can still override this part of the unit to implement
+      ### a custom reload handler, since the result of all this is a normal
+      ### systemd service from the perspective of the NixOS module system.
+      ###
+      # ExecReload = ...;
+      ###
+
+      TimeoutStartSec = 0;
+      TimeoutStopSec = 120;
+      Restart = "always";
+    };
+  };
+
+in {
+
+  options.docker-containers = mkOption {
+    default = {};
+    type = types.attrsOf (types.submodule dockerContainer);
+    description = "Docker containers to run as systemd services.";
+  };
+
+  config = mkIf (cfg != {}) {
+
+    systemd.services = mapAttrs' (n: v: nameValuePair "docker-${n}" (mkService n v)) cfg;
+
+    virtualisation.docker.enable = true;
+
+  };
+
+}
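+
+# Example usage (a minimal sketch; the image name, port mapping, volume and
+# environment below are illustrative placeholders, not defaults of this module):
+#
+# {
+#   docker-containers.webapp = {
+#     image = "library/nginx:latest";
+#     ports = [ "127.0.0.1:8080:80" ];
+#     volumes = [ "/srv/webapp:/usr/share/nginx/html:ro" ];
+#     environment = { TZ = "UTC"; };
+#   };
+# }
+#
+# This yields a systemd unit `docker-webapp.service` that wraps the
+# corresponding `docker run --rm ...` invocation.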
diff --git a/nixpkgs/nixos/modules/virtualisation/docker-image.nix b/nixpkgs/nixos/modules/virtualisation/docker-image.nix
new file mode 100644
index 000000000000..baac3a35a78e
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker-image.nix
@@ -0,0 +1,57 @@
+{ ... }:
+
+{
+  imports = [
+    ../profiles/docker-container.nix # FIXME, shouldn't include something from profiles/
+  ];
+
+  boot.postBootCommands =
+    ''
+      # Set virtualisation to docker
+      echo "docker" > /run/systemd/container
+    '';
+
+  # iptables does not work inside Docker containers.
+  networking.firewall.enable = false;
+
+  # Socket-activated SSH presents problems in Docker.
+  services.openssh.startWhenNeeded = false;
+}
+
+# Example usage:
+#
+## default.nix
+# let
+#   nixos = import <nixpkgs/nixos> {
+#     configuration = ./configuration.nix;
+#     system = "x86_64-linux";
+#   };
+# in
+# nixos.config.system.build.tarball
+#
+## configuration.nix
+# { pkgs, config, lib, ... }:
+# {
+#   imports = [
+#     <nixpkgs/nixos/modules/virtualisation/docker-image.nix>
+#     <nixpkgs/nixos/modules/installer/cd-dvd/channel.nix>
+#   ];
+#
+#   documentation.doc.enable = false;
+#
+#   environment.systemPackages = with pkgs; [
+#     bashInteractive
+#     cacert
+#     nix
+#   ];
+# }
+#
+## Run
+# Build the tarball:
+# $ nix-build default.nix
+# Load into docker:
+# $ docker import result/tarball/nixos-system-*.tar.xz nixos-docker
+# Boots into systemd
+# $ docker run --privileged -it nixos-docker /init
+# Log into the container
+# $ docker exec -it <container-name> /run/current-system/sw/bin/bash
diff --git a/nixpkgs/nixos/modules/virtualisation/docker-preloader.nix b/nixpkgs/nixos/modules/virtualisation/docker-preloader.nix
new file mode 100644
index 000000000000..6ab83058dee1
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker-preloader.nix
@@ -0,0 +1,134 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+with builtins;
+
+let
+  cfg = config.virtualisation;
+
+  sanitizeImageName = image: replaceStrings ["/"] ["-"] image.imageName;
+  hash = drv: head (split "-" (baseNameOf drv.outPath));
+  # The label of an ext4 FS is limited to 16 bytes
+  labelFromImage = image: substring 0 16 (hash image);
+
+  # The Docker image is loaded and some files from /var/lib/docker/
+  # are written into a qcow image.
+  preload = image: pkgs.vmTools.runInLinuxVM (
+    pkgs.runCommand "docker-preload-image-${sanitizeImageName image}" {
+      buildInputs = with pkgs; [ docker e2fsprogs utillinux curl kmod ];
+      preVM = pkgs.vmTools.createEmptyImage {
+        size = cfg.dockerPreloader.qcowSize;
+        fullName = "docker-deamon-image.qcow2";
+      };
+    }
+    ''
+      mkfs.ext4 /dev/vda
+      e2label /dev/vda ${labelFromImage image}
+      mkdir -p /var/lib/docker
+      mount -t ext4 /dev/vda /var/lib/docker
+
+      modprobe overlay
+
+      # from https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
+      mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
+      cd /sys/fs/cgroup
+      for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
+        mkdir -p $sys
+        if ! mountpoint -q $sys; then
+          if ! mount -n -t cgroup -o $sys cgroup $sys; then
+            rmdir $sys || true
+          fi
+        fi
+      done
+
+      dockerd -H tcp://127.0.0.1:5555 -H unix:///var/run/docker.sock &
+
+      until curl --output /dev/null --silent --connect-timeout 2 http://127.0.0.1:5555; do
+        printf '.'
+        sleep 1
+      done
+
+      docker load -i ${image}
+
+      kill %1
+      find /var/lib/docker/ -maxdepth 1 -mindepth 1 -not -name "image" -not -name "overlay2" | xargs rm -rf
+    '');
+
+  preloadedImages = map preload cfg.dockerPreloader.images;
+
+in
+
+{
+  options.virtualisation.dockerPreloader = {
+    images = mkOption {
+      default = [ ];
+      type = types.listOf types.package;
+      description =
+      ''
+        A list of Docker images to preload (in the /var/lib/docker directory).
+      '';
+    };
+    qcowSize = mkOption {
+      default = 1024;
+      type = types.int;
+      description =
+      ''
+        The size (in MB) of the generated qcow2 files.
+      '';
+    };
+  };
+
+  config = mkIf (cfg.dockerPreloader.images != []) {
+    assertions = [{
+      # If docker.storageDriver is null, Docker chooses the storage
+      # driver itself, so in that case we cannot be sure overlay2 is used.
+      assertion = cfg.docker.storageDriver == "overlay2"
+        || cfg.docker.storageDriver == "overlay"
+        || cfg.docker.storageDriver == null;
+      message = "The Docker image preloader only works with the overlay2 storage driver!";
+    }];
+
+    virtualisation.qemu.options =
+      map (path: "-drive if=virtio,file=${path}/disk-image.qcow2,readonly,media=cdrom,format=qcow2")
+      preloadedImages;
+
+
+    # All attached QCOW files are mounted and their contents are linked
+    # to /var/lib/docker/ in order to make the images available.
+    systemd.services.docker-preloader = {
+      description = "Preloaded Docker images";
+      wantedBy = ["docker.service"];
+      after = ["network.target"];
+      path = with pkgs; [ mount rsync jq ];
+      script = ''
+        mkdir -p /var/lib/docker/overlay2/l /var/lib/docker/image/overlay2
+        echo '{}' > /tmp/repositories.json
+
+        for i in ${concatStringsSep " " (map labelFromImage cfg.dockerPreloader.images)}; do
+          mkdir -p /mnt/docker-images/$i
+
+          # The ext4 label is limited to 16 bytes
+          mount /dev/disk/by-label/$(echo $i | cut -c1-16) -o ro,noload /mnt/docker-images/$i
+
+          find /mnt/docker-images/$i/overlay2/ -maxdepth 1 -mindepth 1 -not -name l\
+             -exec ln -s '{}' /var/lib/docker/overlay2/ \;
+          cp -P /mnt/docker-images/$i/overlay2/l/* /var/lib/docker/overlay2/l/
+
+          rsync -a /mnt/docker-images/$i/image/ /var/lib/docker/image/
+
+          # Accumulate image definitions
+          cp /tmp/repositories.json /tmp/repositories.json.tmp
+          jq -s '.[0] * .[1]' \
+            /tmp/repositories.json.tmp \
+            /mnt/docker-images/$i/image/overlay2/repositories.json \
+            > /tmp/repositories.json
+        done
+
+        mv /tmp/repositories.json /var/lib/docker/image/overlay2/repositories.json
+      '';
+      serviceConfig = {
+        Type = "oneshot";
+      };
+    };
+  };
+}
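+
+# Example usage (a sketch; the image built with pkgs.dockerTools below is an
+# illustrative placeholder — any derivation producing a `docker load`-able
+# tarball should do):
+#
+# { pkgs, ... }:
+# {
+#   virtualisation.docker.enable = true;
+#   virtualisation.docker.storageDriver = "overlay2";
+#   virtualisation.dockerPreloader = {
+#     images = [ (pkgs.dockerTools.buildImage { name = "hello"; contents = [ pkgs.hello ]; }) ];
+#     qcowSize = 2048;
+#   };
+# }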
diff --git a/nixpkgs/nixos/modules/virtualisation/docker.nix b/nixpkgs/nixos/modules/virtualisation/docker.nix
new file mode 100644
index 000000000000..7d196a46276a
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker.nix
@@ -0,0 +1,223 @@
+# Systemd services for docker.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.docker;
+  proxy_env = config.networking.proxy.envVars;
+
+in
+
+{
+  ###### interface
+
+  options.virtualisation.docker = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            This option enables Docker, a daemon that manages
+            Linux containers. Users in the "docker" group can interact with
+            the daemon (e.g. to start or stop containers) using the
+            <command>docker</command> command line tool.
+          '';
+      };
+
+    listenOptions =
+      mkOption {
+        type = types.listOf types.str;
+        default = ["/run/docker.sock"];
+        description =
+          ''
+            A list of Unix sockets and TCP endpoints for Docker to listen on. The format follows
+            ListenStream as described in systemd.socket(5).
+          '';
+      };
+
+    enableOnBoot =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            When enabled, dockerd is started on boot. This is required for
+            containers which are created with the
+            <literal>--restart=always</literal> flag to work. If this option is
+            disabled, Docker might be started on demand by socket activation.
+          '';
+      };
+
+    enableNvidia =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable the nvidia-docker wrapper, which supports NVIDIA GPUs inside Docker containers.
+        '';
+      };
+
+    liveRestore =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            Allow dockerd to be restarted without affecting running containers.
+            This option is incompatible with Docker Swarm.
+          '';
+      };
+
+    storageDriver =
+      mkOption {
+        type = types.nullOr (types.enum ["aufs" "btrfs" "devicemapper" "overlay" "overlay2" "zfs"]);
+        default = null;
+        description =
+          ''
+            This option determines which Docker storage driver to use. By default,
+            Docker automatically chooses its preferred storage driver.
+          '';
+      };
+
+    logDriver =
+      mkOption {
+        type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs"];
+        default = "journald";
+        description =
+          ''
+            This option determines which Docker log driver to use.
+          '';
+      };
+
+    extraOptions =
+      mkOption {
+        type = types.separatedString " ";
+        default = "";
+        description =
+          ''
+            Extra command-line options to pass to the
+            <command>docker</command> daemon.
+          '';
+      };
+
+    autoPrune = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to periodically prune Docker resources. If enabled, a
+          systemd timer will run <literal>docker system prune -f</literal>
+          as specified by the <literal>dates</literal> option.
+        '';
+      };
+
+      flags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--all" ];
+        description = ''
+          Any additional flags passed to <command>docker system prune</command>.
+        '';
+      };
+
+      dates = mkOption {
+        default = "weekly";
+        type = types.str;
+        description = ''
+          Specification (in the format described by
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>7</manvolnum></citerefentry>) of the time at
+          which the prune will occur.
+        '';
+      };
+    };
+
+    package = mkOption {
+      default = pkgs.docker;
+      type = types.package;
+      example = pkgs.docker-edge;
+      description = ''
+        Docker package to be used in the module.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [{
+      environment.systemPackages = [ cfg.package ]
+        ++ optional cfg.enableNvidia pkgs.nvidia-docker;
+      users.groups.docker.gid = config.ids.gids.docker;
+      systemd.packages = [ cfg.package ];
+
+      systemd.services.docker = {
+        wantedBy = optional cfg.enableOnBoot "multi-user.target";
+        environment = proxy_env;
+        serviceConfig = {
+          ExecStart = [
+            ""
+            ''
+              ${cfg.package}/bin/dockerd \
+                --group=docker \
+                --host=fd:// \
+                --log-driver=${cfg.logDriver} \
+                ${optionalString (cfg.storageDriver != null) "--storage-driver=${cfg.storageDriver}"} \
+                ${optionalString cfg.liveRestore "--live-restore" } \
+                ${optionalString cfg.enableNvidia "--add-runtime nvidia=${pkgs.nvidia-docker}/bin/nvidia-container-runtime" } \
+                ${cfg.extraOptions}
+            ''];
+          ExecReload=[
+            ""
+            "${pkgs.procps}/bin/kill -s HUP $MAINPID"
+          ];
+        };
+
+        path = [ pkgs.kmod ] ++ optional (cfg.storageDriver == "zfs") pkgs.zfs
+          ++ optional cfg.enableNvidia pkgs.nvidia-docker;
+      };
+
+      systemd.sockets.docker = {
+        description = "Docker Socket for the API";
+        wantedBy = [ "sockets.target" ];
+        socketConfig = {
+          ListenStream = cfg.listenOptions;
+          SocketMode = "0660";
+          SocketUser = "root";
+          SocketGroup = "docker";
+        };
+      };
+
+      systemd.services.docker-prune = {
+        description = "Prune docker resources";
+
+        restartIfChanged = false;
+        unitConfig.X-StopOnRemoval = false;
+
+        serviceConfig.Type = "oneshot";
+
+        script = ''
+          ${cfg.package}/bin/docker system prune -f ${toString cfg.autoPrune.flags}
+        '';
+
+        startAt = optional cfg.autoPrune.enable cfg.autoPrune.dates;
+      };
+
+      assertions = [
+        { assertion = cfg.enableNvidia -> config.hardware.opengl.driSupport32Bit or false;
+          message = "Option enableNvidia requires 32bit support libraries";
+        }];
+    }
+    (mkIf cfg.enableNvidia {
+      environment.etc."nvidia-container-runtime/config.toml".source = "${pkgs.nvidia-docker}/etc/config.toml";
+    })
+  ]);
+
+  imports = [
+    (mkRemovedOptionModule ["virtualisation" "docker" "socketActivation"] "This option was removed in favor of starting docker at boot")
+  ];
+
+}
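+
+# Example usage (a sketch; the values shown are illustrative, not defaults):
+#
+# {
+#   virtualisation.docker = {
+#     enable = true;
+#     storageDriver = "overlay2";
+#     liveRestore = false;          # e.g. when the host participates in a swarm
+#     autoPrune = {
+#       enable = true;
+#       dates = "weekly";
+#       flags = [ "--all" ];
+#     };
+#   };
+# }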
diff --git a/nixpkgs/nixos/modules/virtualisation/ec2-amis.nix b/nixpkgs/nixos/modules/virtualisation/ec2-amis.nix
new file mode 100644
index 000000000000..f640bb21b133
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ec2-amis.nix
@@ -0,0 +1,295 @@
+let self = {
+  "14.04".ap-northeast-1.hvm-ebs = "ami-71c6f470";
+  "14.04".ap-northeast-1.pv-ebs = "ami-4dcbf84c";
+  "14.04".ap-northeast-1.pv-s3 = "ami-8fc4f68e";
+  "14.04".ap-southeast-1.hvm-ebs = "ami-da280888";
+  "14.04".ap-southeast-1.pv-ebs = "ami-7a9dbc28";
+  "14.04".ap-southeast-1.pv-s3 = "ami-c4290996";
+  "14.04".ap-southeast-2.hvm-ebs = "ami-ab523e91";
+  "14.04".ap-southeast-2.pv-ebs = "ami-6769055d";
+  "14.04".ap-southeast-2.pv-s3 = "ami-15533f2f";
+  "14.04".eu-central-1.hvm-ebs = "ami-ba0234a7";
+  "14.04".eu-west-1.hvm-ebs = "ami-96cb63e1";
+  "14.04".eu-west-1.pv-ebs = "ami-b48c25c3";
+  "14.04".eu-west-1.pv-s3 = "ami-06cd6571";
+  "14.04".sa-east-1.hvm-ebs = "ami-01b90e1c";
+  "14.04".sa-east-1.pv-ebs = "ami-69e35474";
+  "14.04".sa-east-1.pv-s3 = "ami-61b90e7c";
+  "14.04".us-east-1.hvm-ebs = "ami-58ba3a30";
+  "14.04".us-east-1.pv-ebs = "ami-9e0583f6";
+  "14.04".us-east-1.pv-s3 = "ami-9cbe3ef4";
+  "14.04".us-west-1.hvm-ebs = "ami-0bc3d74e";
+  "14.04".us-west-1.pv-ebs = "ami-8b1703ce";
+  "14.04".us-west-1.pv-s3 = "ami-27ccd862";
+  "14.04".us-west-2.hvm-ebs = "ami-3bf1bf0b";
+  "14.04".us-west-2.pv-ebs = "ami-259bd515";
+  "14.04".us-west-2.pv-s3 = "ami-07094037";
+
+  "14.12".ap-northeast-1.hvm-ebs = "ami-24435f25";
+  "14.12".ap-northeast-1.pv-ebs = "ami-b0425eb1";
+  "14.12".ap-northeast-1.pv-s3 = "ami-fed3c6ff";
+  "14.12".ap-southeast-1.hvm-ebs = "ami-6c765d3e";
+  "14.12".ap-southeast-1.pv-ebs = "ami-6a765d38";
+  "14.12".ap-southeast-1.pv-s3 = "ami-d1bf9183";
+  "14.12".ap-southeast-2.hvm-ebs = "ami-af86f395";
+  "14.12".ap-southeast-2.pv-ebs = "ami-b386f389";
+  "14.12".ap-southeast-2.pv-s3 = "ami-69c5ae53";
+  "14.12".eu-central-1.hvm-ebs = "ami-4a497a57";
+  "14.12".eu-central-1.pv-ebs = "ami-4c497a51";
+  "14.12".eu-central-1.pv-s3 = "ami-60f2c27d";
+  "14.12".eu-west-1.hvm-ebs = "ami-d126a5a6";
+  "14.12".eu-west-1.pv-ebs = "ami-0126a576";
+  "14.12".eu-west-1.pv-s3 = "ami-deda5fa9";
+  "14.12".sa-east-1.hvm-ebs = "ami-2d239e30";
+  "14.12".sa-east-1.pv-ebs = "ami-35239e28";
+  "14.12".sa-east-1.pv-s3 = "ami-81e3519c";
+  "14.12".us-east-1.hvm-ebs = "ami-0c463a64";
+  "14.12".us-east-1.pv-ebs = "ami-ac473bc4";
+  "14.12".us-east-1.pv-s3 = "ami-00e18a68";
+  "14.12".us-west-1.hvm-ebs = "ami-ca534a8f";
+  "14.12".us-west-1.pv-ebs = "ami-3e534a7b";
+  "14.12".us-west-1.pv-s3 = "ami-2905196c";
+  "14.12".us-west-2.hvm-ebs = "ami-fb9dc3cb";
+  "14.12".us-west-2.pv-ebs = "ami-899dc3b9";
+  "14.12".us-west-2.pv-s3 = "ami-cb7f2dfb";
+
+  "15.09".ap-northeast-1.hvm-ebs = "ami-58cac236";
+  "15.09".ap-northeast-1.hvm-s3 = "ami-39c8c057";
+  "15.09".ap-northeast-1.pv-ebs = "ami-5ac9c134";
+  "15.09".ap-northeast-1.pv-s3 = "ami-03cec66d";
+  "15.09".ap-southeast-1.hvm-ebs = "ami-2fc2094c";
+  "15.09".ap-southeast-1.hvm-s3 = "ami-9ec308fd";
+  "15.09".ap-southeast-1.pv-ebs = "ami-95c00bf6";
+  "15.09".ap-southeast-1.pv-s3 = "ami-bfc00bdc";
+  "15.09".ap-southeast-2.hvm-ebs = "ami-996c4cfa";
+  "15.09".ap-southeast-2.hvm-s3 = "ami-3f6e4e5c";
+  "15.09".ap-southeast-2.pv-ebs = "ami-066d4d65";
+  "15.09".ap-southeast-2.pv-s3 = "ami-cc6e4eaf";
+  "15.09".eu-central-1.hvm-ebs = "ami-3f8c6b50";
+  "15.09".eu-central-1.hvm-s3 = "ami-5b836434";
+  "15.09".eu-central-1.pv-ebs = "ami-118c6b7e";
+  "15.09".eu-central-1.pv-s3 = "ami-2c977043";
+  "15.09".eu-west-1.hvm-ebs = "ami-9cf04aef";
+  "15.09".eu-west-1.hvm-s3 = "ami-2bea5058";
+  "15.09".eu-west-1.pv-ebs = "ami-c9e852ba";
+  "15.09".eu-west-1.pv-s3 = "ami-c6f64cb5";
+  "15.09".sa-east-1.hvm-ebs = "ami-6e52df02";
+  "15.09".sa-east-1.hvm-s3 = "ami-1852df74";
+  "15.09".sa-east-1.pv-ebs = "ami-4368e52f";
+  "15.09".sa-east-1.pv-s3 = "ami-f15ad79d";
+  "15.09".us-east-1.hvm-ebs = "ami-84a6a0ee";
+  "15.09".us-east-1.hvm-s3 = "ami-06a7a16c";
+  "15.09".us-east-1.pv-ebs = "ami-a4a1a7ce";
+  "15.09".us-east-1.pv-s3 = "ami-5ba8ae31";
+  "15.09".us-west-1.hvm-ebs = "ami-22c8bb42";
+  "15.09".us-west-1.hvm-s3 = "ami-a2ccbfc2";
+  "15.09".us-west-1.pv-ebs = "ami-10cebd70";
+  "15.09".us-west-1.pv-s3 = "ami-fa30429a";
+  "15.09".us-west-2.hvm-ebs = "ami-ce57b9ae";
+  "15.09".us-west-2.hvm-s3 = "ami-2956b849";
+  "15.09".us-west-2.pv-ebs = "ami-005fb160";
+  "15.09".us-west-2.pv-s3 = "ami-cd55bbad";
+
+  "16.03".ap-northeast-1.hvm-ebs = "ami-40619d21";
+  "16.03".ap-northeast-1.hvm-s3 = "ami-ce629eaf";
+  "16.03".ap-northeast-1.pv-ebs = "ami-ef639f8e";
+  "16.03".ap-northeast-1.pv-s3 = "ami-a1609cc0";
+  "16.03".ap-northeast-2.hvm-ebs = "ami-deca00b0";
+  "16.03".ap-northeast-2.hvm-s3 = "ami-a3b77dcd";
+  "16.03".ap-northeast-2.pv-ebs = "ami-7bcb0115";
+  "16.03".ap-northeast-2.pv-s3 = "ami-a2b77dcc";
+  "16.03".ap-south-1.hvm-ebs = "ami-0dff9562";
+  "16.03".ap-south-1.hvm-s3 = "ami-13f69c7c";
+  "16.03".ap-south-1.pv-ebs = "ami-0ef39961";
+  "16.03".ap-south-1.pv-s3 = "ami-e0c8a28f";
+  "16.03".ap-southeast-1.hvm-ebs = "ami-5e964a3d";
+  "16.03".ap-southeast-1.hvm-s3 = "ami-4d964a2e";
+  "16.03".ap-southeast-1.pv-ebs = "ami-ec9b478f";
+  "16.03".ap-southeast-1.pv-s3 = "ami-999b47fa";
+  "16.03".ap-southeast-2.hvm-ebs = "ami-9f7359fc";
+  "16.03".ap-southeast-2.hvm-s3 = "ami-987359fb";
+  "16.03".ap-southeast-2.pv-ebs = "ami-a2705ac1";
+  "16.03".ap-southeast-2.pv-s3 = "ami-a3705ac0";
+  "16.03".eu-central-1.hvm-ebs = "ami-17a45178";
+  "16.03".eu-central-1.hvm-s3 = "ami-f9a55096";
+  "16.03".eu-central-1.pv-ebs = "ami-c8a550a7";
+  "16.03".eu-central-1.pv-s3 = "ami-6ea45101";
+  "16.03".eu-west-1.hvm-ebs = "ami-b5b3d5c6";
+  "16.03".eu-west-1.hvm-s3 = "ami-c986e0ba";
+  "16.03".eu-west-1.pv-ebs = "ami-b083e5c3";
+  "16.03".eu-west-1.pv-s3 = "ami-3c83e54f";
+  "16.03".sa-east-1.hvm-ebs = "ami-f6eb7f9a";
+  "16.03".sa-east-1.hvm-s3 = "ami-93e773ff";
+  "16.03".sa-east-1.pv-ebs = "ami-cbb82ca7";
+  "16.03".sa-east-1.pv-s3 = "ami-abb82cc7";
+  "16.03".us-east-1.hvm-ebs = "ami-c123a3d6";
+  "16.03".us-east-1.hvm-s3 = "ami-bc25a5ab";
+  "16.03".us-east-1.pv-ebs = "ami-bd25a5aa";
+  "16.03".us-east-1.pv-s3 = "ami-a325a5b4";
+  "16.03".us-west-1.hvm-ebs = "ami-748bcd14";
+  "16.03".us-west-1.hvm-s3 = "ami-a68dcbc6";
+  "16.03".us-west-1.pv-ebs = "ami-048acc64";
+  "16.03".us-west-1.pv-s3 = "ami-208dcb40";
+  "16.03".us-west-2.hvm-ebs = "ami-8263a0e2";
+  "16.03".us-west-2.hvm-s3 = "ami-925c9ff2";
+  "16.03".us-west-2.pv-ebs = "ami-5e61a23e";
+  "16.03".us-west-2.pv-s3 = "ami-734c8f13";
+
+  # 16.09.1508.3909827
+  "16.09".ap-northeast-1.hvm-ebs = "ami-68453b0f";
+  "16.09".ap-northeast-1.hvm-s3 = "ami-f9bec09e";
+  "16.09".ap-northeast-1.pv-ebs = "ami-254a3442";
+  "16.09".ap-northeast-1.pv-s3 = "ami-ef473988";
+  "16.09".ap-northeast-2.hvm-ebs = "ami-18ae7f76";
+  "16.09".ap-northeast-2.hvm-s3 = "ami-9eac7df0";
+  "16.09".ap-northeast-2.pv-ebs = "ami-57aa7b39";
+  "16.09".ap-northeast-2.pv-s3 = "ami-5cae7f32";
+  "16.09".ap-south-1.hvm-ebs = "ami-b3f98fdc";
+  "16.09".ap-south-1.hvm-s3 = "ami-98e690f7";
+  "16.09".ap-south-1.pv-ebs = "ami-aef98fc1";
+  "16.09".ap-south-1.pv-s3 = "ami-caf88ea5";
+  "16.09".ap-southeast-1.hvm-ebs = "ami-80fb51e3";
+  "16.09".ap-southeast-1.hvm-s3 = "ami-2df3594e";
+  "16.09".ap-southeast-1.pv-ebs = "ami-37f05a54";
+  "16.09".ap-southeast-1.pv-s3 = "ami-27f35944";
+  "16.09".ap-southeast-2.hvm-ebs = "ami-57ece834";
+  "16.09".ap-southeast-2.hvm-s3 = "ami-87f4f0e4";
+  "16.09".ap-southeast-2.pv-ebs = "ami-d8ede9bb";
+  "16.09".ap-southeast-2.pv-s3 = "ami-a6ebefc5";
+  "16.09".ca-central-1.hvm-ebs = "ami-9f863bfb";
+  "16.09".ca-central-1.hvm-s3 = "ami-ea85388e";
+  "16.09".ca-central-1.pv-ebs = "ami-ce8a37aa";
+  "16.09".ca-central-1.pv-s3 = "ami-448a3720";
+  "16.09".eu-central-1.hvm-ebs = "ami-1b884774";
+  "16.09".eu-central-1.hvm-s3 = "ami-b08c43df";
+  "16.09".eu-central-1.pv-ebs = "ami-888946e7";
+  "16.09".eu-central-1.pv-s3 = "ami-06874869";
+  "16.09".eu-west-1.hvm-ebs = "ami-1ed3e76d";
+  "16.09".eu-west-1.hvm-s3 = "ami-73d1e500";
+  "16.09".eu-west-1.pv-ebs = "ami-44c0f437";
+  "16.09".eu-west-1.pv-s3 = "ami-f3d8ec80";
+  "16.09".eu-west-2.hvm-ebs = "ami-2c9c9648";
+  "16.09".eu-west-2.hvm-s3 = "ami-6b9e940f";
+  "16.09".eu-west-2.pv-ebs = "ami-f1999395";
+  "16.09".eu-west-2.pv-s3 = "ami-bb9f95df";
+  "16.09".sa-east-1.hvm-ebs = "ami-a11882cd";
+  "16.09".sa-east-1.hvm-s3 = "ami-7726bc1b";
+  "16.09".sa-east-1.pv-ebs = "ami-9725bffb";
+  "16.09".sa-east-1.pv-s3 = "ami-b027bddc";
+  "16.09".us-east-1.hvm-ebs = "ami-854ca593";
+  "16.09".us-east-1.hvm-s3 = "ami-2241a834";
+  "16.09".us-east-1.pv-ebs = "ami-a441a8b2";
+  "16.09".us-east-1.pv-s3 = "ami-e841a8fe";
+  "16.09".us-east-2.hvm-ebs = "ami-3f41645a";
+  "16.09".us-east-2.hvm-s3 = "ami-804065e5";
+  "16.09".us-east-2.pv-ebs = "ami-f1466394";
+  "16.09".us-east-2.pv-s3 = "ami-05426760";
+  "16.09".us-west-1.hvm-ebs = "ami-c2efbca2";
+  "16.09".us-west-1.hvm-s3 = "ami-d71042b7";
+  "16.09".us-west-1.pv-ebs = "ami-04e8bb64";
+  "16.09".us-west-1.pv-s3 = "ami-31e9ba51";
+  "16.09".us-west-2.hvm-ebs = "ami-6449f504";
+  "16.09".us-west-2.hvm-s3 = "ami-344af654";
+  "16.09".us-west-2.pv-ebs = "ami-6d4af60d";
+  "16.09".us-west-2.pv-s3 = "ami-de48f4be";
+
+  # 17.03.885.6024dd4067
+  "17.03".ap-northeast-1.hvm-ebs = "ami-dbd0f7bc";
+  "17.03".ap-northeast-1.hvm-s3 = "ami-7cdff81b";
+  "17.03".ap-northeast-2.hvm-ebs = "ami-c59a48ab";
+  "17.03".ap-northeast-2.hvm-s3 = "ami-0b944665";
+  "17.03".ap-south-1.hvm-ebs = "ami-4f413220";
+  "17.03".ap-south-1.hvm-s3 = "ami-864033e9";
+  "17.03".ap-southeast-1.hvm-ebs = "ami-e08c3383";
+  "17.03".ap-southeast-1.hvm-s3 = "ami-c28f30a1";
+  "17.03".ap-southeast-2.hvm-ebs = "ami-fca9a69f";
+  "17.03".ap-southeast-2.hvm-s3 = "ami-3daaa55e";
+  "17.03".ca-central-1.hvm-ebs = "ami-9b00bdff";
+  "17.03".ca-central-1.hvm-s3 = "ami-e800bd8c";
+  "17.03".eu-central-1.hvm-ebs = "ami-5450803b";
+  "17.03".eu-central-1.hvm-s3 = "ami-6e2efe01";
+  "17.03".eu-west-1.hvm-ebs = "ami-10754c76";
+  "17.03".eu-west-1.hvm-s3 = "ami-11734a77";
+  "17.03".eu-west-2.hvm-ebs = "ami-ff1d099b";
+  "17.03".eu-west-2.hvm-s3 = "ami-fe1d099a";
+  "17.03".sa-east-1.hvm-ebs = "ami-d95d3eb5";
+  "17.03".sa-east-1.hvm-s3 = "ami-fca2c190";
+  "17.03".us-east-1.hvm-ebs = "ami-0940c61f";
+  "17.03".us-east-1.hvm-s3 = "ami-674fc971";
+  "17.03".us-east-2.hvm-ebs = "ami-afc2e6ca";
+  "17.03".us-east-2.hvm-s3 = "ami-a1cde9c4";
+  "17.03".us-west-1.hvm-ebs = "ami-587b2138";
+  "17.03".us-west-1.hvm-s3 = "ami-70411b10";
+  "17.03".us-west-2.hvm-ebs = "ami-a93daac9";
+  "17.03".us-west-2.hvm-s3 = "ami-5139ae31";
+
+  # 17.09.2681.59661f21be6
+  "17.09".eu-west-1.hvm-ebs = "ami-a30192da";
+  "17.09".eu-west-2.hvm-ebs = "ami-295a414d";
+  "17.09".eu-west-3.hvm-ebs = "ami-8c0eb9f1";
+  "17.09".eu-central-1.hvm-ebs = "ami-266cfe49";
+  "17.09".us-east-1.hvm-ebs = "ami-40bee63a";
+  "17.09".us-east-2.hvm-ebs = "ami-9d84aff8";
+  "17.09".us-west-1.hvm-ebs = "ami-d14142b1";
+  "17.09".us-west-2.hvm-ebs = "ami-3eb40346";
+  "17.09".ca-central-1.hvm-ebs = "ami-ca8207ae";
+  "17.09".ap-southeast-1.hvm-ebs = "ami-84bccff8";
+  "17.09".ap-southeast-2.hvm-ebs = "ami-0dc5386f";
+  "17.09".ap-northeast-1.hvm-ebs = "ami-89b921ef";
+  "17.09".ap-northeast-2.hvm-ebs = "ami-179b3b79";
+  "17.09".sa-east-1.hvm-ebs = "ami-4762202b";
+  "17.09".ap-south-1.hvm-ebs = "ami-4e376021";
+
+  # 18.03.132946.1caae7247b8
+  "18.03".eu-west-1.hvm-ebs = "ami-065c46ec";
+  "18.03".eu-west-2.hvm-ebs = "ami-64f31903";
+  "18.03".eu-west-3.hvm-ebs = "ami-5a8d3d27";
+  "18.03".eu-central-1.hvm-ebs = "ami-09faf9e2";
+  "18.03".us-east-1.hvm-ebs = "ami-8b3538f4";
+  "18.03".us-east-2.hvm-ebs = "ami-150b3170";
+  "18.03".us-west-1.hvm-ebs = "ami-ce06ebad";
+  "18.03".us-west-2.hvm-ebs = "ami-586c3520";
+  "18.03".ca-central-1.hvm-ebs = "ami-aca72ac8";
+  "18.03".ap-southeast-1.hvm-ebs = "ami-aa0b4d40";
+  "18.03".ap-southeast-2.hvm-ebs = "ami-d0f254b2";
+  "18.03".ap-northeast-1.hvm-ebs = "ami-456511a8";
+  "18.03".ap-northeast-2.hvm-ebs = "ami-3366d15d";
+  "18.03".sa-east-1.hvm-ebs = "ami-163e1f7a";
+  "18.03".ap-south-1.hvm-ebs = "ami-6a390b05";
+
+  # 18.09.910.c15e342304a
+  "18.09".eu-west-1.hvm-ebs = "ami-0f412186fb8a0ec97";
+  "18.09".eu-west-2.hvm-ebs = "ami-0dada3805ce43c55e";
+  "18.09".eu-west-3.hvm-ebs = "ami-074df85565f2e02e2";
+  "18.09".eu-central-1.hvm-ebs = "ami-07c9b884e679df4f8";
+  "18.09".us-east-1.hvm-ebs = "ami-009c9c3f1af480ff3";
+  "18.09".us-east-2.hvm-ebs = "ami-08199961085ea8bc6";
+  "18.09".us-west-1.hvm-ebs = "ami-07aa7f56d612ddd38";
+  "18.09".us-west-2.hvm-ebs = "ami-01c84b7c368ac24d1";
+  "18.09".ca-central-1.hvm-ebs = "ami-04f66113f76198f6c";
+  "18.09".ap-southeast-1.hvm-ebs = "ami-0892c7e24ebf2194f";
+  "18.09".ap-southeast-2.hvm-ebs = "ami-010730f36424b0a2c";
+  "18.09".ap-northeast-1.hvm-ebs = "ami-0cdba8e998f076547";
+  "18.09".ap-northeast-2.hvm-ebs = "ami-0400a698e6a9f4a15";
+  "18.09".sa-east-1.hvm-ebs = "ami-0e4a8a47fd6db6112";
+  "18.09".ap-south-1.hvm-ebs = "ami-0880a678d3f555313";
+
+  # 19.03.172286.8ea36d73256
+  "19.03".eu-west-1.hvm-ebs = "ami-0fe40176548ff0940";
+  "19.03".eu-west-2.hvm-ebs = "ami-03a40fd3a02fe95ba";
+  "19.03".eu-west-3.hvm-ebs = "ami-0436f9da0f20a638e";
+  "19.03".eu-central-1.hvm-ebs = "ami-0022b8ea9efde5de4";
+  "19.03".us-east-1.hvm-ebs = "ami-0efc58fb70ae9a217";
+  "19.03".us-east-2.hvm-ebs = "ami-0abf711b1b34da1af";
+  "19.03".us-west-1.hvm-ebs = "ami-07d126e8838c40ec5";
+  "19.03".us-west-2.hvm-ebs = "ami-03f8a737546e47fb0";
+  "19.03".ca-central-1.hvm-ebs = "ami-03f9fd0ef2e035ede";
+  "19.03".ap-southeast-1.hvm-ebs = "ami-0cff66114c652c262";
+  "19.03".ap-southeast-2.hvm-ebs = "ami-054c73a7f8d773ea9";
+  "19.03".ap-northeast-1.hvm-ebs = "ami-00db62688900456a4";
+  "19.03".ap-northeast-2.hvm-ebs = "ami-0485cdd1a5fdd2117";
+  "19.03".sa-east-1.hvm-ebs = "ami-0c6a43c6e0ad1f4e2";
+  "19.03".ap-south-1.hvm-ebs = "ami-0303deb1b5890f878";
+
+  latest = self."19.03";
+}; in self
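+
+# Example lookup: AMIs are addressed as
+#   (import ./ec2-amis.nix)."19.03".eu-west-1.hvm-ebs
+# and `latest` always points at the newest release listed in this file.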
diff --git a/nixpkgs/nixos/modules/virtualisation/ec2-data.nix b/nixpkgs/nixos/modules/virtualisation/ec2-data.nix
new file mode 100644
index 000000000000..db3dd9949c12
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ec2-data.nix
@@ -0,0 +1,87 @@
+# This module defines a systemd service that sets the SSH host key and
+# authorized client key and host name of virtual machines running on
+# Amazon EC2, Eucalyptus and OpenStack Compute (Nova).
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  config = {
+
+    systemd.services.apply-ec2-data =
+      { description = "Apply EC2 Data";
+
+        wantedBy = [ "multi-user.target" "sshd.service" ];
+        before = [ "sshd.service" ];
+
+        path = [ pkgs.iproute ];
+
+        script =
+          ''
+            ${optionalString (config.networking.hostName == "") ''
+              echo "setting host name..."
+              if [ -s /etc/ec2-metadata/hostname ]; then
+                  ${pkgs.nettools}/bin/hostname $(cat /etc/ec2-metadata/hostname)
+              fi
+            ''}
+
+            if ! [ -e /root/.ssh/authorized_keys ]; then
+                echo "obtaining SSH key..."
+                mkdir -m 0700 -p /root/.ssh
+                if [ -s /etc/ec2-metadata/public-keys-0-openssh-key ]; then
+                    cat /etc/ec2-metadata/public-keys-0-openssh-key >> /root/.ssh/authorized_keys
+                    echo "new key added to authorized_keys"
+                    chmod 600 /root/.ssh/authorized_keys
+                fi
+            fi
+
+            # Extract the intended SSH host key for this machine from
+            # the supplied user data, if available.  Otherwise sshd will
+            # generate one normally.
+            userData=/etc/ec2-metadata/user-data
+
+            mkdir -m 0755 -p /etc/ssh
+
+            if [ -s "$userData" ]; then
+              key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' $userData)"
+              key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' $userData)"
+              if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
+                  (umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
+                  echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
+              fi
+
+              key="$(sed 's/|/\n/g; s/SSH_HOST_ED25519_KEY://; t; d' $userData)"
+              key_pub="$(sed 's/SSH_HOST_ED25519_KEY_PUB://; t; d' $userData)"
+              if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_ed25519_key ]; then
+                  (umask 077; echo "$key" > /etc/ssh/ssh_host_ed25519_key)
+                  echo "$key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
+              fi
+            fi
+          '';
+
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+      };
+
+    systemd.services."print-host-key" =
+      { description = "Print SSH Host Key";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "sshd.service" ];
+        script =
+          ''
+            # Print the host public key on the console so that the user
+            # can obtain it securely by parsing the output of
+            # ec2-get-console-output.
+            echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----" > /dev/console
+            for i in /etc/ssh/ssh_host_*_key.pub; do
+                ${config.programs.ssh.package}/bin/ssh-keygen -l -f $i > /dev/console
+            done
+            echo "-----END SSH HOST KEY FINGERPRINTS-----" > /dev/console
+          '';
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+      };
+
+  };
+}
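+
+# Sketch of the user-data format parsed above (inferred from the sed
+# expressions; the key material shown is a placeholder). Newlines inside the
+# private key are encoded as `|`:
+#
+#   SSH_HOST_ED25519_KEY_PUB:ssh-ed25519 AAAA... root@example
+#   SSH_HOST_ED25519_KEY:-----BEGIN OPENSSH PRIVATE KEY-----|...|-----END OPENSSH PRIVATE KEY-----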
diff --git a/nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.nix b/nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.nix
new file mode 100644
index 000000000000..b531787c31a2
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.nix
@@ -0,0 +1,23 @@
+{ targetRoot, wgetExtraOptions }:
+''
+  metaDir=${targetRoot}etc/ec2-metadata
+  mkdir -m 0755 -p "$metaDir"
+
+  echo "getting EC2 instance metadata..."
+
+  if ! [ -e "$metaDir/ami-manifest-path" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path
+  fi
+
+  if ! [ -e "$metaDir/user-data" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/user-data" http://169.254.169.254/1.0/user-data && chmod 600 "$metaDir/user-data"
+  fi
+
+  if ! [ -e "$metaDir/hostname" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname
+  fi
+
+  if ! [ -e "$metaDir/public-keys-0-openssh-key" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key
+  fi
+''
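+
+# Example use (a sketch; the argument values are illustrative): this file is a
+# function returning a shell fragment, so callers import it along the lines of
+#
+#   import ./ec2-metadata-fetcher.nix {
+#     targetRoot = "/";
+#     wgetExtraOptions = "-q";
+#   }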
diff --git a/nixpkgs/nixos/modules/virtualisation/ecs-agent.nix b/nixpkgs/nixos/modules/virtualisation/ecs-agent.nix
new file mode 100644
index 000000000000..fc51b159579e
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ecs-agent.nix
@@ -0,0 +1,46 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ecs-agent;
+in {
+  options.services.ecs-agent = {
+    enable = mkEnableOption "Amazon ECS agent";
+
+    package = mkOption {
+      type = types.path;
+      description = "The ECS agent package to use";
+      default = pkgs.ecs-agent;
+      defaultText = "pkgs.ecs-agent";
+    };
+
+    extra-environment = mkOption {
+      type = types.attrsOf types.str;
+      description = "The environment the ECS agent should run with. See the ECS agent documentation for keys that work here.";
+      default = {};
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    # This service doesn't run if Docker isn't running, and unlike potentially remote services (such as postgresql),
+    # Docker has to be running locally, so `docker.enable` will always be set if the ECS agent is enabled.
+    virtualisation.docker.enable = true;
+
+    systemd.services.ecs-agent = {
+      inherit (cfg.package.meta) description;
+      after    = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = cfg.extra-environment;
+
+      script = ''
+        if [ ! -z "$ECS_DATADIR" ]; then
+          mkdir -p "$ECS_DATADIR"
+        fi
+        ${cfg.package.bin}/bin/agent
+      '';
+    };
+  };
+}
+
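+# Example usage (a sketch; the cluster name and data directory are
+# illustrative, and ECS_CLUSTER/ECS_DATADIR are keys documented by the ECS
+# agent itself, not options of this module):
+#
+# {
+#   services.ecs-agent = {
+#     enable = true;
+#     extra-environment = {
+#       ECS_CLUSTER = "my-cluster";
+#       ECS_DATADIR = "/var/lib/ecs/data";
+#     };
+#   };
+# }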
diff --git a/nixpkgs/nixos/modules/virtualisation/gce-images.nix b/nixpkgs/nixos/modules/virtualisation/gce-images.nix
new file mode 100644
index 000000000000..5354d91deb93
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/gce-images.nix
@@ -0,0 +1,9 @@
+let self = {
+  "14.12" = "gs://nixos-cloud-images/nixos-14.12.471.1f09b77-x86_64-linux.raw.tar.gz";
+  "15.09" = "gs://nixos-cloud-images/nixos-15.09.425.7870f20-x86_64-linux.raw.tar.gz";
+  "16.03" = "gs://nixos-cloud-images/nixos-image-16.03.847.8688c17-x86_64-linux.raw.tar.gz";
+  "17.03" = "gs://nixos-cloud-images/nixos-image-17.03.1082.4aab5c5798-x86_64-linux.raw.tar.gz";
+  "18.03" = "gs://nixos-cloud-images/nixos-image-18.03.132536.fdb5ba4cdf9-x86_64-linux.raw.tar.gz";
+  "18.09" = "gs://nixos-cloud-images/nixos-image-18.09.1228.a4c4cbb613c-x86_64-linux.raw.tar.gz";
+  latest = self."18.09";
+}; in self
diff --git a/nixpkgs/nixos/modules/virtualisation/google-compute-config.nix b/nixpkgs/nixos/modules/virtualisation/google-compute-config.nix
new file mode 100644
index 000000000000..5c59188b68b2
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/google-compute-config.nix
@@ -0,0 +1,235 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  gce = pkgs.google-compute-engine;
+in
+{
+  imports = [
+    ../profiles/headless.nix
+    ../profiles/qemu-guest.nix
+  ];
+
+
+  fileSystems."/" = {
+    fsType = "ext4";
+    device = "/dev/disk/by-label/nixos";
+    autoResize = true;
+  };
+
+  boot.growPartition = true;
+  boot.kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
+  boot.initrd.kernelModules = [ "virtio_scsi" ];
+  boot.kernelModules = [ "virtio_pci" "virtio_net" ];
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/sda";
+  boot.loader.timeout = 0;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  # Allow root logins only using the SSH key that the user specified
+  # at instance creation time.
+  services.openssh.enable = true;
+  services.openssh.permitRootLogin = "prohibit-password";
+  services.openssh.passwordAuthentication = mkDefault false;
+
+  # Use GCE udev rules for dynamic disk volumes
+  services.udev.packages = [ gce ];
+
+  # Force getting the hostname from Google Compute.
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  environment.systemPackages = [ pkgs.cryptsetup ];
+
+  # Make sure the GCE image does not replace the host key that NixOps sets.
+  environment.etc."default/instance_configs.cfg".text = lib.mkDefault ''
+    [InstanceSetup]
+    set_host_keys = false
+  '';
+
+  # Rely on GCP's firewall instead
+  networking.firewall.enable = mkDefault false;
+
+  # Configure default metadata hostnames
+  networking.extraHosts = ''
+    169.254.169.254 metadata.google.internal metadata
+  '';
+
+  networking.timeServers = [ "metadata.google.internal" ];
+
+  networking.usePredictableInterfaceNames = false;
+
+  # GCE uses an MTU of 1460.
+  networking.interfaces.eth0.mtu = 1460;
+
+  security.googleOsLogin.enable = true;
+
+  systemd.services.google-clock-skew-daemon = {
+    description = "Google Compute Engine Clock Skew Daemon";
+    after = [
+      "network.target"
+      "google-instance-setup.service"
+      "google-network-setup.service"
+    ];
+    requires = ["network.target"];
+    wantedBy = ["multi-user.target"];
+    serviceConfig = {
+      Type = "simple";
+      ExecStart = "${gce}/bin/google_clock_skew_daemon --debug";
+    };
+  };
+
+  systemd.services.google-instance-setup = {
+    description = "Google Compute Engine Instance Setup";
+    after = ["local-fs.target" "network-online.target" "network.target" "rsyslog.service"];
+    before = ["sshd.service"];
+    wants = ["local-fs.target" "network-online.target" "network.target"];
+    wantedBy = [ "sshd.service" "multi-user.target" ];
+    path = with pkgs; [ ethtool openssh ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_instance_setup --debug";
+      Type = "oneshot";
+    };
+  };
+
+  systemd.services.google-network-daemon = {
+    description = "Google Compute Engine Network Daemon";
+    after = ["local-fs.target" "network-online.target" "network.target" "rsyslog.service" "google-instance-setup.service"];
+    wants = ["local-fs.target" "network-online.target" "network.target"];
+    requires = ["network.target"];
+    partOf = ["network.target"];
+    wantedBy = [ "multi-user.target" ];
+    path = with pkgs; [ iproute ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_network_daemon --debug";
+    };
+  };
+
+  systemd.services.google-shutdown-scripts = {
+    description = "Google Compute Engine Shutdown Scripts";
+    after = [
+      "local-fs.target"
+      "network-online.target"
+      "network.target"
+      "rsyslog.service"
+      "systemd-resolved.service"
+      "google-instance-setup.service"
+      "google-network-daemon.service"
+    ];
+    wants = [ "local-fs.target" "network-online.target" "network.target"];
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      ExecStart = "${pkgs.coreutils}/bin/true";
+      ExecStop = "${gce}/bin/google_metadata_script_runner --debug --script-type shutdown";
+      Type = "oneshot";
+      RemainAfterExit = true;
+      TimeoutStopSec = "infinity";
+    };
+  };
+
+  systemd.services.google-startup-scripts = {
+    description = "Google Compute Engine Startup Scripts";
+    after = [
+      "local-fs.target"
+      "network-online.target"
+      "network.target"
+      "rsyslog.service"
+      "google-instance-setup.service"
+      "google-network-daemon.service"
+    ];
+    wants = ["local-fs.target" "network-online.target" "network.target"];
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_metadata_script_runner --debug --script-type startup";
+      KillMode = "process";
+      Type = "oneshot";
+    };
+  };
+
+
+  # Settings taken from https://github.com/GoogleCloudPlatform/compute-image-packages/blob/master/google_config/sysctl/11-gce-network-security.conf
+  boot.kernel.sysctl = {
+    # Turn on SYN-flood protections.  Starting with 2.6.26, there is no loss
+    # of TCP functionality/features under normal conditions.  When flood
+    # protections kick in under high unanswered-SYN load, the system
+    # should remain more stable, with a trade off of some loss of TCP
+    # functionality/features (e.g. TCP Window scaling).
+    "net.ipv4.tcp_syncookies" = mkDefault "1";
+
+    # ignores source-routed packets
+    "net.ipv4.conf.all.accept_source_route" = mkDefault "0";
+
+    # ignores source-routed packets
+    "net.ipv4.conf.default.accept_source_route" = mkDefault "0";
+
+    # ignores ICMP redirects
+    "net.ipv4.conf.all.accept_redirects" = mkDefault "0";
+
+    # ignores ICMP redirects
+    "net.ipv4.conf.default.accept_redirects" = mkDefault "0";
+
+    # ignores ICMP redirects from non-GW hosts
+    "net.ipv4.conf.all.secure_redirects" = mkDefault "1";
+
+    # ignores ICMP redirects from non-GW hosts
+    "net.ipv4.conf.default.secure_redirects" = mkDefault "1";
+
+    # don't allow traffic between networks or act as a router
+    "net.ipv4.ip_forward" = mkDefault "0";
+
+    # don't allow traffic between networks or act as a router
+    "net.ipv4.conf.all.send_redirects" = mkDefault "0";
+
+    # don't allow traffic between networks or act as a router
+    "net.ipv4.conf.default.send_redirects" = mkDefault "0";
+
+    # reverse path filtering - IP spoofing protection
+    "net.ipv4.conf.all.rp_filter" = mkDefault "1";
+
+    # reverse path filtering - IP spoofing protection
+    "net.ipv4.conf.default.rp_filter" = mkDefault "1";
+
+    # ignores ICMP broadcasts to avoid participating in Smurf attacks
+    "net.ipv4.icmp_echo_ignore_broadcasts" = mkDefault "1";
+
+    # ignores bad ICMP errors
+    "net.ipv4.icmp_ignore_bogus_error_responses" = mkDefault "1";
+
+    # logs spoofed, source-routed, and redirect packets
+    "net.ipv4.conf.all.log_martians" = mkDefault "1";
+
+    # log spoofed, source-routed, and redirect packets
+    "net.ipv4.conf.default.log_martians" = mkDefault "1";
+
+    # implements RFC 1337 fix
+    "net.ipv4.tcp_rfc1337" = mkDefault "1";
+
+    # randomizes addresses of mmap base, heap, stack and VDSO page
+    "kernel.randomize_va_space" = mkDefault "2";
+
+    # Reboot the machine soon after a kernel panic.
+    "kernel.panic" = mkDefault "10";
+
+    ## Not part of the original config
+
+    # provides protection from ToCToU races
+    "fs.protected_hardlinks" = mkDefault "1";
+
+    # provides protection from ToCToU races
+    "fs.protected_symlinks" = mkDefault "1";
+
+    # makes locating kernel addresses more difficult
+    "kernel.kptr_restrict" = mkDefault "1";
+
+    # set ptrace protections
+    "kernel.yama.ptrace_scope" = mkOverride 500 "1";
+
+    # set perf only available to root
+    "kernel.perf_event_paranoid" = mkDefault "2";
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/google-compute-image.nix b/nixpkgs/nixos/modules/virtualisation/google-compute-image.nix
new file mode 100644
index 000000000000..d172ae38fdcf
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/google-compute-image.nix
@@ -0,0 +1,61 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.googleComputeImage;
+  defaultConfigFile = pkgs.writeText "configuration.nix" ''
+    { ... }:
+    {
+      imports = [
+        <nixpkgs/nixos/modules/virtualisation/google-compute-image.nix>
+      ];
+    }
+  '';
+in
+{
+
+  imports = [ ./google-compute-config.nix ];
+
+  options = {
+    virtualisation.googleComputeImage.diskSize = mkOption {
+      type = with types; int;
+      default = 1536;
+      description = ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+
+    virtualisation.googleComputeImage.configFile = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      description = ''
+        A path to a configuration file which will be placed at `/etc/nixos/configuration.nix`
+        and be used when switching to a new configuration.
+        If set to `null`, a default configuration is used, where the only import is
+        `<nixpkgs/nixos/modules/virtualisation/google-compute-image.nix>`.
+      '';
+    };
+  };
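+
+  # A minimal usage sketch (values are illustrative, not defaults): build the
+  # image exposed as `config.system.build.googleComputeImage` with a larger
+  # disk and a user-supplied configuration file:
+  #
+  #   virtualisation.googleComputeImage.diskSize = 3072;
+  #   virtualisation.googleComputeImage.configFile = ./configuration.nix;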
+
+  #### implementation
+  config = {
+
+    system.build.googleComputeImage = import ../../lib/make-disk-image.nix {
+      name = "google-compute-image";
+      postVM = ''
+        PATH=$PATH:${with pkgs; stdenv.lib.makeBinPath [ gnutar gzip ]}
+        pushd $out
+        mv $diskImage disk.raw
+        tar -Szcf nixos-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.raw.tar.gz disk.raw
+        rm $out/disk.raw
+        popd
+      '';
+      format = "raw";
+      configFile = if cfg.configFile == null then defaultConfigFile else cfg.configFile;
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/grow-partition.nix b/nixpkgs/nixos/modules/virtualisation/grow-partition.nix
new file mode 100644
index 000000000000..444c0bc1630e
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/grow-partition.nix
@@ -0,0 +1,3 @@
+# This profile is deprecated, use boot.growPartition directly.
+builtins.trace "the profile <nixos/modules/virtualisation/grow-partition.nix> is deprecated, use boot.growPartition instead"
+{ }
diff --git a/nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix b/nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix
new file mode 100644
index 000000000000..0f1f052880c5
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.hypervGuest;
+
+in {
+  options = {
+    virtualisation.hypervGuest = {
+      enable = mkEnableOption "Hyper-V Guest Support";
+
+      videoMode = mkOption {
+        type = types.str;
+        default = "1152x864";
+        example = "1024x768";
+        description = ''
+          Resolution at which to initialize the video adapter.
+
+          Supports screen resolution up to Full HD 1920x1080 with 32 bit color
+          on Windows Server 2012, and 1600x1200 with 16 bit color on Windows
+          Server 2008 R2 or earlier.
+        '';
+      };
+    };
+  };
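+
+  # Example guest configuration (illustrative values), within the resolution
+  # limits documented for videoMode above:
+  #
+  #   virtualisation.hypervGuest = {
+  #     enable = true;
+  #     videoMode = "1920x1080";
+  #   };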
+
+  config = mkIf cfg.enable {
+    boot = {
+      initrd.kernelModules = [
+        "hv_balloon" "hv_netvsc" "hv_storvsc" "hv_utils" "hv_vmbus"
+      ];
+
+      kernelParams = [
+        "video=hyperv_fb:${cfg.videoMode}"
+      ];
+    };
+
+    environment.systemPackages = [ config.boot.kernelPackages.hyperv-daemons.bin ];
+
+    security.rngd.enable = false;
+
+    # enable hotadding cpu/memory
+    services.udev.packages = lib.singleton (pkgs.writeTextFile {
+      name = "hyperv-cpu-and-memory-hotadd-udev-rules";
+      destination = "/etc/udev/rules.d/99-hyperv-cpu-and-memory-hotadd.rules";
+      text = ''
+        # Memory hotadd
+        SUBSYSTEM=="memory", ACTION=="add", DEVPATH=="/devices/system/memory/memory[0-9]*", TEST=="state", ATTR{state}="online"
+
+        # CPU hotadd
+        SUBSYSTEM=="cpu", ACTION=="add", DEVPATH=="/devices/system/cpu/cpu[0-9]*", TEST=="online", ATTR{online}="1"
+      '';
+    });
+
+    systemd = {
+      packages = [ config.boot.kernelPackages.hyperv-daemons.lib ];
+
+      targets.hyperv-daemons = {
+        wantedBy = [ "multi-user.target" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/kvmgt.nix b/nixpkgs/nixos/modules/virtualisation/kvmgt.nix
new file mode 100644
index 000000000000..289e26e17035
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/kvmgt.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.kvmgt;
+
+  kernelPackages = config.boot.kernelPackages;
+
+  vgpuOptions = {
+    uuid = mkOption {
+      type = types.string;
+      description = "UUID of VGPU device. You can generate one with <package>libossp_uuid</package>.";
+    };
+  };
+
+in {
+  options = {
+    virtualisation.kvmgt = {
+      enable = mkEnableOption ''
+        KVMGT (iGVT-g) VGPU support. Allows QEMU/KVM guests to share the host's Intel integrated graphics card.
+        Currently only one graphics device can be shared.
+      '';
+      # multi-GPU support is still an open question
+      device = mkOption {
+        type = types.string;
+        default = "0000:00:02.0";
+        description = "PCI ID of graphics card. You can figure it with <command>ls /sys/class/mdev_bus</command>.";
+      };
+      vgpus = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule [ { options = vgpuOptions; } ]);
+        description = ''
+          Virtual GPUs to be used in QEMU. You can list the available device types via <command>ls /sys/bus/pci/devices/*/mdev_supported_types</command>
+          and get information about a type via <command>cat /sys/bus/pci/devices/*/mdev_supported_types/i915-GVTg_V5_4/description</command>.
+        '';
+        example = {
+          "i915-GVTg_V5_8" = {
+            uuid = "a297db4a-f4c2-11e6-90f6-d3b88d6c9525";
+          };
+        };
+      };
+    };
+  };
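+
+  # A sketch of a full configuration (the UUID and type name are illustrative;
+  # discover supported types as described in the vgpus option above):
+  #
+  #   virtualisation.kvmgt = {
+  #     enable = true;
+  #     device = "0000:00:02.0";
+  #     vgpus."i915-GVTg_V5_8".uuid = "a297db4a-f4c2-11e6-90f6-d3b88d6c9525";
+  #   };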
+
+  config = mkIf cfg.enable {
+    assertions = singleton {
+      assertion = versionAtLeast kernelPackages.kernel.version "4.16";
+      message = "KVMGT is not properly supported for kernels older than 4.16";
+    };
+
+    boot.kernelModules = [ "kvmgt" ];
+
+    boot.extraModprobeConfig = ''
+      options i915 enable_gvt=1
+    '';
+
+    systemd.paths = mapAttrs' (name: value:
+      nameValuePair "kvmgt-${name}" {
+        description = "KVMGT VGPU ${name} path";
+        wantedBy = [ "multi-user.target" ];
+        pathConfig = {
+          PathExists = "/sys/bus/pci/devices/${cfg.device}/mdev_supported_types/${name}/create";
+        };
+      }
+    ) cfg.vgpus;
+
+    systemd.services = mapAttrs' (name: value:
+      nameValuePair "kvmgt-${name}" {
+        description = "KVMGT VGPU ${name}";
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          ExecStart = "${pkgs.runtimeShell} -c 'echo ${value.uuid} > /sys/bus/pci/devices/${cfg.device}/mdev_supported_types/${name}/create'";
+          ExecStop = "${pkgs.runtimeShell} -c 'echo 1 > /sys/bus/pci/devices/${cfg.device}/${value.uuid}/remove'";
+        };
+      }
+    ) cfg.vgpus;
+  };
+
+  meta.maintainers = with maintainers; [ gnidorah ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/libvirtd.nix b/nixpkgs/nixos/modules/virtualisation/libvirtd.nix
new file mode 100644
index 000000000000..394b4ce56563
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/libvirtd.nix
@@ -0,0 +1,251 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.libvirtd;
+  vswitch = config.virtualisation.vswitch;
+  configFile = pkgs.writeText "libvirtd.conf" ''
+    unix_sock_group = "libvirtd"
+    unix_sock_rw_perms = "0770"
+    auth_unix_ro = "none"
+    auth_unix_rw = "none"
+    ${cfg.extraConfig}
+  '';
+  qemuConfigFile = pkgs.writeText "qemu.conf" ''
+    ${optionalString cfg.qemuOvmf ''
+      nvram = ["/run/libvirt/nix-ovmf/OVMF_CODE.fd:/run/libvirt/nix-ovmf/OVMF_VARS.fd"]
+    ''}
+    ${optionalString (!cfg.qemuRunAsRoot) ''
+      user = "qemu-libvirtd"
+      group = "qemu-libvirtd"
+    ''}
+    ${cfg.qemuVerbatimConfig}
+  '';
+  dirName = "libvirt";
+  subDirs = list: [ dirName ] ++ map (e: "${dirName}/${e}") list;
+
+in {
+
+  ###### interface
+
+  options.virtualisation.libvirtd = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        This option enables libvirtd, a daemon that manages
+        virtual machines. Users in the "libvirtd" group can interact with
+        the daemon (e.g. to start or stop VMs) using the
+        <command>virsh</command> command line tool, among others.
+      '';
+    };
+
+    qemuPackage = mkOption {
+      type = types.package;
+      default = pkgs.qemu;
+      description = ''
+        Qemu package to use with libvirt.
+        `pkgs.qemu` can emulate foreign architectures (e.g. aarch64 on x86),
+        while `pkgs.qemu_kvm` saves disk space by only emulating the host architecture.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Extra contents appended to the libvirtd configuration file,
+        libvirtd.conf.
+      '';
+    };
+
+    qemuRunAsRoot = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        If true,  libvirtd runs qemu as root.
+        If false, libvirtd runs qemu as unprivileged user qemu-libvirtd.
+        Changing this option to false may cause file permission issues
+        for existing guests. To fix these, manually change ownership
+        of affected files in /var/lib/libvirt/qemu to qemu-libvirtd.
+      '';
+    };
+
+    qemuVerbatimConfig = mkOption {
+      type = types.lines;
+      default = ''
+        namespaces = []
+      '';
+      description = ''
+        Contents written to the qemu configuration file, qemu.conf.
+        Make sure to include a proper namespace configuration when
+        supplying custom configuration.
+      '';
+    };
+
+    qemuOvmf = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Allows libvirtd to take advantage of OVMF when creating new
+        QEMU VMs with UEFI boot.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--verbose" ];
+      description = ''
+        Extra command line arguments passed to libvirtd on startup.
+      '';
+    };
+
+    onShutdown = mkOption {
+      type = types.enum ["shutdown" "suspend" ];
+      default = "suspend";
+      description = ''
+        When shutting down or restarting the host, the method used to
+        gracefully halt the guests. Setting this to "shutdown" will cause an
+        ACPI shutdown of each guest, while "suspend" will attempt to save the
+        state of the guests so it can be restored on boot.
+      '';
+    };
+
+    allowedBridges = mkOption {
+      type = types.listOf types.str;
+      default = [ "virbr0" ];
+      description = ''
+        List of bridge devices that can be used by qemu:///session
+      '';
+    };
+
+  };
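+
+  # A minimal sketch of a host configuration using these options (values are
+  # illustrative, not recommendations):
+  #
+  #   virtualisation.libvirtd = {
+  #     enable = true;
+  #     qemuPackage = pkgs.qemu_kvm;  # host architecture only
+  #     qemuRunAsRoot = false;
+  #     onShutdown = "shutdown";
+  #   };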
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment = {
+      # this file is expected in /etc/qemu and not sysconfdir (/var/lib)
+      etc."qemu/bridge.conf".text = lib.concatMapStringsSep "\n" (e:
+        "allow ${e}") cfg.allowedBridges;
+      systemPackages = with pkgs; [ libvirt libressl.nc cfg.qemuPackage ];
+    };
+
+    boot.kernelModules = [ "tun" ];
+
+    users.groups.libvirtd.gid = config.ids.gids.libvirtd;
+
+    # libvirtd runs qemu as this user and group by default
+    users.extraGroups.qemu-libvirtd.gid = config.ids.gids.qemu-libvirtd;
+    users.extraUsers.qemu-libvirtd = {
+      uid = config.ids.uids.qemu-libvirtd;
+      isNormalUser = false;
+      group = "qemu-libvirtd";
+    };
+
+    security.wrappers.qemu-bridge-helper = {
+      source = "/run/${dirName}/nix-helpers/qemu-bridge-helper";
+    };
+
+    systemd.packages = [ pkgs.libvirt ];
+
+    systemd.services.libvirtd-config = {
+      description = "Libvirt Virtual Machine Management Daemon - configuration";
+      script = ''
+        # Copy default libvirt network config .xml files to /var/lib
+        # Files modified by the user will not be overwritten
+        for i in $(cd ${pkgs.libvirt}/var/lib && echo \
+            libvirt/qemu/networks/*.xml libvirt/qemu/networks/autostart/*.xml \
+            libvirt/nwfilter/*.xml );
+        do
+            mkdir -p /var/lib/$(dirname $i) -m 755
+            cp -npd ${pkgs.libvirt}/var/lib/$i /var/lib/$i
+        done
+
+        # Copy generated qemu config to libvirt directory
+        cp -f ${qemuConfigFile} /var/lib/${dirName}/qemu.conf
+
+        # stable (not GC'able as in /nix/store) paths for using in <emulator> section of xml configs
+        for emulator in ${pkgs.libvirt}/libexec/libvirt_lxc ${cfg.qemuPackage}/bin/qemu-kvm ${cfg.qemuPackage}/bin/qemu-system-*; do
+          ln -s --force "$emulator" /run/${dirName}/nix-emulators/
+        done
+
+        for helper in libexec/qemu-bridge-helper bin/qemu-pr-helper; do
+          ln -s --force ${cfg.qemuPackage}/$helper /run/${dirName}/nix-helpers/
+        done
+
+        ${optionalString cfg.qemuOvmf ''
+          ln -s --force ${pkgs.OVMF.fd}/FV/OVMF_CODE.fd /run/${dirName}/nix-ovmf/
+          ln -s --force ${pkgs.OVMF.fd}/FV/OVMF_VARS.fd /run/${dirName}/nix-ovmf/
+        ''}
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        RuntimeDirectoryPreserve = "yes";
+        LogsDirectory = subDirs [ "qemu" ];
+        RuntimeDirectory = subDirs [ "nix-emulators" "nix-helpers" "nix-ovmf" ];
+        StateDirectory = subDirs [ "dnsmasq" ];
+      };
+    };
+
+    systemd.services.libvirtd = {
+      description = "Libvirt Virtual Machine Management Daemon";
+
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "libvirtd-config.service" ];
+      after = [ "systemd-udev-settle.service" "libvirtd-config.service" ]
+              ++ optional vswitch.enable "vswitchd.service";
+
+      environment.LIBVIRTD_ARGS = ''--config "${configFile}" ${concatStringsSep " " cfg.extraOptions}'';
+
+      path = [ cfg.qemuPackage ] # libvirtd requires qemu-img to manage disk images
+             ++ optional vswitch.enable vswitch.package;
+
+      serviceConfig = {
+        Type = "notify";
+        KillMode = "process"; # when stopping, leave the VMs alone
+        Restart = "no";
+      };
+      restartIfChanged = false;
+    };
+
+    systemd.services.libvirt-guests = {
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [ coreutils libvirt gawk ];
+      restartIfChanged = false;
+
+      environment.ON_SHUTDOWN = "${cfg.onShutdown}";
+    };
+
+    systemd.sockets.virtlogd = {
+      description = "Virtual machine log manager socket";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "/run/${dirName}/virtlogd-sock" ];
+    };
+
+    systemd.services.virtlogd = {
+      description = "Virtual machine log manager";
+      serviceConfig.ExecStart = "@${pkgs.libvirt}/sbin/virtlogd virtlogd";
+      restartIfChanged = false;
+    };
+
+    systemd.sockets.virtlockd = {
+      description = "Virtual machine lock manager socket";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "/run/${dirName}/virtlockd-sock" ];
+    };
+
+    systemd.services.virtlockd = {
+      description = "Virtual machine lock manager";
+      serviceConfig.ExecStart = "@${pkgs.libvirt}/sbin/virtlockd virtlockd";
+      restartIfChanged = false;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxc-container.nix b/nixpkgs/nixos/modules/virtualisation/lxc-container.nix
new file mode 100644
index 000000000000..d49364840187
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxc-container.nix
@@ -0,0 +1,26 @@
+{ lib, ... }:
+
+with lib;
+
+{
+  imports = [
+    ../profiles/docker-container.nix # FIXME, shouldn't include something from profiles/
+  ];
+
+  # Allow the user to login as root without password.
+  users.users.root.initialHashedPassword = mkOverride 150 "";
+
+  # Some more help text.
+  services.mingetty.helpLine =
+    ''
+
+      Log in as "root" with an empty password.
+    '';
+
+  # Containers should be light-weight, so start sshd on demand.
+  services.openssh.enable = mkDefault true;
+  services.openssh.startWhenNeeded = mkDefault true;
+
+  # Allow ssh connections
+  networking.firewall.allowedTCPPorts = [ 22 ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxc.nix b/nixpkgs/nixos/modules/virtualisation/lxc.nix
new file mode 100644
index 000000000000..9b5adaf08249
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxc.nix
@@ -0,0 +1,82 @@
+# LXC Configuration
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.lxc;
+
+in
+
+{
+  ###### interface
+
+  options.virtualisation.lxc = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            This enables Linux Containers (LXC), which provides tools
+            for creating and managing system or application containers
+            on Linux.
+          '';
+      };
+
+    systemConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          ''
+            This is the system-wide LXC config. See
+            <citerefentry><refentrytitle>lxc.system.conf</refentrytitle>
+            <manvolnum>5</manvolnum></citerefentry>.
+          '';
+      };
+
+    defaultConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          ''
+            Default config (default.conf) for new containers, i.e. for
+            network config. See <citerefentry><refentrytitle>lxc.container.conf
+            </refentrytitle><manvolnum>5</manvolnum></citerefentry>.
+          '';
+      };
+
+    usernetConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          ''
+            This is the config file for managing unprivileged user network
+            administration access in LXC. See <citerefentry>
+            <refentrytitle>lxc-usernet</refentrytitle><manvolnum>5</manvolnum>
+            </citerefentry>.
+          '';
+      };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.lxc ];
+    environment.etc."lxc/lxc.conf".text = cfg.systemConfig;
+    environment.etc."lxc/lxc-usernet".text = cfg.usernetConfig;
+    environment.etc."lxc/default.conf".text = cfg.defaultConfig;
+    systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
+
+    security.apparmor.packages = [ pkgs.lxc ];
+    security.apparmor.profiles = [
+      "${pkgs.lxc}/etc/apparmor.d/lxc-containers"
+      "${pkgs.lxc}/etc/apparmor.d/usr.bin.lxc-start"
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxcfs.nix b/nixpkgs/nixos/modules/virtualisation/lxcfs.nix
new file mode 100644
index 000000000000..b2457403463a
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxcfs.nix
@@ -0,0 +1,45 @@
+# LXC Configuration
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.lxc.lxcfs;
+in {
+  meta.maintainers = [ maintainers.mic92 ];
+
+  ###### interface
+  options.virtualisation.lxc.lxcfs = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This enables LXCFS, a FUSE filesystem for LXC.
+          To use lxcfs, include the following configuration in your
+          container configuration:
+          <code>
+            virtualisation.lxc.defaultConfig = "lxc.include = ''${pkgs.lxcfs}/share/lxc/config/common.conf.d/00-lxcfs.conf";
+          </code>
+        '';
+      };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.lxcfs = {
+      description = "FUSE filesystem for LXC";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "lxc.service" ];
+      restartIfChanged = false;
+      serviceConfig = {
+        ExecStartPre="${pkgs.coreutils}/bin/mkdir -p /var/lib/lxcfs";
+        ExecStart="${pkgs.lxcfs}/bin/lxcfs /var/lib/lxcfs";
+        ExecStopPost="-${pkgs.fuse}/bin/fusermount -u /var/lib/lxcfs";
+        KillMode="process";
+        Restart="on-failure";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxd.nix b/nixpkgs/nixos/modules/virtualisation/lxd.nix
new file mode 100644
index 000000000000..505c11abd208
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxd.nix
@@ -0,0 +1,83 @@
+# Systemd services for lxd.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.lxd;
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    virtualisation.lxd = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option enables lxd, a daemon that manages
+          containers. Users in the "lxd" group can interact with
+          the daemon (e.g. to start or stop containers) using the
+          <command>lxc</command> command line tool, among others.
+        '';
+      };
+      zfsSupport = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enables lxd to use ZFS as storage for containers.
+          This option is enabled by default if a ZFS pool is configured
+          with NixOS.
+        '';
+      };
+    };
+  };
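+
+  # Example (illustrative): enable lxd with ZFS-backed storage and put an
+  # operator in the "lxd" group so they can use the lxc client ("alice" is a
+  # hypothetical user):
+  #
+  #   virtualisation.lxd = { enable = true; zfsSupport = true; };
+  #   users.users.alice.extraGroups = [ "lxd" ];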
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.lxd ];
+
+    security.apparmor = {
+      enable = true;
+      profiles = [
+        "${pkgs.lxc}/etc/apparmor.d/usr.bin.lxc-start"
+        "${pkgs.lxc}/etc/apparmor.d/lxc-containers"
+      ];
+      packages = [ pkgs.lxc ];
+    };
+
+    systemd.services.lxd = {
+      description = "LXD Container Management Daemon";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+
+      path = lib.optional cfg.zfsSupport pkgs.zfs;
+
+      preStart = ''
+        mkdir -m 0755 -p /var/lib/lxc/rootfs
+      '';
+
+      serviceConfig = {
+        ExecStart = "@${pkgs.lxd.bin}/bin/lxd lxd --group lxd";
+        Type = "simple";
+        KillMode = "process"; # when stopping, leave the containers alone
+      };
+
+    };
+
+    users.groups.lxd.gid = config.ids.gids.lxd;
+
+    users.users.root = {
+      subUidRanges = [ { startUid = 1000000; count = 65536; } ];
+      subGidRanges = [ { startGid = 1000000; count = 65536; } ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/openstack-config.nix b/nixpkgs/nixos/modules/virtualisation/openstack-config.nix
new file mode 100644
index 000000000000..c2da5d0d2301
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/openstack-config.nix
@@ -0,0 +1,58 @@
+{ pkgs, lib, ... }:
+
+with lib;
+
+let
+  metadataFetcher = import ./ec2-metadata-fetcher.nix {
+    targetRoot = "/";
+    wgetExtraOptions = "--retry-connrefused";
+  };
+in
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+    ../profiles/headless.nix
+    # The Openstack Metadata service exposes data on an EC2 API also.
+    ./ec2-data.nix
+    ./amazon-init.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=ttyS0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    # Allow root logins
+    services.openssh = {
+      enable = true;
+      permitRootLogin = "prohibit-password";
+      passwordAuthentication = mkDefault false;
+    };
+
+    # Force getting the hostname from Openstack metadata.
+    networking.hostName = mkDefault "";
+
+    systemd.services.openstack-init = {
+      path = [ pkgs.wget ];
+      description = "Fetch Metadata on startup";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "apply-ec2-data.service" "amazon-init.service"];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      script = metadataFetcher;
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/openvswitch.nix b/nixpkgs/nixos/modules/virtualisation/openvswitch.nix
new file mode 100644
index 000000000000..edec37402308
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/openvswitch.nix
@@ -0,0 +1,176 @@
+# Systemd services for openvswitch
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.vswitch;
+
+in {
+
+  options.virtualisation.vswitch = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to enable Open vSwitch. A configuration daemon (ovsdb-server)
+        will be started.
+        '';
+    };
+
+    resetOnStart = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to reset the Open vSwitch configuration database to a default
+        configuration on every start of the systemd <literal>ovsdb.service</literal>.
+        '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.openvswitch;
+      defaultText = "pkgs.openvswitch";
+      description = ''
+        Open vSwitch package to use.
+      '';
+    };
+
+    ipsec = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to start the racoon IPsec service for Open vSwitch.
+      '';
+    };
+  };
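+
+  # A usage sketch of the options above (illustrative): enable Open vSwitch and
+  # keep the configuration database across restarts of ovsdb.service:
+  #
+  #   virtualisation.vswitch = {
+  #     enable = true;
+  #     resetOnStart = false;
+  #   };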
+
+  config = mkIf cfg.enable (let
+
+    # Where the communication sockets live
+    runDir = "/run/openvswitch";
+
+    # The path to an initialized version of the database
+    db = pkgs.stdenv.mkDerivation {
+      name = "vswitch.db";
+      dontUnpack = true;
+      buildPhase = "true";
+      buildInputs = with pkgs; [
+        cfg.package
+      ];
+      installPhase = "mkdir -p $out";
+    };
+
+  in (mkMerge [{
+
+    environment.systemPackages = [ cfg.package pkgs.ipsecTools ];
+
+    boot.kernelModules = [ "tun" "openvswitch" ];
+
+    boot.extraModulePackages = [ cfg.package ];
+
+    systemd.services.ovsdb = {
+      description = "Open_vSwitch Database Server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+      path = [ cfg.package ];
+      restartTriggers = [ db cfg.package ];
+      # Create the config database
+      preStart =
+        ''
+        mkdir -p ${runDir}
+        mkdir -p /var/db/openvswitch
+        chmod +w /var/db/openvswitch
+        ${optionalString cfg.resetOnStart "rm -f /var/db/openvswitch/conf.db"}
+        if [[ ! -e /var/db/openvswitch/conf.db ]]; then
+          ${cfg.package}/bin/ovsdb-tool create \
+            "/var/db/openvswitch/conf.db" \
+            "${cfg.package}/share/openvswitch/vswitch.ovsschema"
+        fi
+        chmod -R +w /var/db/openvswitch
+        '';
+      serviceConfig = {
+        ExecStart =
+          ''
+          ${cfg.package}/bin/ovsdb-server \
+            --remote=punix:${runDir}/db.sock \
+            --private-key=db:Open_vSwitch,SSL,private_key \
+            --certificate=db:Open_vSwitch,SSL,certificate \
+            --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert \
+            --unixctl=ovsdb.ctl.sock \
+            --pidfile=/run/openvswitch/ovsdb.pid \
+            --detach \
+            /var/db/openvswitch/conf.db
+          '';
+        Restart = "always";
+        RestartSec = 3;
+        PIDFile = "/run/openvswitch/ovsdb.pid";
+        # Use service type 'forking' to correctly determine when ovsdb-server is ready.
+        Type = "forking";
+      };
+      postStart = ''
+        ${cfg.package}/bin/ovs-vsctl --timeout 3 --retry --no-wait init
+      '';
+    };
+
+    systemd.services.vswitchd = {
+      description = "Open_vSwitch Daemon";
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "ovsdb.service" ];
+      after = [ "ovsdb.service" ];
+      path = [ cfg.package ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/ovs-vswitchd \
+          --pidfile=/run/openvswitch/ovs-vswitchd.pid \
+          --detach
+        '';
+        PIDFile = "/run/openvswitch/ovs-vswitchd.pid";
+        # Use service type 'forking' to correctly determine when vswitchd is ready.
+        Type = "forking";
+      };
+    };
+
+  }
+  (mkIf cfg.ipsec {
+    services.racoon.enable = true;
+    services.racoon.configPath = "${runDir}/ipsec/etc/racoon/racoon.conf";
+
+    networking.firewall.extraCommands = ''
+      iptables -I INPUT -t mangle -p esp -j MARK --set-mark 1/1
+      iptables -I INPUT -t mangle -p udp --dport 4500 -j MARK --set-mark 1/1
+    '';
+
+    systemd.services.ovs-monitor-ipsec = {
+      description = "Open_vSwitch Ipsec Daemon";
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "ovsdb.service" ];
+      before = [ "vswitchd.service" "racoon.service" ];
+      environment.UNIXCTLPATH = "/tmp/ovsdb.ctl.sock";
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/ovs-monitor-ipsec \
+            --root-prefix ${runDir}/ipsec \
+            --pidfile /run/openvswitch/ovs-monitor-ipsec.pid \
+            --monitor --detach \
+            unix:/run/openvswitch/db.sock
+        '';
+        PIDFile = "/run/openvswitch/ovs-monitor-ipsec.pid";
+        # Use service type 'forking' to correctly determine when ovs-monitor-ipsec is ready.
+        Type = "forking";
+      };
+
+      preStart = ''
+        rm -r ${runDir}/ipsec/etc/racoon/certs || true
+        mkdir -p ${runDir}/ipsec/{etc/racoon,etc/init.d/,usr/sbin/}
+        ln -fs ${pkgs.ipsecTools}/bin/setkey ${runDir}/ipsec/usr/sbin/setkey
+        ln -fs ${pkgs.writeScript "racoon-restart" ''
+        #!${pkgs.runtimeShell}
+        /run/current-system/sw/bin/systemctl $1 racoon
+        ''} ${runDir}/ipsec/etc/init.d/racoon
+      '';
+    };
+  })]));
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/parallels-guest.nix b/nixpkgs/nixos/modules/virtualisation/parallels-guest.nix
new file mode 100644
index 000000000000..828419fb4b9d
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/parallels-guest.nix
@@ -0,0 +1,156 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  prl-tools = config.hardware.parallels.package;
+in
+
+{
+
+  options = {
+    hardware.parallels = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This enables Parallels Tools for Linux guests, along with provided
+          video, mouse and other hardware drivers.
+        '';
+      };
+
+      autoMountShares = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Controls the prlfsmountd service. When this service is running, shares cannot be manually
+          mounted through `mount -t prl_fs ...`, as the service will remount them and trample any options that were set.
+          Enabling this is recommended for simple file sharing, but for more involved use of shares (such as for code)
+          disable it and mount the shares manually.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = config.boot.kernelPackages.prl-tools;
+        defaultText = "config.boot.kernelPackages.prl-tools";
+        example = literalExample "config.boot.kernelPackages.prl-tools";
+        description = ''
+          Defines which package to use for prl-tools. Override to change the version.
+        '';
+      };
+    };
+
+  };
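+
+  # Example guest configuration (illustrative): enable the tools but manage
+  # shared-folder mounts by hand, as discussed under autoMountShares above:
+  #
+  #   hardware.parallels.enable = true;
+  #   hardware.parallels.autoMountShares = false;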
+
+  config = mkIf config.hardware.parallels.enable {
+    services.xserver = {
+      drivers = singleton
+        { name = "prlvideo"; modules = [ prl-tools ]; };
+
+      screenSection = ''
+        Option "NoMTRR"
+      '';
+
+      config = ''
+        Section "InputClass"
+          Identifier "prlmouse"
+          MatchIsPointer "on"
+          MatchTag "prlmouse"
+          Driver "prlmouse"
+        EndSection
+      '';
+    };
+
+    hardware.opengl.package = prl-tools;
+    hardware.opengl.package32 = pkgs.pkgsi686Linux.linuxPackages.prl-tools.override { libsOnly = true; kernel = null; };
+    hardware.opengl.setLdLibraryPath = true;
+
+    services.udev.packages = [ prl-tools ];
+
+    environment.systemPackages = [ prl-tools ];
+
+    boot.extraModulePackages = [ prl-tools ];
+
+    boot.kernelModules = [ "prl_tg" "prl_eth" "prl_fs" "prl_fs_freeze" ];
+
+    services.timesyncd.enable = false;
+
+    systemd.services.prltoolsd = {
+      description = "Parallels Tools' service";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${prl-tools}/bin/prltoolsd -f";
+        PIDFile = "/var/run/prltoolsd.pid";
+      };
+    };
+
+    systemd.services.prlfsmountd = mkIf config.hardware.parallels.autoMountShares {
+      description = "Parallels Shared Folders Daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = rec {
+        ExecStart = "${prl-tools}/sbin/prlfsmountd ${PIDFile}";
+        ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /media";
+        ExecStopPost = "${prl-tools}/sbin/prlfsmountd -u";
+        PIDFile = "/run/prlfsmountd.pid";
+      };
+    };
+
+    systemd.services.prlshprint = {
+      description = "Parallels Shared Printer Tool";
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "cups.service" ];
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${prl-tools}/bin/prlshprint";
+      };
+    };
+
+    systemd.user.services = {
+      prlcc = {
+        description = "Parallels Control Center";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcc";
+        };
+      };
+      prldnd = {
+        description = "Parallels Control Center";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prldnd";
+        };
+      };
+      prl_wmouse_d  = {
+        description = "Parallels Walking Mouse Daemon";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prl_wmouse_d";
+        };
+      };
+      prlcp = {
+        description = "Parallels CopyPaste Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcp";
+        };
+      };
+      prlsga = {
+        description = "Parallels Shared Guest Applications Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlsga";
+        };
+      };
+      prlshprof = {
+        description = "Parallels Shared Profile Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlshprof";
+        };
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix b/nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix
new file mode 100644
index 000000000000..665224e35d8c
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.qemuGuest;
+in {
+
+  options.services.qemuGuest = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable the qemu guest agent.";
+      };
+  };
+
+  config = mkIf cfg.enable (
+      mkMerge [
+    {
+
+      services.udev.extraRules = ''
+        SUBSYSTEM=="virtio-ports", ATTR{name}=="org.qemu.guest_agent.0", TAG+="systemd" ENV{SYSTEMD_WANTS}="qemu-guest-agent.service"
+      '';
+
+      systemd.services.qemu-guest-agent = {
+        description = "Run the QEMU Guest Agent";
+        serviceConfig = {
+          ExecStart = "${pkgs.qemu.ga}/bin/qemu-ga";
+          Restart = "always";
+          RestartSec = 0;
+        };
+      };
+    }
+  ]
+  );
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix b/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix
new file mode 100644
index 000000000000..ed3431554be4
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix
@@ -0,0 +1,585 @@
+# This module creates a virtual machine from the NixOS configuration.
+# Building the `config.system.build.vm' attribute gives you a command
+# that starts a KVM/QEMU VM running the NixOS configuration defined in
+# `config'.  The Nix store is shared read-only with the host, which
+# makes (re)building VMs very efficient.  However, it also means you
+# can't reconfigure the guest inside the guest - you need to rebuild
+# the VM in the host.  On the other hand, the root filesystem is a
+# read/writable disk image persistent across VM reboots.
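+#
+# A typical invocation (illustrative) from a NixOS configuration checkout:
+#
+#   $ nixos-rebuild build-vm        # or: nix-build '<nixpkgs/nixos>' -A vm
+#   $ ./result/bin/run-*-vm         # the start script generated from `startVM' below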
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+with import ../../lib/qemu-flags.nix { inherit pkgs; };
+
+let
+
+  qemu = config.system.build.qemu or pkgs.qemu_test;
+
+  vmName =
+    if config.networking.hostName == ""
+    then "noname"
+    else config.networking.hostName;
+
+  cfg = config.virtualisation;
+
+  qemuGraphics = lib.optionalString (!cfg.graphics) "-nographic";
+
+  consoles = lib.concatMapStringsSep " " (c: "console=${c}") cfg.qemu.consoles;
+
+  # XXX: This is very ugly and in the future we really should use attribute
+  # sets to build ALL of the QEMU flags instead of this mixed mess of Nix
+  # expressions and shell script stuff.
+  mkDiskIfaceDriveFlag = idx: driveArgs: let
+    inherit (cfg.qemu) diskInterface;
+    # The drive identifier created by incrementing the index by one using the
+    # shell.
+    drvId = "drive$((${idx} + 1))";
+    # NOTE: DO NOT shell escape, because this may contain shell variables.
+    commonArgs = "index=${idx},id=${drvId},${driveArgs}";
+    isSCSI = diskInterface == "scsi";
+    devArgs = "${diskInterface}-hd,drive=${drvId}";
+    args = "-drive ${commonArgs},if=none -device lsi53c895a -device ${devArgs}";
+  in if isSCSI then args else "-drive ${commonArgs},if=${diskInterface}";
+
+  # Shell script to start the VM.
+  startVM =
+    ''
+      #! ${pkgs.runtimeShell}
+
+      NIX_DISK_IMAGE=$(readlink -f ''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}})
+
+      if ! test -e "$NIX_DISK_IMAGE"; then
+          ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \
+            ${toString config.virtualisation.diskSize}M || exit 1
+      fi
+
+      # Create a directory for storing temporary data of the running VM.
+      if [ -z "$TMPDIR" -o -z "$USE_TMPDIR" ]; then
+          TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
+      fi
+
+      # Create a directory for exchanging data with the VM.
+      mkdir -p $TMPDIR/xchg
+
+      ${if cfg.useBootLoader then ''
+        # Create a writable copy/snapshot of the boot disk.
+        # A writable boot disk can be booted from automatically.
+        ${qemu}/bin/qemu-img create -f qcow2 -b ${bootDisk}/disk.img $TMPDIR/disk.img || exit 1
+
+        ${if cfg.useEFIBoot then ''
+          # VM needs a writable flash BIOS.
+          cp ${bootDisk}/bios.bin $TMPDIR || exit 1
+          chmod 0644 $TMPDIR/bios.bin || exit 1
+        '' else ''
+        ''}
+      '' else ''
+      ''}
+
+      cd $TMPDIR
+      idx=2
+      extraDisks=""
+      ${flip concatMapStrings cfg.emptyDiskImages (size: ''
+        if ! test -e "empty$idx.qcow2"; then
+            ${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${toString size}M"
+        fi
+        extraDisks="$extraDisks ${mkDiskIfaceDriveFlag "$idx" "file=$(pwd)/empty$idx.qcow2,werror=report"}"
+        idx=$((idx + 1))
+      '')}
+
+      # Start QEMU.
+      exec ${qemuBinary qemu} \
+          -name ${vmName} \
+          -m ${toString config.virtualisation.memorySize} \
+          -smp ${toString config.virtualisation.cores} \
+          -device virtio-rng-pci \
+          ${concatStringsSep " " config.virtualisation.qemu.networkingOptions} \
+          -virtfs local,path=/nix/store,security_model=none,mount_tag=store \
+          -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \
+          -virtfs local,path=''${SHARED_DIR:-$TMPDIR/xchg},security_model=none,mount_tag=shared \
+          ${if cfg.useBootLoader then ''
+            ${mkDiskIfaceDriveFlag "0" "file=$NIX_DISK_IMAGE,cache=writeback,werror=report"} \
+            ${mkDiskIfaceDriveFlag "1" "file=$TMPDIR/disk.img,media=disk"} \
+            ${if cfg.useEFIBoot then ''
+              -pflash $TMPDIR/bios.bin \
+            '' else ''
+            ''}
+          '' else ''
+            ${mkDiskIfaceDriveFlag "0" "file=$NIX_DISK_IMAGE,cache=writeback,werror=report"} \
+            -kernel ${config.system.build.toplevel}/kernel \
+            -initrd ${config.system.build.toplevel}/initrd \
+            -append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo}/registration ${consoles} $QEMU_KERNEL_PARAMS" \
+          ''} \
+          $extraDisks \
+          ${qemuGraphics} \
+          ${toString config.virtualisation.qemu.options} \
+          $QEMU_OPTS \
+          "$@"
+    '';
+
+
+  regInfo = pkgs.closureInfo { rootPaths = config.virtualisation.pathsInNixDB; };
+
+
+  # Generate a hard disk image containing a /boot partition and GRUB
+  # in the MBR.  Used when the `useBootLoader' option is set.
+  # FIXME: use nixos/lib/make-disk-image.nix.
+  bootDisk =
+    pkgs.vmTools.runInLinuxVM (
+      pkgs.runCommand "nixos-boot-disk"
+        { preVM =
+            ''
+              mkdir $out
+              diskImage=$out/disk.img
+              bootFlash=$out/bios.bin
+              ${qemu}/bin/qemu-img create -f qcow2 $diskImage "40M"
+              ${if cfg.useEFIBoot then ''
+                cp ${pkgs.OVMF-CSM.fd}/FV/OVMF.fd $bootFlash
+                chmod 0644 $bootFlash
+              '' else ''
+              ''}
+            '';
+          buildInputs = [ pkgs.utillinux ];
+          QEMU_OPTS = if cfg.useEFIBoot
+                      then "-pflash $out/bios.bin -nographic -serial pty"
+                      else "-nographic -serial pty";
+        }
+        ''
+          # Create a /boot EFI partition with 40M and arbitrary but fixed GUIDs for reproducibility
+          ${pkgs.gptfdisk}/bin/sgdisk \
+            --set-alignment=1 --new=1:34:2047 --change-name=1:BIOSBootPartition --typecode=1:ef02 \
+            --set-alignment=512 --largest-new=2 --change-name=2:EFISystem --typecode=2:ef00 \
+            --attributes=1:set:1 \
+            --attributes=2:set:2 \
+            --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C1 \
+            --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \
+            --partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \
+            --hybrid 2 \
+            --recompute-chs /dev/vda
+          ${pkgs.dosfstools}/bin/mkfs.fat -F16 /dev/vda2
+          export MTOOLS_SKIP_CHECK=1
+          ${pkgs.mtools}/bin/mlabel -i /dev/vda2 ::boot
+
+          # Mount /boot; load necessary modules first.
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_cp437.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_iso8859-1.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/fat.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/vfat.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/efivarfs/efivarfs.ko.xz || true
+          mkdir /boot
+          mount /dev/vda2 /boot
+
+          # This is needed for GRUB 0.97, which doesn't know about virtio devices.
+          mkdir /boot/grub
+          echo '(hd0) /dev/vda' > /boot/grub/device.map
+
+          # Install GRUB and generate the GRUB boot menu.
+          touch /etc/NIXOS
+          mkdir -p /nix/var/nix/profiles
+          ${config.system.build.toplevel}/bin/switch-to-configuration boot
+
+          umount /boot
+        '' # */
+    );
+
+in
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+   ./docker-preloader.nix
+  ];
+
+  options = {
+
+    virtualisation.memorySize =
+      mkOption {
+        default = 384;
+        description =
+          ''
+            Memory size (M) of virtual machine.
+          '';
+      };
+
+    virtualisation.diskSize =
+      mkOption {
+        default = 512;
+        description =
+          ''
+            Disk size (M) of virtual machine.
+          '';
+      };
+
+    virtualisation.diskImage =
+      mkOption {
+        default = "./${vmName}.qcow2";
+        description =
+          ''
+            Path to the disk image containing the root filesystem.
+            The image will be created on startup if it does not
+            exist.
+          '';
+      };
+
+    virtualisation.bootDevice =
+      mkOption {
+        type = types.str;
+        example = "/dev/vda";
+        description =
+          ''
+            The disk to be used for the root filesystem.
+          '';
+      };
+
+    virtualisation.emptyDiskImages =
+      mkOption {
+        default = [];
+        type = types.listOf types.int;
+        description =
+          ''
+            Additional disk images to provide to the VM. The value is
+            a list of sizes in megabytes, one for each disk. These disks are
+            writable by the VM.
+          '';
+      };
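+
+    # For example (illustrative sizes), `virtualisation.emptyDiskImages = [ 512 2048 ];`
+    # attaches two extra writable disks of 512 MB and 2048 MB to the VM.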
+
+    virtualisation.graphics =
+      mkOption {
+        default = true;
+        description =
+          ''
+            Whether to run QEMU with a graphics window, or in nographic mode.
+            The serial console is enabled in either case, but this option
+            changes which console is preferred.
+            '';
+      };
+
+    virtualisation.cores =
+      mkOption {
+        default = 1;
+        type = types.int;
+        description =
+          ''
+            Specify the number of cores the guest is permitted to use.
+            The number can be higher than the available cores on the
+            host system.
+          '';
+      };
+
+    virtualisation.pathsInNixDB =
+      mkOption {
+        default = [];
+        description =
+          ''
+            The list of paths whose closure is registered in the Nix
+            database in the VM.  All other paths in the host Nix store
+            appear in the guest Nix store as well, but are considered
+            garbage (because they are not registered in the Nix
+            database in the guest).
+          '';
+      };
+
+    virtualisation.vlans =
+      mkOption {
+        default = [ 1 ];
+        example = [ 1 2 ];
+        description =
+          ''
+            Virtual networks to which the VM is connected.  Each
+            number <replaceable>N</replaceable> in this list causes
+            the VM to have a virtual Ethernet interface attached to a
+            separate virtual network on which it will be assigned IP
+            address
+            <literal>192.168.<replaceable>N</replaceable>.<replaceable>M</replaceable></literal>,
+            where <replaceable>M</replaceable> is the index of this VM
+            in the list of VMs.
+          '';
+      };
+
+    virtualisation.writableStore =
+      mkOption {
+        default = true; # FIXME
+        description =
+          ''
+            If enabled, the Nix store in the VM is made writable by
+            layering an overlay filesystem on top of the host's Nix
+            store.
+          '';
+      };
+
+    virtualisation.writableStoreUseTmpfs =
+      mkOption {
+        default = true;
+        description =
+          ''
+            Use a tmpfs for the writable store instead of writing to the VM's
+            own filesystem.
+          '';
+      };
+
+    networking.primaryIPAddress =
+      mkOption {
+        default = "";
+        internal = true;
+        description = "Primary IP address used in /etc/hosts.";
+      };
+
+    virtualisation.qemu = {
+      options =
+        mkOption {
+          type = types.listOf types.unspecified;
+          default = [];
+          example = [ "-vga std" ];
+          description = "Options passed to QEMU.";
+        };
+
+      consoles = mkOption {
+        type = types.listOf types.str;
+        default = let
+          consoles = [ "${qemuSerialDevice},115200n8" "tty0" ];
+        in if cfg.graphics then consoles else reverseList consoles;
+        example = [ "console=tty1" ];
+        description = ''
+          The output console devices to pass to the kernel command line via the
+          <literal>console</literal> parameter, the primary console is the last
+          item of this list.
+
+          By default it enables both serial console and
+          <literal>tty0</literal>. The preferred console (last one) is based on
+          the value of <option>virtualisation.graphics</option>.
+        '';
+      };
+
+      networkingOptions =
+        mkOption {
+          default = [
+            "-net nic,netdev=user.0,model=virtio"
+            "-netdev user,id=user.0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
+          ];
+          type = types.listOf types.str;
+          description = ''
+            Networking-related command-line options that should be passed to qemu.
+            The default is to use userspace networking (slirp).
+
+            If you override this option, be sure to keep
+            ''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS} (as seen in the default)
+            so that the default runtime behaviour is preserved.
+          '';
+        };
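+
+      # For instance (a sketch; the port numbers are illustrative), forwarding
+      # host port 2222 to the guest's SSH port while preserving runtime overrides:
+      #
+      #   virtualisation.qemu.networkingOptions = [
+      #     "-net nic,netdev=user.0,model=virtio"
+      #     "-netdev user,id=user.0,hostfwd=tcp::2222-:22\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
+      #   ];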
+
+      diskInterface =
+        mkOption {
+          default = "virtio";
+          example = "scsi";
+          type = types.enum [ "virtio" "scsi" "ide" ];
+          description = "The interface used for the virtual hard disks.";
+        };
+
+      guestAgent.enable =
+        mkOption {
+          default = true;
+          type = types.bool;
+          description = ''
+            Enable the Qemu guest agent.
+          '';
+        };
+    };
+
+    virtualisation.useBootLoader =
+      mkOption {
+        default = false;
+        description =
+          ''
+            If enabled, the virtual machine will be booted using the
+            regular boot loader (i.e., GRUB 1 or 2).  This allows
+            testing of the boot loader.  If
+            disabled (the default), the VM directly boots the NixOS
+            kernel and initial ramdisk, bypassing the boot loader
+            altogether.
+          '';
+      };
+
+    virtualisation.useEFIBoot =
+      mkOption {
+        default = false;
+        description =
+          ''
+            If enabled, the virtual machine will provide an EFI boot
+            manager.
+            useEFIBoot is ignored if useBootLoader == false.
+          '';
+      };
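+    # A minimal sketch: useEFIBoot only takes effect together with the boot
+    # loader, so an EFI-booted test VM would set both options:
+    #   virtualisation.useBootLoader = true;
+    #   virtualisation.useEFIBoot = true;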
+
+  };
+
+  config = {
+
+    boot.loader.grub.device = mkVMOverride cfg.bootDevice;
+
+    boot.initrd.extraUtilsCommands =
+      ''
+        # We need mke2fs in the initrd.
+        copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs
+      '';
+
+    boot.initrd.postDeviceCommands =
+      ''
+        # If the disk image appears to be empty, run mke2fs to
+        # initialise it.
+        FSTYPE=$(blkid -o value -s TYPE ${cfg.bootDevice} || true)
+        if test -z "$FSTYPE"; then
+            mke2fs -t ext4 ${cfg.bootDevice}
+        fi
+      '';
+
+    boot.initrd.postMountCommands =
+      ''
+        # Mark this as a NixOS machine.
+        mkdir -p $targetRoot/etc
+        echo -n > $targetRoot/etc/NIXOS
+
+        # Fix the permissions on /tmp.
+        chmod 1777 $targetRoot/tmp
+
+        mkdir -p $targetRoot/boot
+
+        ${optionalString cfg.writableStore ''
+          echo "mounting overlay filesystem on /nix/store..."
+          mkdir -m 0755 -p $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
+          mount -t overlay overlay $targetRoot/nix/store \
+            -o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail
+        ''}
+      '';
+
+    # After booting, register the closure of the paths in
+    # `virtualisation.pathsInNixDB' in the Nix database in the VM.  This
+    # allows Nix operations to work in the VM.  The path to the
+    # registration file is passed through the kernel command line to
+    # allow `system.build.toplevel' to be included.  (If we had a direct
+    # reference to ${regInfo} here, then we would get a cyclic
+    # dependency.)
+    boot.postBootCommands =
+      ''
+        if [[ "$(cat /proc/cmdline)" =~ regInfo=([^ ]*) ]]; then
+          ${config.nix.package.out}/bin/nix-store --load-db < ''${BASH_REMATCH[1]}
+        fi
+      '';
+
+    boot.initrd.availableKernelModules =
+      optional cfg.writableStore "overlay"
+      ++ optional (cfg.qemu.diskInterface == "scsi") "sym53c8xx";
+
+    virtualisation.bootDevice =
+      mkDefault (if cfg.qemu.diskInterface == "scsi" then "/dev/sda" else "/dev/vda");
+
+    virtualisation.pathsInNixDB = [ config.system.build.toplevel ];
+
+    # FIXME: Consolidate this one day.
+    virtualisation.qemu.options = mkMerge [
+      (mkIf (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) [ "-vga std" "-usb" "-device usb-tablet,bus=usb-bus.0" ])
+      (mkIf (pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64) [ "-device virtio-gpu-pci" "-device usb-ehci,id=usb0" "-device usb-kbd" "-device usb-tablet" ])
+    ];
+
+    # Mount the host filesystem via 9P, and bind-mount the Nix store
+    # of the host into our own filesystem.  We use mkVMOverride to
+    # allow this module to be applied to "normal" NixOS system
+    # configuration, where the regular value for the `fileSystems'
+    # attribute should be disregarded for the purpose of building a VM
+    # test image (since those filesystems don't exist in the VM).
+    fileSystems = mkVMOverride (
+      { "/".device = cfg.bootDevice;
+        ${if cfg.writableStore then "/nix/.ro-store" else "/nix/store"} =
+          { device = "store";
+            fsType = "9p";
+            options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
+            neededForBoot = true;
+          };
+        "/tmp" = mkIf config.boot.tmpOnTmpfs
+          { device = "tmpfs";
+            fsType = "tmpfs";
+            neededForBoot = true;
+            # Sync with systemd's tmp.mount.
+            options = [ "mode=1777" "strictatime" "nosuid" "nodev" ];
+          };
+        "/tmp/xchg" =
+          { device = "xchg";
+            fsType = "9p";
+            options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
+            neededForBoot = true;
+          };
+        "/tmp/shared" =
+          { device = "shared";
+            fsType = "9p";
+            options = [ "trans=virtio" "version=9p2000.L" ];
+            neededForBoot = true;
+          };
+      } // optionalAttrs (cfg.writableStore && cfg.writableStoreUseTmpfs)
+      { "/nix/.rw-store" =
+          { fsType = "tmpfs";
+            options = [ "mode=0755" ];
+            neededForBoot = true;
+          };
+      } // optionalAttrs cfg.useBootLoader
+      { "/boot" =
+          { device = "/dev/vdb2";
+            fsType = "vfat";
+            options = [ "ro" ];
+            noCheck = true; # fsck fails on a r/o filesystem
+          };
+      });
+
+    swapDevices = mkVMOverride [ ];
+    boot.initrd.luks.devices = mkVMOverride {};
+
+    # Don't run ntpd in the guest.  It should get the correct time from KVM.
+    services.timesyncd.enable = false;
+
+    services.qemuGuest.enable = cfg.qemu.guestAgent.enable;
+
+    system.build.vm = pkgs.runCommand "nixos-vm" { preferLocalBuild = true; }
+      ''
+        mkdir -p $out/bin
+        ln -s ${config.system.build.toplevel} $out/system
+        ln -s ${pkgs.writeScript "run-nixos-vm" startVM} $out/bin/run-${vmName}-vm
+      '';
+
+    # When building a regular system configuration, override whatever
+    # video driver the host uses.
+    services.xserver.videoDrivers = mkVMOverride [ "modesetting" ];
+    services.xserver.defaultDepth = mkVMOverride 0;
+    services.xserver.resolutions = mkVMOverride [ { x = 1024; y = 768; } ];
+    services.xserver.monitorSection =
+      ''
+        # Set a higher refresh rate so that resolutions > 800x600 work.
+        HorizSync 30-140
+        VertRefresh 50-160
+      '';
+
+    # Wireless won't work in the VM.
+    networking.wireless.enable = mkVMOverride false;
+    networking.connman.enable = mkVMOverride false;
+
+    # Speed up booting by not waiting for ARP.
+    networking.dhcpcd.extraConfig = "noarp";
+
+    networking.usePredictableInterfaceNames = false;
+
+    system.requiredKernelConfig = with config.lib.kernelConfig;
+      [ (isEnabled "VIRTIO_BLK")
+        (isEnabled "VIRTIO_PCI")
+        (isEnabled "VIRTIO_NET")
+        (isEnabled "EXT4_FS")
+        (isYes "BLK_DEV")
+        (isYes "PCI")
+        (isYes "EXPERIMENTAL")
+        (isYes "NETDEVICES")
+        (isYes "NET_CORE")
+        (isYes "INET")
+        (isYes "NETWORK_FILESYSTEMS")
+      ] ++ optional (!cfg.graphics) [
+        (isYes "SERIAL_8250_CONSOLE")
+        (isYes "SERIAL_8250")
+      ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/rkt.nix b/nixpkgs/nixos/modules/virtualisation/rkt.nix
new file mode 100644
index 000000000000..fd662b52df52
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/rkt.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.rkt;
+in
+{
+  options.virtualisation.rkt = {
+    enable = mkEnableOption "rkt metadata service";
+
+    gc = {
+      automatic = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Automatically run the garbage collector at a specific time.";
+      };
+
+      dates = mkOption {
+        default = "03:15";
+        type = types.str;
+        description = ''
+          Specification (in the format described by
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>7</manvolnum></citerefentry>) of the time at
+          which the garbage collector will run.
+        '';
+      };
+
+      options = mkOption {
+        default = "--grace-period=24h";
+        type = types.str;
+        description = ''
+          Options given to <filename>rkt gc</filename> when the
+          garbage collector is run automatically.
+        '';
+      };
+    };
+  };
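+  # A hedged usage sketch for this module, running collection weekly with a
+  # longer grace period (values are illustrative):
+  #   virtualisation.rkt = {
+  #     enable = true;
+  #     gc.dates = "weekly";
+  #     gc.options = "--grace-period=48h";
+  #   };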
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.rkt ];
+
+    systemd.services.rkt = {
+      description = "rkt metadata service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.rkt}/bin/rkt metadata-service";
+      };
+    };
+
+    systemd.services.rkt-gc = {
+      description = "rkt garbage collection";
+      startAt = optionalString cfg.gc.automatic cfg.gc.dates;
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.rkt}/bin/rkt gc ${cfg.gc.options}";
+      };
+    };
+
+    users.groups.rkt = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
new file mode 100644
index 000000000000..834b994e92d2
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
@@ -0,0 +1,93 @@
+# Module for VirtualBox guests.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.virtualbox.guest;
+  kernel = config.boot.kernelPackages;
+
+in
+
+{
+
+  ###### interface
+
+  options.virtualisation.virtualbox.guest = {
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = "Whether to enable the VirtualBox service and other guest additions.";
+    };
+
+    x11 = mkOption {
+      default = true;
+      type = types.bool;
+      description = "Whether to enable X11 graphics.";
+    };
+  };
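+  # A minimal sketch: a guest without a graphical session would keep the
+  # guest services but disable the X11 integration:
+  #   virtualisation.virtualbox.guest.enable = true;
+  #   virtualisation.virtualbox.guest.x11 = false;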
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [{
+    assertions = [{
+      assertion = pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64;
+      message = "VirtualBox is not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    }];
+
+    environment.systemPackages = [ kernel.virtualboxGuestAdditions ];
+
+    boot.extraModulePackages = [ kernel.virtualboxGuestAdditions ];
+
+    boot.supportedFilesystems = [ "vboxsf" ];
+    boot.initrd.supportedFilesystems = [ "vboxsf" ];
+
+    users.groups.vboxsf.gid = config.ids.gids.vboxsf;
+
+    systemd.services.virtualbox =
+      { description = "VirtualBox Guest Services";
+
+        wantedBy = [ "multi-user.target" ];
+        requires = [ "dev-vboxguest.device" ];
+        after = [ "dev-vboxguest.device" ];
+
+        unitConfig.ConditionVirtualization = "oracle";
+
+        serviceConfig.ExecStart = "@${kernel.virtualboxGuestAdditions}/bin/VBoxService VBoxService --foreground";
+      };
+
+    services.udev.extraRules =
+      ''
+        # /dev/vboxuser is necessary for VBoxClient to work.  Maybe we
+        # should restrict this to logged-in users.
+        KERNEL=="vboxuser",  OWNER="root", GROUP="root", MODE="0666"
+
+        # Allow systemd dependencies on vboxguest.
+        SUBSYSTEM=="misc", KERNEL=="vboxguest", TAG+="systemd"
+      '';
+  } (mkIf cfg.x11 {
+    services.xserver.videoDrivers = mkOverride 50 [ "virtualbox" "modesetting" ];
+
+    services.xserver.config =
+      ''
+        Section "InputDevice"
+          Identifier "VBoxMouse"
+          Driver "vboxmouse"
+        EndSection
+      '';
+
+    services.xserver.serverLayoutSection =
+      ''
+        InputDevice "VBoxMouse"
+      '';
+
+    services.xserver.displayManager.sessionCommands =
+      ''
+        PATH=${makeBinPath [ pkgs.gnugrep pkgs.which pkgs.xorg.xorgserver.out ]}:$PATH \
+          ${kernel.virtualboxGuestAdditions}/bin/VBoxClient-all
+      '';
+  })]);
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix
new file mode 100644
index 000000000000..41bcb909fb5c
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix
@@ -0,0 +1,153 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.virtualbox.host;
+
+  virtualbox = cfg.package.override {
+    inherit (cfg) enableHardening headless;
+    extensionPack = if cfg.enableExtensionPack then pkgs.virtualboxExtpack else null;
+  };
+
+  kernelModules = config.boot.kernelPackages.virtualbox.override {
+    inherit virtualbox;
+  };
+
+in
+
+{
+  options.virtualisation.virtualbox.host = {
+    enable = mkEnableOption "VirtualBox" // {
+      description = ''
+        Whether to enable VirtualBox.
+
+        <note><para>
+          In order to pass USB devices from the host to the guests, the user
+          needs to be in the <literal>vboxusers</literal> group.
+        </para></note>
+      '';
+    };
+
+    enableExtensionPack = mkEnableOption "VirtualBox extension pack" // {
+      description = ''
+        Whether to install the Oracle Extension Pack for VirtualBox.
+
+        <important><para>
+          You must set <literal>nixpkgs.config.allowUnfree = true</literal> in
+          order to use this.  This requires you to accept the VirtualBox PUEL.
+        </para></important>
+      '';
+    };
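+    # A hedged usage sketch: the extension pack is unfree, so enabling it
+    # also requires allowing unfree packages:
+    #   nixpkgs.config.allowUnfree = true;
+    #   virtualisation.virtualbox.host.enableExtensionPack = true;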
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.virtualbox;
+      defaultText = "pkgs.virtualbox";
+      description = ''
+        Which VirtualBox package to use.
+      '';
+    };
+
+    addNetworkInterface = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Automatically set up a vboxnet0 host-only network interface.
+      '';
+    };
+
+    enableHardening = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Enable hardened VirtualBox, which ensures that only the binaries in the
+        system path get access to the devices exposed by the kernel modules,
+        rather than all users in the vboxusers group.
+
+        <important><para>
+          Disabling this can put your system's security at risk, as local users
+          in the vboxusers group can tamper with the VirtualBox device files.
+        </para></important>
+      '';
+    };
+
+    headless = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Use a VirtualBox installation without the GUI and Qt dependency. Useful on servers,
+        or when virtual machines are controlled only via SSH.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable (mkMerge [{
+    warnings = mkIf (config.nixpkgs.config.virtualbox.enableExtensionPack or false)
+      ["'nixpkgs.virtualbox.enableExtensionPack' has no effect, please use 'virtualisation.virtualbox.host.enableExtensionPack'"];
+    boot.kernelModules = [ "vboxdrv" "vboxnetadp" "vboxnetflt" ];
+    boot.extraModulePackages = [ kernelModules ];
+    environment.systemPackages = [ virtualbox ];
+
+    security.wrappers = let
+      mkSuid = program: {
+        source = "${virtualbox}/libexec/virtualbox/${program}";
+        owner = "root";
+        group = "vboxusers";
+        setuid = true;
+      };
+    in mkIf cfg.enableHardening
+      (builtins.listToAttrs (map (x: { name = x; value = mkSuid x; }) [
+      "VBoxHeadless"
+      "VBoxNetAdpCtl"
+      "VBoxNetDHCP"
+      "VBoxNetNAT"
+      "VBoxSDL"
+      "VBoxVolInfo"
+      "VirtualBoxVM"
+    ]));
+
+    users.groups.vboxusers.gid = config.ids.gids.vboxusers;
+
+    services.udev.extraRules =
+      ''
+        KERNEL=="vboxdrv",    OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+        KERNEL=="vboxdrvu",   OWNER="root", GROUP="root",      MODE="0666", TAG+="systemd"
+        KERNEL=="vboxnetctl", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+        SUBSYSTEM=="usb_device", ACTION=="add", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
+        SUBSYSTEM=="usb", ACTION=="add", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
+        SUBSYSTEM=="usb_device", ACTION=="remove", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
+        SUBSYSTEM=="usb", ACTION=="remove", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
+      '';
+
+    # Since we lack the right setuid/setcap binaries, set up a host-only network by default.
+  } (mkIf cfg.addNetworkInterface {
+    systemd.services."vboxnet0" =
+      { description = "VirtualBox vboxnet0 Interface";
+        requires = [ "dev-vboxnetctl.device" ];
+        after = [ "dev-vboxnetctl.device" ];
+        wantedBy = [ "network.target" "sys-subsystem-net-devices-vboxnet0.device" ];
+        path = [ virtualbox ];
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.Type = "oneshot";
+        serviceConfig.PrivateTmp = true;
+        environment.VBOX_USER_HOME = "/tmp";
+        script =
+          ''
+            if ! [ -e /sys/class/net/vboxnet0 ]; then
+              VBoxManage hostonlyif create
+              cat /tmp/VBoxSVC.log >&2
+            fi
+          '';
+        postStop =
+          ''
+            VBoxManage hostonlyif remove vboxnet0
+          '';
+      };
+
+    networking.interfaces.vboxnet0.ipv4.addresses = [{ address = "192.168.56.1"; prefixLength = 24; }];
+    # Make sure NetworkManager won't assume this interface being up
+    # means we have internet access.
+    networking.networkmanager.unmanaged = ["vboxnet0"];
+  })]);
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix
new file mode 100644
index 000000000000..ab65523592d7
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix
@@ -0,0 +1,111 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualbox;
+
+in {
+
+  options = {
+    virtualbox = {
+      baseImageSize = mkOption {
+        type = types.int;
+        default = 50 * 1024;
+        description = ''
+          The size of the VirtualBox base image in MiB.
+        '';
+      };
+      memorySize = mkOption {
+        type = types.int;
+        default = 1536;
+        description = ''
+          The amount of RAM the VirtualBox appliance can use in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-ova-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = ''
+          The name of the derivation for the VirtualBox appliance.
+        '';
+      };
+      vmName = mkOption {
+        type = types.str;
+        default = "NixOS ${config.system.nixos.label} (${pkgs.stdenv.hostPlatform.system})";
+        description = ''
+          The name of the VirtualBox appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.ova";
+        description = ''
+          The file name of the VirtualBox appliance.
+        '';
+      };
+    };
+  };
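+  # A hedged usage sketch, enlarging the appliance defaults (values are
+  # illustrative):
+  #   virtualbox.baseImageSize = 100 * 1024;  # 100 GiB virtual disk
+  #   virtualbox.memorySize = 4096;           # 4 GiB of RAM for the appliance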
+
+  config = {
+    system.build.virtualBoxOVA = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+
+      inherit pkgs lib config;
+      partitionTableType = "legacy";
+      diskSize = cfg.baseImageSize;
+
+      postVM =
+        ''
+          export HOME=$PWD
+          export PATH=${pkgs.virtualbox}/bin:$PATH
+
+          echo "creating VirtualBox pass-through disk wrapper (no copying involved)..."
+          VBoxManage internalcommands createrawvmdk -filename disk.vmdk -rawdisk $diskImage
+
+          echo "creating VirtualBox VM..."
+          vmName="${cfg.vmName}";
+          VBoxManage createvm --name "$vmName" --register \
+            --ostype ${if pkgs.stdenv.hostPlatform.system == "x86_64-linux" then "Linux26_64" else "Linux26"}
+          VBoxManage modifyvm "$vmName" \
+            --memory ${toString cfg.memorySize} --acpi on --vram 32 \
+            ${optionalString (pkgs.stdenv.hostPlatform.system == "i686-linux") "--pae on"} \
+            --nictype1 virtio --nic1 nat \
+            --audiocontroller ac97 --audio alsa --audioout on \
+            --rtcuseutc on \
+            --usb on --usbehci on --mouse usbtablet
+          VBoxManage storagectl "$vmName" --name SATA --add sata --portcount 4 --bootable on --hostiocache on
+          VBoxManage storageattach "$vmName" --storagectl SATA --port 0 --device 0 --type hdd \
+            --medium disk.vmdk
+
+          echo "exporting VirtualBox VM..."
+          mkdir -p $out
+          fn="$out/${cfg.vmFileName}"
+          VBoxManage export "$vmName" --output "$fn" --options manifest
+
+          rm -v $diskImage
+
+          mkdir -p $out/nix-support
+          echo "file ova $fn" >> $out/nix-support/hydra-build-products
+        '';
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    boot.growPartition = true;
+    boot.loader.grub.device = "/dev/sda";
+
+    swapDevices = [{
+      device = "/var/swap";
+      size = 2048;
+    }];
+
+    virtualisation.virtualbox.guest.enable = true;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/vmware-guest.nix b/nixpkgs/nixos/modules/virtualisation/vmware-guest.nix
new file mode 100644
index 000000000000..d18778f81588
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/vmware-guest.nix
@@ -0,0 +1,56 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.vmware.guest;
+  open-vm-tools = if cfg.headless then pkgs.open-vm-tools-headless else pkgs.open-vm-tools;
+  xf86inputvmmouse = pkgs.xorg.xf86inputvmmouse;
+in
+{
+  options.virtualisation.vmware.guest = {
+    enable = mkEnableOption "VMware Guest Support";
+    headless = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Whether to disable X11-related features.";
+    };
+  };
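+  # A minimal sketch for a server-style VM that skips the X11 integration:
+  #   virtualisation.vmware.guest.enable = true;
+  #   virtualisation.vmware.guest.headless = true;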
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64;
+      message = "VMware guest is not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    } ];
+
+    boot.initrd.kernelModules = [ "vmw_pvscsi" ];
+
+    environment.systemPackages = [ open-vm-tools ];
+
+    systemd.services.vmware =
+      { description = "VMware Guest Service";
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig.ExecStart = "${open-vm-tools}/bin/vmtoolsd";
+      };
+
+    environment.etc."vmware-tools".source = "${open-vm-tools}/etc/vmware-tools/*";
+
+    services.xserver = mkIf (!cfg.headless) {
+      videoDrivers = mkOverride 50 [ "vmware" ];
+      modules = [ xf86inputvmmouse ];
+
+      config = ''
+          Section "InputClass"
+            Identifier "VMMouse"
+            MatchDevicePath "/dev/input/event*"
+            MatchProduct "ImPS/2 Generic Wheel Mouse"
+            Driver "vmmouse"
+          EndSection
+        '';
+
+      displayManager.sessionCommands = ''
+          ${open-vm-tools}/bin/vmware-user-suid-wrapper
+        '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix b/nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix
new file mode 100644
index 000000000000..675cf9297371
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.xe-guest-utilities;
+in {
+  options = {
+    services.xe-guest-utilities = {
+      enable = mkEnableOption "the Xen guest utilities daemon";
+    };
+  };
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.xe-guest-utilities ];
+    systemd.tmpfiles.rules = [ "d /run/xenstored 0755 - - -" ];
+
+    systemd.services.xe-daemon = {
+      description = "Xen guest utilities daemon";
+      wantedBy    = [ "multi-user.target" ];
+      after = [ "xe-linux-distribution.service" ];
+      requires = [ "proc-xen.mount" ];
+      path = [ pkgs.coreutils pkgs.iproute ];
+      serviceConfig = {
+        PIDFile = "/run/xe-daemon.pid";
+        ExecStart = "${pkgs.xe-guest-utilities}/bin/xe-daemon -p /run/xe-daemon.pid";
+        ExecStop = "${pkgs.procps}/bin/pkill -TERM -F /run/xe-daemon.pid";
+      };
+    };
+
+    systemd.services.xe-linux-distribution = {
+      description = "Xen Linux distribution service";
+      wantedBy    = [ "multi-user.target" ];
+      before = [ "xend.service" ];
+      path = [ pkgs.xe-guest-utilities pkgs.coreutils pkgs.gawk pkgs.gnused ];
+      serviceConfig = {
+        Type = "simple";
+        RemainAfterExit = "yes";
+        ExecStart = "${pkgs.xe-guest-utilities}/bin/xe-linux-distribution /var/cache/xe-linux-distribution";
+      };
+    };
+
+    systemd.mounts = [
+      { description = "Mount /proc/xen files";
+        what = "xenfs";
+        where = "/proc/xen";
+        type = "xenfs";
+        unitConfig = {
+          ConditionPathExists = "/proc/xen";
+          RefuseManualStop = "true";
+        };
+      }
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/xen-dom0.nix b/nixpkgs/nixos/modules/virtualisation/xen-dom0.nix
new file mode 100644
index 000000000000..70e575b6c0d2
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/xen-dom0.nix
@@ -0,0 +1,461 @@
+# Xen hypervisor (Dom0) support.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.xen;
+in
+
+{
+  ###### interface
+
+  options = {
+
+    virtualisation.xen.enable =
+      mkOption {
+        default = false;
+        type = types.bool;
+        description =
+          ''
+            Setting this option enables the Xen hypervisor, a
+            virtualisation technology that allows multiple virtual
+            machines, known as <emphasis>domains</emphasis>, to run
+            concurrently on the physical machine.  NixOS runs as the
+            privileged <emphasis>Domain 0</emphasis>.  This option
+            requires a reboot to take effect.
+          '';
+      };
+
+    virtualisation.xen.package = mkOption {
+      type = types.package;
+      defaultText = "pkgs.xen";
+      example = literalExample "pkgs.xen-light";
+      description = ''
+        The package providing the Xen binaries.
+      '';
+      relatedPackages = [ "xen" "xen-light" ];
+    };
+
+    virtualisation.xen.package-qemu = mkOption {
+      type = types.package;
+      defaultText = "pkgs.xen";
+      example = literalExample "pkgs.qemu_xen-light";
+      description = ''
+        The package providing the QEMU binaries used for dom0 QEMU and xendomains.
+      '';
+      relatedPackages = [ "xen"
+                          { name = "qemu_xen-light"; comment = "For use with pkgs.xen-light."; }
+                        ];
+    };
+
+    virtualisation.xen.bootParams =
+      mkOption {
+        default = "";
+        description =
+          ''
+            Parameters passed to the Xen hypervisor at boot time.
+          '';
+      };
+
+    virtualisation.xen.domain0MemorySize =
+      mkOption {
+        default = 0;
+        example = 512;
+        description =
+          ''
+            Amount of memory (in MiB) allocated to Domain 0 on boot.
+            If set to 0, all memory is assigned to Domain 0.
+          '';
+      };
+
+    virtualisation.xen.bridge = {
+        name = mkOption {
+          default = "xenbr0";
+          description = ''
+              Name of the bridge the Xen domUs connect to.
+            '';
+        };
+
+        address = mkOption {
+          type = types.str;
+          default = "172.16.0.1";
+          description = ''
+            IPv4 address of the bridge.
+          '';
+        };
+
+        prefixLength = mkOption {
+          type = types.addCheck types.int (n: n >= 0 && n <= 32);
+          default = 16;
+          description = ''
+            Subnet mask of the bridge interface, specified as the number of
+            bits in the prefix (e.g. <literal>24</literal>).
+            A DHCP server will provide IP addresses for the rest of the
+            subnet.
+          '';
+        };
+
+        forwardDns = mkOption {
+          default = false;
+          description = ''
+            If set to <literal>true</literal>, DNS queries from the
+            hosts connected to the bridge will be forwarded to the DNS
+            servers specified in <filename>/etc/resolv.conf</filename>.
+            '';
+        };
+
+      };
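+    # A hedged usage sketch: a /24 guest network with DNS forwarding to the
+    # host's resolvers (addresses are illustrative):
+    #   virtualisation.xen.bridge = {
+    #     address = "192.168.100.1";
+    #     prefixLength = 24;
+    #     forwardDns = true;
+    #   };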
+
+    virtualisation.xen.stored =
+      mkOption {
+        type = types.path;
+        description =
+          ''
+            Xen Store daemon to use. Defaults to oxenstored of the xen package.
+          '';
+      };
+
+    virtualisation.xen.domains = {
+        extraConfig = mkOption {
+          type = types.string;
+          default = "";
+          description =
+            ''
+              Options defined here will override the defaults for xendomains.
+              The default options can be seen in the file included from
+              /etc/default/xendomains.
+            '';
+          };
+      };
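+    # A hedged sketch: the variable names below follow the upstream
+    # xendomains defaults file and are assumptions here, not verified:
+    #   virtualisation.xen.domains.extraConfig = ''
+    #     XENDOMAINS_SAVE=/var/lib/xen/save
+    #     XENDOMAINS_RESTORE=true
+    #   '';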
+
+    virtualisation.xen.trace =
+      mkOption {
+        default = false;
+        description =
+          ''
+            Enable Xen tracing.
+          '';
+      };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.isx86_64;
+      message = "Xen is currently not supported on ${pkgs.stdenv.hostPlatform.system}";
+    } {
+      assertion = config.boot.loader.grub.enable && (config.boot.loader.grub.efiSupport == false);
+      message = "Xen currently does not support EFI boot";
+    } ];
+
+    virtualisation.xen.package = mkDefault pkgs.xen;
+    virtualisation.xen.package-qemu = mkDefault pkgs.xen;
+    virtualisation.xen.stored = mkDefault "${cfg.package}/bin/oxenstored";
+
+    environment.systemPackages = [ cfg.package ];
+
+    # Make sure Domain 0 gets the required configuration
+    #boot.kernelPackages = pkgs.boot.kernelPackages.override { features={xen_dom0=true;}; };
+
+    boot.kernelModules =
+      [ "xen-evtchn" "xen-gntdev" "xen-gntalloc" "xen-blkback" "xen-netback"
+        "xen-pciback" "evtchn" "gntdev" "netbk" "blkbk" "xen-scsibk"
+        "usbbk" "pciback" "xen-acpi-processor" "blktap2" "tun" "netxen_nic"
+        "xen_wdt" "xen-privcmd" "xen-scsiback"
+        "xenfs"
+      ];
+
+    # The xenfs module is needed in system.activationScripts.xen, but
+    # the modprobe command there fails silently. Include xenfs in the
+    # initrd as a workaround.
+    boot.initrd.kernelModules = [ "xenfs" ];
+
+    # The radeonfb kernel module causes the screen to go black as soon
+    # as it's loaded, so don't load it.
+    boot.blacklistedKernelModules = [ "radeonfb" ];
+
+    # Increase the number of loopback devices from the default (8),
+    # which is way too small because every VM virtual disk requires a
+    # loopback device.
+    boot.extraModprobeConfig =
+      ''
+        options loop max_loop=64
+      '';
+
+    virtualisation.xen.bootParams = [] ++
+      optionals cfg.trace [ "loglvl=all" "guest_loglvl=all" ] ++
+      optional (cfg.domain0MemorySize != 0) "dom0_mem=${toString cfg.domain0MemorySize}M";
+
+    system.extraSystemBuilderCmds =
+      ''
+        ln -s ${cfg.package}/boot/xen.gz $out/xen.gz
+        echo "${toString cfg.bootParams}" > $out/xen-params
+      '';
+
+    # Mount the /proc/xen pseudo-filesystem.
+    system.activationScripts.xen =
+      ''
+        if [ -d /proc/xen ]; then
+            ${pkgs.kmod}/bin/modprobe xenfs 2> /dev/null
+            ${pkgs.utillinux}/bin/mountpoint -q /proc/xen || \
+                ${pkgs.utillinux}/bin/mount -t xenfs none /proc/xen
+        fi
+      '';
+
+    # Domain 0 requires a pvops-enabled kernel.
+    system.requiredKernelConfig = with config.lib.kernelConfig;
+      [ (isYes "XEN")
+        (isYes "X86_IO_APIC")
+        (isYes "ACPI")
+        (isYes "XEN_DOM0")
+        (isYes "PCI_XEN")
+        (isYes "XEN_DEV_EVTCHN")
+        (isYes "XENFS")
+        (isYes "XEN_COMPAT_XENFS")
+        (isYes "XEN_SYS_HYPERVISOR")
+        (isYes "XEN_GNTDEV")
+        (isYes "XEN_BACKEND")
+        (isModule "XEN_NETDEV_BACKEND")
+        (isModule "XEN_BLKDEV_BACKEND")
+        (isModule "XEN_PCIDEV_BACKEND")
+        (isYes "XEN_BALLOON")
+        (isYes "XEN_SCRUB_PAGES")
+      ];
+
+
+    environment.etc =
+      [ { source = "${cfg.package}/etc/xen/xl.conf";
+          target = "xen/xl.conf";
+        }
+        { source = "${cfg.package}/etc/xen/scripts";
+          target = "xen/scripts";
+        }
+        { text = ''
+            source ${cfg.package}/etc/default/xendomains
+
+            ${cfg.domains.extraConfig}
+          '';
+          target = "default/xendomains";
+        }
+      ]
+      ++ lib.optionals (builtins.compareVersions cfg.package.version "4.10" >= 0) [
+        # in V 4.10 oxenstored requires /etc/xen/oxenstored.conf to start
+        { source = "${cfg.package}/etc/xen/oxenstored.conf";
+          target = "xen/oxenstored.conf";
+        }
+      ];
+
+    # Xen provides udev rules.
+    services.udev.packages = [ cfg.package ];
+
+    services.udev.path = [ pkgs.bridge-utils pkgs.iproute ];
+
+    systemd.services.xen-store = {
+      description = "Xen Store Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "xen-store.socket" ];
+      requires = [ "xen-store.socket" ];
+      preStart = ''
+        export XENSTORED_ROOTDIR="/var/lib/xenstored"
+        rm -f "$XENSTORED_ROOTDIR"/tdb* &>/dev/null
+
+        mkdir -p /var/run
+        mkdir -p /var/log/xen # Running xl requires /var/log/xen and /var/lib/xen,
+        mkdir -p /var/lib/xen # so we create them here unconditionally.
+        grep -q control_d /proc/xen/capabilities
+        '';
+      serviceConfig = if (builtins.compareVersions cfg.package.version "4.8" < 0) then
+        { ExecStart = ''
+            ${cfg.stored}${optionalString cfg.trace " -T /var/log/xen/xenstored-trace.log"} --no-fork
+            '';
+        } else {
+          ExecStart = ''
+            ${cfg.package}/etc/xen/scripts/launch-xenstore
+            '';
+          Type            = "notify";
+          RemainAfterExit = true;
+          NotifyAccess    = "all";
+        };
+      postStart = ''
+        ${optionalString (builtins.compareVersions cfg.package.version "4.8" < 0) ''
+          time=0
+          timeout=30
+          # Wait for xenstored to actually come up, timing out after 30 seconds
+          while [ $time -lt $timeout ] && ! `${cfg.package}/bin/xenstore-read -s / >/dev/null 2>&1` ; do
+              time=$(($time+1))
+              sleep 1
+          done
+
+          # Exit if we timed out
+          if ! [ $time -lt $timeout ] ; then
+              echo "Could not start Xenstore Daemon"
+              exit 1
+          fi
+        ''}
+        echo "executing xen-init-dom0"
+        ${cfg.package}/lib/xen/bin/xen-init-dom0
+        '';
+    };
+
+    systemd.sockets.xen-store = {
+      description = "XenStore Socket for userspace API";
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenStream = [ "/var/run/xenstored/socket" "/var/run/xenstored/socket_ro" ];
+        SocketMode = "0660";
+        SocketUser = "root";
+        SocketGroup = "root";
+      };
+    };
+
+
+    systemd.services.xen-console = {
+      description = "Xen Console Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-store.service" ];
+      requires = [ "xen-store.service" ];
+      preStart = ''
+        mkdir -p /var/run/xen
+        ${optionalString cfg.trace "mkdir -p /var/log/xen"}
+        grep -q control_d /proc/xen/capabilities
+        '';
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/xenconsoled\
+            ${optionalString ((builtins.compareVersions cfg.package.version "4.8" >= 0)) " -i"}\
+            ${optionalString cfg.trace " --log=all --log-dir=/var/log/xen"}
+          '';
+      };
+    };
+
+
+    systemd.services.xen-qemu = {
+      description = "Xen Qemu Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-console.service" ];
+      requires = [ "xen-store.service" ];
+      serviceConfig.ExecStart = ''
+        ${cfg.package-qemu}/${cfg.package-qemu.qemu-system-i386} \
+           -xen-attach -xen-domid 0 -name dom0 -M xenpv \
+           -nographic -monitor /dev/null -serial /dev/null -parallel /dev/null
+        '';
+    };
+
+
+    systemd.services.xen-watchdog = {
+      description = "Xen Watchdog Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-qemu.service" "xen-domains.service" ];
+      serviceConfig.ExecStart = "${cfg.package}/bin/xenwatchdogd 30 15";
+      serviceConfig.Type = "forking";
+      serviceConfig.RestartSec = "1";
+      serviceConfig.Restart = "on-failure";
+    };
+
+
+    systemd.services.xen-bridge = {
+      description = "Xen bridge";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "xen-domains.service" ];
+      preStart = ''
+        mkdir -p /var/run/xen
+        touch /var/run/xen/dnsmasq.pid
+        touch /var/run/xen/dnsmasq.etherfile
+        touch /var/run/xen/dnsmasq.leasefile
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Usable\ range`
+        export XEN_BRIDGE_IP_RANGE_START="${"\${data[1]//[[:blank:]]/}"}"
+        export XEN_BRIDGE_IP_RANGE_END="${"\${data[2]//[[:blank:]]/}"}"
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ address`
+        export XEN_BRIDGE_NETWORK_ADDRESS="${"\${data[1]//[[:blank:]]/}"}"
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ mask`
+        export XEN_BRIDGE_NETMASK="${"\${data[1]//[[:blank:]]/}"}"
+
+        echo "${cfg.bridge.address} host gw dns" > /var/run/xen/dnsmasq.hostsfile
+
+        cat <<EOF > /var/run/xen/dnsmasq.conf
+        no-daemon
+        pid-file=/var/run/xen/dnsmasq.pid
+        interface=${cfg.bridge.name}
+        except-interface=lo
+        bind-interfaces
+        auth-zone=xen.local,$XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength}
+        domain=xen.local
+        addn-hosts=/var/run/xen/dnsmasq.hostsfile
+        expand-hosts
+        strict-order
+        no-hosts
+        bogus-priv
+        ${optionalString (!cfg.bridge.forwardDns) ''
+          no-resolv
+          no-poll
+          auth-server=dns.xen.local,${cfg.bridge.name}
+        ''}
+        filterwin2k
+        clear-on-reload
+        domain-needed
+        dhcp-hostsfile=/var/run/xen/dnsmasq.etherfile
+        dhcp-authoritative
+        dhcp-range=$XEN_BRIDGE_IP_RANGE_START,$XEN_BRIDGE_IP_RANGE_END
+        dhcp-no-override
+        no-ping
+        dhcp-leasefile=/var/run/xen/dnsmasq.leasefile
+        EOF
+
+        # DHCP
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p tcp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p udp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        # DNS
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p tcp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p udp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+
+        ${pkgs.bridge-utils}/bin/brctl addbr ${cfg.bridge.name}
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} ${cfg.bridge.address}
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} netmask $XEN_BRIDGE_NETMASK
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} up
+      '';
+      serviceConfig.ExecStart = "${pkgs.dnsmasq}/bin/dnsmasq --conf-file=/var/run/xen/dnsmasq.conf";
+      postStop = ''
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ address`
+        export XEN_BRIDGE_NETWORK_ADDRESS="${"\${data[1]//[[:blank:]]/}"}"
+
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} down
+        ${pkgs.bridge-utils}/bin/brctl delbr ${cfg.bridge.name}
+
+        # DNS
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p udp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p tcp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        # DHCP
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p udp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p tcp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+      '';
+    };
+
+
+    systemd.services.xen-domains = {
+      description = "Xen domains - automatically starts, saves and restores Xen domains";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-bridge.service" "xen-qemu.service" ];
+      requires = [ "xen-bridge.service" "xen-qemu.service" ];
+      ## To prevent a race between dhcpcd and xend's bridge setup script
+      ## (which renames eth* to peth* and recreates eth* as a virtual
+      ## device), start dhcpcd after xend.
+      before = [ "dhcpd.service" ];
+      restartIfChanged = false;
+      serviceConfig.RemainAfterExit = "yes";
+      path = [ cfg.package cfg.package-qemu ];
+      environment.XENDOM_CONFIG = "${cfg.package}/etc/sysconfig/xendomains";
+      preStart = "mkdir -p /var/lock/subsys -m 755";
+      serviceConfig.ExecStart = "${cfg.package}/etc/init.d/xendomains start";
+      serviceConfig.ExecStop = "${cfg.package}/etc/init.d/xendomains stop";
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/xen-domU.nix b/nixpkgs/nixos/modules/virtualisation/xen-domU.nix
new file mode 100644
index 000000000000..c00b984c2ce0
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/xen-domU.nix
@@ -0,0 +1,19 @@
+# Common configuration for Xen DomU NixOS virtual machines.
+
+{ ... }:
+
+{
+  boot.loader.grub.version = 2;
+  boot.loader.grub.device = "nodev";
+
+  boot.initrd.kernelModules =
+    [ "xen-blkfront" "xen-tpmfront" "xen-kbdfront" "xen-fbfront"
+      "xen-netfront" "xen-pcifront" "xen-scsifront"
+    ];
+
+  # Send syslog messages to the Xen console.
+  services.syslogd.tty = "hvc0";
+
+  # Don't run ntpd, since we should get the correct time from Dom0.
+  services.timesyncd.enable = false;
+}