Diffstat (limited to 'nixpkgs/nixos/modules/virtualisation')
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/amazon-image.nix | 156
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/amazon-init.nix | 60
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/amazon-options.nix | 21
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/anbox.nix | 139
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-agent-entropy.patch | 17
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-agent.nix | 198
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-common.nix | 66
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-config-user.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-config.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-image.nix | 70
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/azure-images.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/brightbox-config.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/brightbox-image.nix | 166
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix | 40
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/container-config.nix | 31
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/containers.nix | 125
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/cri-o.nix | 136
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/digital-ocean-config.nix | 197
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/digital-ocean-image.nix | 69
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/digital-ocean-init.nix | 95
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker-image.nix | 57
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker-preloader.nix | 134
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker.nix | 223
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/ec2-amis.nix | 333
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/ec2-data.nix | 91
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.nix | 23
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/ecs-agent.nix | 45
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/gce-images.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/google-compute-config.nix | 148
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/google-compute-image.nix | 61
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/grow-partition.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/hyperv-image.nix | 70
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/kvmgt.nix | 86
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/libvirtd.nix | 280
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxc-container.nix | 26
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxc.nix | 82
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxcfs.nix | 45
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxd.nix | 150
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/nixos-containers.nix | 844
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/oci-containers.nix | 323
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/openstack-config.nix | 58
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/openvswitch.nix | 190
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/parallels-guest.nix | 156
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/podman.nix | 123
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix | 36
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/qemu-vm.nix | 672
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/railcar.nix | 125
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix | 93
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix | 160
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix | 138
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/vmware-guest.nix | 60
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/vmware-image.nix | 90
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix | 52
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/xen-dom0.nix | 454
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/xen-domU.nix | 19
57 files changed, 7139 insertions, 0 deletions
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-image.nix b/nixpkgs/nixos/modules/virtualisation/amazon-image.nix
new file mode 100644
index 000000000000..20d48add7129
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-image.nix
@@ -0,0 +1,156 @@
+# Configuration for Amazon EC2 instances. (Note that this file is a
+# misnomer - it should be "amazon-config.nix" or so, not
+# "amazon-image.nix", since it's used not only to build images but
+# also to reconfigure instances. However, we can't rename it because
+# existing "configuration.nix" files on EC2 instances refer to it.)
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.ec2;
+  metadataFetcher = import ./ec2-metadata-fetcher.nix {
+    targetRoot = "$targetRoot/";
+    wgetExtraOptions = "-q";
+  };
+in
+
+{
+  imports = [ ../profiles/headless.nix ./ec2-data.nix ./amazon-init.nix ];
+
+  config = {
+
+    assertions = [
+      { assertion = cfg.hvm;
+        message = "Paravirtualized EC2 instances are no longer supported.";
+      }
+      { assertion = cfg.efi -> cfg.hvm;
+        message = "EC2 instances using EFI must be HVM instances.";
+      }
+    ];
+
+    boot.growPartition = cfg.hvm;
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    fileSystems."/boot" = mkIf cfg.efi {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    boot.extraModulePackages = [
+      config.boot.kernelPackages.ena
+    ];
+    boot.initrd.kernelModules = [ "xen-blkfront" "xen-netfront" ];
+    boot.initrd.availableKernelModules = [ "ixgbevf" "ena" "nvme" ];
+    boot.kernelParams = mkIf cfg.hvm [ "console=ttyS0" ];
+
+    # Prevent the nouveau kernel module from being loaded, as it
+    # interferes with the nvidia/nvidia-uvm modules needed for CUDA.
+    # Also blacklist xen_fbfront to prevent a 30 second delay during
+    # boot.
+    boot.blacklistedKernelModules = [ "nouveau" "xen_fbfront" ];
+
+    # Generate a GRUB menu.  Amazon's pv-grub uses this to boot our kernel/initrd.
+    boot.loader.grub.version = if cfg.hvm then 2 else 1;
+    boot.loader.grub.device = if (cfg.hvm && !cfg.efi) then "/dev/xvda" else "nodev";
+    boot.loader.grub.extraPerEntryConfig = mkIf (!cfg.hvm) "root (hd0)";
+    boot.loader.grub.efiSupport = cfg.efi;
+    boot.loader.grub.efiInstallAsRemovable = cfg.efi;
+    boot.loader.timeout = 0;
+
+    boot.initrd.network.enable = true;
+
+    # Mount all formatted ephemeral disks and activate all swap devices.
+    # We cannot do this with the ‘fileSystems’ and ‘swapDevices’ options
+    # because the set of devices is dependent on the instance type
+    # (e.g. "m1.small" has one ephemeral filesystem and one swap device,
+    # while "m1.large" has two ephemeral filesystems and no swap
+    # devices).  Also, put /tmp and /var on /disk0, since it has a lot
+    # more space than the root device.  Similarly, "move" /nix to /disk0
+    # by layering a unionfs-fuse mount on top of it so we have a lot more space for
+    # Nix operations.
+    boot.initrd.postMountCommands =
+      ''
+        ${metadataFetcher}
+
+        diskNr=0
+        diskForUnionfs=
+        for device in /dev/xvd[abcde]*; do
+            if [ "$device" = /dev/xvda -o "$device" = /dev/xvda1 ]; then continue; fi
+            fsType=$(blkid -o value -s TYPE "$device" || true)
+            if [ "$fsType" = swap ]; then
+                echo "activating swap device $device..."
+                swapon "$device" || true
+            elif [ "$fsType" = ext3 ]; then
+                mp="/disk$diskNr"
+                diskNr=$((diskNr + 1))
+                if mountFS "$device" "$mp" "" ext3; then
+                    if [ -z "$diskForUnionfs" ]; then diskForUnionfs="$mp"; fi
+                fi
+            else
+                echo "skipping unknown device type $device"
+            fi
+        done
+
+        if [ -n "$diskForUnionfs" ]; then
+            mkdir -m 755 -p $targetRoot/$diskForUnionfs/root
+
+            mkdir -m 1777 -p $targetRoot/$diskForUnionfs/root/tmp $targetRoot/tmp
+            mount --bind $targetRoot/$diskForUnionfs/root/tmp $targetRoot/tmp
+
+            if [ "$(cat "$metaDir/ami-manifest-path")" != "(unknown)" ]; then
+                mkdir -m 755 -p $targetRoot/$diskForUnionfs/root/var $targetRoot/var
+                mount --bind $targetRoot/$diskForUnionfs/root/var $targetRoot/var
+
+                mkdir -p /unionfs-chroot/ro-nix
+                mount --rbind $targetRoot/nix /unionfs-chroot/ro-nix
+
+                mkdir -m 755 -p $targetRoot/$diskForUnionfs/root/nix
+                mkdir -p /unionfs-chroot/rw-nix
+                mount --rbind $targetRoot/$diskForUnionfs/root/nix /unionfs-chroot/rw-nix
+
+                unionfs -o allow_other,cow,nonempty,chroot=/unionfs-chroot,max_files=32768 /rw-nix=RW:/ro-nix=RO $targetRoot/nix
+            fi
+        fi
+      '';
+
+    boot.initrd.extraUtilsCommands =
+      ''
+        # We need swapon in the initrd.
+        copy_bin_and_libs ${pkgs.utillinux}/sbin/swapon
+      '';
+
+    # Don't put old configurations in the GRUB menu.  The user has no
+    # way to select them anyway.
+    boot.loader.grub.configurationLimit = 0;
+
+    # Allow root logins only using the SSH key that the user specified
+    # at instance creation time.
+    services.openssh.enable = true;
+    services.openssh.permitRootLogin = "prohibit-password";
+
+    # Creates symlinks for block device names.
+    services.udev.packages = [ pkgs.ec2-utils ];
+
+    # Force getting the hostname from EC2.
+    networking.hostName = mkDefault "";
+
+    # Always include cryptsetup so that Charon can use it.
+    environment.systemPackages = [ pkgs.cryptsetup ];
+
+    boot.initrd.supportedFilesystems = [ "unionfs-fuse" ];
+
+    # EC2 has its own NTP server provided by the hypervisor
+    networking.timeServers = [ "169.254.169.123" ];
+
+    # udisks has become too bloated to have in a headless system
+    # (e.g. it depends on GTK).
+    services.udisks2.enable = false;
+  };
+}
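
For reference, a minimal sketch of an instance's /etc/nixos/configuration.nix that uses this module; the ec2.hvm line is only needed for systems whose stateVersion predates 17.03 (see amazon-options.nix below):

    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/amazon-image.nix" ];
      ec2.hvm = true;
    }
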
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-init.nix b/nixpkgs/nixos/modules/virtualisation/amazon-init.nix
new file mode 100644
index 000000000000..8c12e0e49bf5
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-init.nix
@@ -0,0 +1,60 @@
+{ config, pkgs, ... }:
+
+let
+  script = ''
+    #!${pkgs.runtimeShell} -eu
+
+    echo "attempting to fetch configuration from EC2 user data..."
+
+    export HOME=/root
+    export PATH=${pkgs.lib.makeBinPath [ config.nix.package pkgs.systemd pkgs.gnugrep pkgs.git pkgs.gnutar pkgs.gzip pkgs.gnused config.system.build.nixos-rebuild]}:$PATH
+    export NIX_PATH=nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos:nixos-config=/etc/nixos/configuration.nix:/nix/var/nix/profiles/per-user/root/channels
+
+    userData=/etc/ec2-metadata/user-data
+
+    if [ -s "$userData" ]; then
+      # If the user-data looks like it could be a nix expression,
+      # copy it over. Also, look for a magic three-hash comment and set
+      # that as the channel.
+      if sed '/^\(#\|SSH_HOST_.*\)/d' < "$userData" | grep -q '\S'; then
+        channels="$(grep '^###' "$userData" | sed 's|###\s*||')"
+        while IFS= read -r channel; do
+          echo "writing channel: $channel"
+        done < <(printf "%s\n" "$channels")
+
+        if [[ -n "$channels" ]]; then
+          printf "%s" "$channels" > /root/.nix-channels
+          nix-channel --update
+        fi
+
+        echo "setting configuration from EC2 user data"
+        cp "$userData" /etc/nixos/configuration.nix
+      else
+        echo "user data does not appear to be a Nix expression; ignoring"
+        exit
+      fi
+    else
+      echo "no user data is available"
+      exit
+    fi
+
+    nixos-rebuild switch
+  '';
+in {
+  systemd.services.amazon-init = {
+    inherit script;
+    description = "Reconfigure the system from EC2 userdata on startup";
+
+    wantedBy = [ "multi-user.target" ];
+    after = [ "multi-user.target" ];
+    requires = [ "network-online.target" ];
+
+    restartIfChanged = false;
+    unitConfig.X-StopOnRemoval = false;
+
+    serviceConfig = {
+      Type = "oneshot";
+      RemainAfterExit = true;
+    };
+  };
+}
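
As a sketch, EC2 user data that this unit would act on could look like the following; the channel URL and the nginx option are illustrative. The "###" line becomes /root/.nix-channels ("url name" format), and the whole blob is copied to /etc/nixos/configuration.nix before nixos-rebuild switch runs:

    ### https://nixos.org/channels/nixos-20.03 nixos
    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/amazon-image.nix" ];
      services.nginx.enable = true;
    }
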
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-options.nix b/nixpkgs/nixos/modules/virtualisation/amazon-options.nix
new file mode 100644
index 000000000000..2e807131e938
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-options.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+{
+  options = {
+    ec2 = {
+      hvm = lib.mkOption {
+        default = lib.versionAtLeast config.system.stateVersion "17.03";
+        internal = true;
+        description = ''
+          Whether the EC2 instance is a HVM instance.
+        '';
+      };
+      efi = lib.mkOption {
+        default = pkgs.stdenv.hostPlatform.isAarch64;
+        internal = true;
+        description = ''
+          Whether the EC2 instance is using EFI.
+        '';
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/anbox.nix b/nixpkgs/nixos/modules/virtualisation/anbox.nix
new file mode 100644
index 000000000000..da5df3580734
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/anbox.nix
@@ -0,0 +1,139 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.anbox;
+  kernelPackages = config.boot.kernelPackages;
+  addrOpts = v: addr: pref: name: {
+    address = mkOption {
+      default = addr;
+      type = types.str;
+      description = ''
+        IPv${toString v} ${name} address.
+      '';
+    };
+
+    prefixLength = mkOption {
+      default = pref;
+      type = types.addCheck types.int (n: n >= 0 && n <= (if v == 4 then 32 else 128));
+      description = ''
+        Subnet mask of the ${name} address, specified as the number of
+        bits in the prefix (<literal>${if v == 4 then "24" else "64"}</literal>).
+      '';
+    };
+  };
+
+in
+
+{
+
+  options.virtualisation.anbox = {
+
+    enable = mkEnableOption "Anbox";
+
+    image = mkOption {
+      default = pkgs.anbox.image;
+      example = literalExample "pkgs.anbox.image";
+      type = types.package;
+      description = ''
+        Base android image for Anbox.
+      '';
+    };
+
+    extraInit = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Extra shell commands to be run inside the container image during init.
+      '';
+    };
+
+    ipv4 = {
+      container = addrOpts 4 "192.168.250.2" 24 "Container";
+      gateway = addrOpts 4 "192.168.250.1" 24 "Host";
+
+      dns = mkOption {
+        default = "1.1.1.1";
+        type = types.str;
+        description = ''
+          Container DNS server.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = singleton {
+      assertion = versionAtLeast (getVersion config.boot.kernelPackages.kernel) "4.18";
+      message = "Anbox needs user namespace support to work properly";
+    };
+
+    environment.systemPackages = with pkgs; [ anbox ];
+
+    boot.kernelModules = [ "ashmem_linux" "binder_linux" ];
+    boot.extraModulePackages = [ kernelPackages.anbox ];
+
+    services.udev.extraRules = ''
+      KERNEL=="ashmem", NAME="%k", MODE="0666"
+      KERNEL=="binder*", NAME="%k", MODE="0666"
+    '';
+
+    virtualisation.lxc.enable = true;
+    networking.bridges.anbox0.interfaces = [];
+    networking.interfaces.anbox0.ipv4.addresses = [ cfg.ipv4.gateway ];
+
+    networking.nat = {
+      enable = true;
+      internalInterfaces = [ "anbox0" ];
+    };
+
+    systemd.services.anbox-container-manager = let
+      anboxloc = "/var/lib/anbox";
+    in {
+      description = "Anbox Container Management Daemon";
+
+      environment.XDG_RUNTIME_DIR="${anboxloc}";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+      preStart = let
+        initsh = pkgs.writeText "nixos-init" (''
+          #!/system/bin/sh
+          setprop nixos.version ${config.system.nixos.version}
+
+          # we don't have radio
+          setprop ro.radio.noril yes
+          stop ril-daemon
+
+          # speed up boot
+          setprop debug.sf.nobootanimation 1
+        '' + cfg.extraInit);
+        initshloc = "${anboxloc}/rootfs-overlay/system/etc/init.goldfish.sh";
+      in ''
+        mkdir -p ${anboxloc}
+        mkdir -p $(dirname ${initshloc})
+        [ -f ${initshloc} ] && rm ${initshloc}
+        cp ${initsh} ${initshloc}
+        chown 100000:100000 ${initshloc}
+        chmod +x ${initshloc}
+      '';
+
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.anbox}/bin/anbox container-manager \
+            --data-path=${anboxloc} \
+            --android-image=${cfg.image} \
+            --container-network-address=${cfg.ipv4.container.address} \
+            --container-network-gateway=${cfg.ipv4.gateway.address} \
+            --container-network-dns-servers=${cfg.ipv4.dns} \
+            --use-rootfs-overlay \
+            --privileged
+        '';
+      };
+    };
+  };
+
+}
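
A hypothetical configuration enabling Anbox; the DNS value is an illustrative override, and the defaults declared above apply otherwise:

    {
      virtualisation.anbox.enable = true;
      virtualisation.anbox.ipv4.dns = "9.9.9.9";  # illustrative; default is 1.1.1.1
    }
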
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-agent-entropy.patch b/nixpkgs/nixos/modules/virtualisation/azure-agent-entropy.patch
new file mode 100644
index 000000000000..2a7ad08a4afc
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-agent-entropy.patch
@@ -0,0 +1,17 @@
+--- a/waagent	2016-03-12 09:58:15.728088851 +0200
++++ a/waagent	2016-03-12 09:58:43.572680025 +0200
+@@ -6173,10 +6173,10 @@
+             Log("MAC  address: " + ":".join(["%02X" % Ord(a) for a in mac]))
+         
+         # Consume Entropy in ACPI table provided by Hyper-V
+-        try:
+-            SetFileContents("/dev/random", GetFileContents("/sys/firmware/acpi/tables/OEM0"))
+-        except:
+-            pass
++        #try:
++        #    SetFileContents("/dev/random", GetFileContents("/sys/firmware/acpi/tables/OEM0"))
++        #except:
++        #    pass
+ 
+         Log("Probing for Azure environment.")
+         self.Endpoint = self.DoDhcpWork()
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-agent.nix b/nixpkgs/nixos/modules/virtualisation/azure-agent.nix
new file mode 100644
index 000000000000..e85482af8392
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-agent.nix
@@ -0,0 +1,198 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.azure.agent;
+
+  waagent = with pkgs; stdenv.mkDerivation rec {
+    name = "waagent-2.0";
+    src = pkgs.fetchFromGitHub {
+      owner = "Azure";
+      repo = "WALinuxAgent";
+      rev = "1b3a8407a95344d9d12a2a377f64140975f1e8e4";
+      sha256 = "10byzvmpgrmr4d5mdn2kq04aapqb3sgr1admk13wjmy5cd6bwd2x";
+    };
+
+    patches = [ ./azure-agent-entropy.patch ];
+
+    buildInputs = [ makeWrapper python pythonPackages.wrapPython ];
+    runtimeDeps = [ findutils gnugrep gawk coreutils openssl openssh
+                    nettools # for hostname
+                    procps # for pidof
+                    shadow # for useradd, usermod
+                    utillinux # for (u)mount, fdisk, sfdisk, mkswap
+                    parted
+                  ];
+    pythonPath = [ pythonPackages.pyasn1 ];
+
+    configurePhase = false;
+    buildPhase = false;
+
+    installPhase = ''
+      substituteInPlace config/99-azure-product-uuid.rules \
+          --replace /bin/chmod "${coreutils}/bin/chmod"
+      mkdir -p $out/lib/udev/rules.d
+      cp config/*.rules $out/lib/udev/rules.d
+
+      mkdir -p $out/bin
+      cp waagent $out/bin/
+      chmod +x $out/bin/waagent
+
+      wrapProgram "$out/bin/waagent" \
+          --prefix PYTHONPATH : $PYTHONPATH \
+          --prefix PATH : "${makeBinPath runtimeDeps}"
+    '';
+  };
+
+  provisionedHook = pkgs.writeScript "provisioned-hook" ''
+    #!${pkgs.runtimeShell}
+    /run/current-system/systemd/bin/systemctl start provisioned.target
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options.virtualisation.azure.agent = {
+    enable = mkOption {
+      default = false;
+      description = "Whether to enable the Windows Azure Linux Agent.";
+    };
+    verboseLogging = mkOption {
+      default = false;
+      description = "Whether to enable verbose logging.";
+    };
+    mountResourceDisk = mkOption {
+      default = true;
+      description = "Whether the agent should format (ext4) and mount the resource disk to /mnt/resource.";
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64;
+      message = "Azure not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    } {
+      assertion = config.networking.networkmanager.enable == false;
+      message = "Windows Azure Linux Agent is not compatible with NetworkManager";
+    } ];
+
+    boot.initrd.kernelModules = [ "ata_piix" ];
+    networking.firewall.allowedUDPPorts = [ 68 ];
+
+
+    environment.etc."waagent.conf".text = ''
+        #
+        # Windows Azure Linux Agent Configuration
+        #
+
+        Role.StateConsumer=${provisionedHook}
+
+        # Enable instance creation
+        Provisioning.Enabled=y
+
+        # Password authentication for root account will be unavailable.
+        Provisioning.DeleteRootPassword=n
+
+        # Generate fresh host key pair.
+        Provisioning.RegenerateSshHostKeyPair=n
+
+        # Supported values are "rsa", "dsa" and "ecdsa".
+        Provisioning.SshHostKeyPairType=ed25519
+
+        # Monitor host name changes and publish changes via DHCP requests.
+        Provisioning.MonitorHostName=y
+
+        # Decode CustomData from Base64.
+        Provisioning.DecodeCustomData=n
+
+        # Execute CustomData after provisioning.
+        Provisioning.ExecuteCustomData=n
+
+        # Format if unformatted. If 'n', resource disk will not be mounted.
+        ResourceDisk.Format=${if cfg.mountResourceDisk then "y" else "n"}
+
+        # File system on the resource disk
+        # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here.
+        ResourceDisk.Filesystem=ext4
+
+        # Mount point for the resource disk
+        ResourceDisk.MountPoint=/mnt/resource
+
+        # Respond to load balancer probes if requested by Windows Azure.
+        LBProbeResponder=y
+
+        # Enable logging to serial console (y|n)
+        # When stdout is not enough...
+        # 'y' if not set
+        Logs.Console=y
+
+        # Enable verbose logging (y|n)
+        Logs.Verbose=${if cfg.verboseLogging then "y" else "n"}
+
+        # Root device timeout in seconds.
+        OS.RootDeviceScsiTimeout=300
+    '';
+
+    services.udev.packages = [ waagent ];
+
+    networking.dhcpcd.persistent = true;
+
+    services.logrotate = {
+      enable = true;
+      config = ''
+        /var/log/waagent.log {
+            compress
+            monthly
+            rotate 6
+            notifempty
+            missingok
+        }
+      '';
+    };
+
+    systemd.targets.provisioned = {
+      description = "Services Requiring Azure VM provisioning to have finished";
+    };
+
+  systemd.services.consume-hypervisor-entropy =
+    { description = "Consume entropy in ACPI table provided by Hyper-V";
+
+      wantedBy = [ "sshd.service" "waagent.service" ];
+      before = [ "sshd.service" "waagent.service" ];
+
+      path  = [ pkgs.coreutils ];
+      script =
+        ''
+          echo "Fetching entropy..."
+          cat /sys/firmware/acpi/tables/OEM0 > /dev/random
+        '';
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      serviceConfig.StandardError = "journal+console";
+      serviceConfig.StandardOutput = "journal+console";
+     };
+
+    systemd.services.waagent = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" "sshd.service" ];
+      wants = [ "network-online.target" ];
+
+      path = [ pkgs.e2fsprogs pkgs.bash ];
+      description = "Windows Azure Agent Service";
+      unitConfig.ConditionPathExists = "/etc/waagent.conf";
+      serviceConfig = {
+        ExecStart = "${waagent}/bin/waagent -daemon";
+        Type = "simple";
+      };
+    };
+
+  };
+
+}
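
A sketch of enabling the agent directly (azure-common.nix below does this as part of the full image profile); the values shown differ from the defaults for illustration:

    {
      virtualisation.azure.agent.enable = true;
      virtualisation.azure.agent.verboseLogging = true;      # default false
      virtualisation.azure.agent.mountResourceDisk = false;  # default true
    }
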
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix b/nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix
new file mode 100644
index 000000000000..281be9a12318
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix
@@ -0,0 +1,3 @@
+{
+    "16.03" = "https://nixos.blob.core.windows.net/images/nixos-image-16.03.847.8688c17-x86_64-linux.vhd";
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-common.nix b/nixpkgs/nixos/modules/virtualisation/azure-common.nix
new file mode 100644
index 000000000000..8efa177e30d1
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-common.nix
@@ -0,0 +1,66 @@
+{ lib, pkgs, ... }:
+
+with lib;
+{
+  imports = [ ../profiles/headless.nix ];
+
+  require = [ ./azure-agent.nix ];
+  virtualisation.azure.agent.enable = true;
+
+  boot.kernelParams = [ "console=ttyS0" "earlyprintk=ttyS0" "rootdelay=300" "panic=1" "boot.panic_on_fail" ];
+  boot.initrd.kernelModules = [ "hv_vmbus" "hv_netvsc" "hv_utils" "hv_storvsc" ];
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/sda";
+  boot.loader.grub.version = 2;
+  boot.loader.timeout = 0;
+
+  boot.growPartition = true;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  fileSystems."/".device = "/dev/disk/by-label/nixos";
+
+  # Allow root logins only using the SSH key that the user specified
+  # at instance creation time, ping client connections to avoid timeouts
+  services.openssh.enable = true;
+  services.openssh.permitRootLogin = "prohibit-password";
+  services.openssh.extraConfig = ''
+    ClientAliveInterval 180
+  '';
+
+  # Force getting the hostname from Azure
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  # sg_scan is needed to finalize disk removal on older kernels
+  environment.systemPackages = [ pkgs.cryptsetup pkgs.sg3_utils ];
+
+  networking.usePredictableInterfaceNames = false;
+
+  services.udev.extraRules = ''
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:0", ATTR{removable}=="0", SYMLINK+="disk/by-lun/0",
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:1", ATTR{removable}=="0", SYMLINK+="disk/by-lun/1",
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:2", ATTR{removable}=="0", SYMLINK+="disk/by-lun/2"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:3", ATTR{removable}=="0", SYMLINK+="disk/by-lun/3"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:4", ATTR{removable}=="0", SYMLINK+="disk/by-lun/4"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:5", ATTR{removable}=="0", SYMLINK+="disk/by-lun/5"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:6", ATTR{removable}=="0", SYMLINK+="disk/by-lun/6"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:7", ATTR{removable}=="0", SYMLINK+="disk/by-lun/7"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:8", ATTR{removable}=="0", SYMLINK+="disk/by-lun/8"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:9", ATTR{removable}=="0", SYMLINK+="disk/by-lun/9"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:10", ATTR{removable}=="0", SYMLINK+="disk/by-lun/10"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:11", ATTR{removable}=="0", SYMLINK+="disk/by-lun/11"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:12", ATTR{removable}=="0", SYMLINK+="disk/by-lun/12"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:13", ATTR{removable}=="0", SYMLINK+="disk/by-lun/13"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:14", ATTR{removable}=="0", SYMLINK+="disk/by-lun/14"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:15", ATTR{removable}=="0", SYMLINK+="disk/by-lun/15"
+
+  '';
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-config-user.nix b/nixpkgs/nixos/modules/virtualisation/azure-config-user.nix
new file mode 100644
index 000000000000..267ba50ae025
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-config-user.nix
@@ -0,0 +1,12 @@
+{ modulesPath, ... }:
+
+{
+  # To build the configuration or use nix-env, you need to run
+  # either nixos-rebuild --upgrade or nix-channel --update
+  # to fetch the nixos channel.
+
+  # This configures everything but bootstrap services,
+  # which only need to be run once and have already finished
+  # if you are able to see this comment.
+  imports = [ "${modulesPath}/virtualisation/azure-common.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-config.nix b/nixpkgs/nixos/modules/virtualisation/azure-config.nix
new file mode 100644
index 000000000000..780bd1b78dce
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-config.nix
@@ -0,0 +1,5 @@
+{ modulesPath, ... }:
+
+{
+  imports = [ "${modulesPath}/virtualisation/azure-image.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-image.nix b/nixpkgs/nixos/modules/virtualisation/azure-image.nix
new file mode 100644
index 000000000000..21fd58e5c902
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-image.nix
@@ -0,0 +1,70 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.azureImage;
+in
+{
+  imports = [ ./azure-common.nix ];
+  
+  options = {
+    virtualisation.azureImage.diskSize = mkOption {
+      type = with types; int;
+      default = 2048;
+      description = ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+  };
+  config = {
+    system.build.azureImage = import ../../lib/make-disk-image.nix {
+      name = "azure-image";
+      postVM = ''
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o subformat=fixed,force_size -O vpc $diskImage $out/disk.vhd
+        rm $diskImage
+      '';
+      configFile = ./azure-config-user.nix;
+      format = "raw";
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+
+    # Azure metadata is available as a CD-ROM drive.
+    fileSystems."/metadata".device = "/dev/sr0";
+
+    systemd.services.fetch-ssh-keys = {
+      description = "Fetch host keys and authorized_keys for root user";
+
+      wantedBy = [ "sshd.service" "waagent.service" ];
+      before = [ "sshd.service" "waagent.service" ];
+
+      path  = [ pkgs.coreutils ];
+      script =
+        ''
+          eval "$(cat /metadata/CustomData.bin)"
+          if ! [ -z "$ssh_host_ecdsa_key" ]; then
+            echo "downloaded ssh_host_ecdsa_key"
+            echo "$ssh_host_ecdsa_key" > /etc/ssh/ssh_host_ed25519_key
+            chmod 600 /etc/ssh/ssh_host_ed25519_key
+          fi
+
+          if ! [ -z "$ssh_host_ecdsa_key_pub" ]; then
+            echo "downloaded ssh_host_ecdsa_key_pub"
+            echo "$ssh_host_ecdsa_key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
+            chmod 644 /etc/ssh/ssh_host_ed25519_key.pub
+          fi
+
+          if ! [ -z "$ssh_root_auth_key" ]; then
+            echo "downloaded ssh_root_auth_key"
+            mkdir -m 0700 -p /root/.ssh
+            echo "$ssh_root_auth_key" > /root/.ssh/authorized_keys
+            chmod 600 /root/.ssh/authorized_keys
+          fi
+        '';
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      serviceConfig.StandardError = "journal+console";
+      serviceConfig.StandardOutput = "journal+console";
+    };
+  };
+}
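
A hypothetical wrapper configuration for building a larger image; the resulting VHD is exposed as config.system.build.azureImage:

    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/azure-image.nix" ];
      virtualisation.azureImage.diskSize = 4096;  # MB; illustrative, default is 2048
    }
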
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-images.nix b/nixpkgs/nixos/modules/virtualisation/azure-images.nix
new file mode 100644
index 000000000000..22c82fc14f65
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-images.nix
@@ -0,0 +1,5 @@
+let self = {
+  "16.09" = "https://nixos.blob.core.windows.net/images/nixos-image-16.09.1694.019dcc3-x86_64-linux.vhd";
+
+  latest = self."16.09";
+}; in self
diff --git a/nixpkgs/nixos/modules/virtualisation/brightbox-config.nix b/nixpkgs/nixos/modules/virtualisation/brightbox-config.nix
new file mode 100644
index 000000000000..0a018e4cd695
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/brightbox-config.nix
@@ -0,0 +1,5 @@
+{ modulesPath, ... }:
+
+{
+  imports = [ "${modulesPath}/virtualisation/brightbox-image.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/brightbox-image.nix b/nixpkgs/nixos/modules/virtualisation/brightbox-image.nix
new file mode 100644
index 000000000000..d0efbcc808aa
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/brightbox-image.nix
@@ -0,0 +1,166 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  diskSize = "20G";
+in
+{
+  imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ];
+
+  system.build.brightboxImage =
+    pkgs.vmTools.runInLinuxVM (
+      pkgs.runCommand "brightbox-image"
+        { preVM =
+            ''
+              mkdir $out
+              diskImage=$out/$diskImageBase
+              truncate $diskImage --size ${diskSize}
+              mv closure xchg/
+            '';
+
+          postVM =
+            ''
+              PATH=$PATH:${lib.makeBinPath [ pkgs.gnutar pkgs.gzip ]}
+              pushd $out
+              ${pkgs.qemu_kvm}/bin/qemu-img convert -c -O qcow2 $diskImageBase nixos.qcow2
+              rm $diskImageBase
+              popd
+            '';
+          diskImageBase = "nixos-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.raw";
+          buildInputs = [ pkgs.utillinux pkgs.perl ];
+          exportReferencesGraph =
+            [ "closure" config.system.build.toplevel ];
+        }
+        ''
+          # Create partition table
+          ${pkgs.parted}/sbin/parted --script /dev/vda mklabel msdos
+          ${pkgs.parted}/sbin/parted --script /dev/vda mkpart primary ext4 1 ${diskSize}
+          ${pkgs.parted}/sbin/parted --script /dev/vda print
+          . /sys/class/block/vda1/uevent
+          mknod /dev/vda1 b $MAJOR $MINOR
+
+          # Create an empty filesystem and mount it.
+          ${pkgs.e2fsprogs}/sbin/mkfs.ext4 -L nixos /dev/vda1
+          ${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
+
+          mkdir /mnt
+          mount /dev/vda1 /mnt
+
+          # The initrd expects these directories to exist.
+          mkdir /mnt/dev /mnt/proc /mnt/sys
+
+          mount --bind /proc /mnt/proc
+          mount --bind /dev /mnt/dev
+          mount --bind /sys /mnt/sys
+
+          # Copy all paths in the closure to the filesystem.
+          storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
+
+          mkdir -p /mnt/nix/store
+          echo "copying everything (will take a while)..."
+          cp -prd $storePaths /mnt/nix/store/
+
+          # Register the paths in the Nix database.
+          printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
+              chroot /mnt ${config.nix.package.out}/bin/nix-store --load-db --option build-users-group ""
+
+          # Create the system profile to allow nixos-rebuild to work.
+          chroot /mnt ${config.nix.package.out}/bin/nix-env \
+              -p /nix/var/nix/profiles/system --set ${config.system.build.toplevel} \
+              --option build-users-group ""
+
+          # `nixos-rebuild' requires an /etc/NIXOS.
+          mkdir -p /mnt/etc
+          touch /mnt/etc/NIXOS
+
+          # `switch-to-configuration' requires a /bin/sh
+          mkdir -p /mnt/bin
+          ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
+
+          # Install a configuration.nix.
+          mkdir -p /mnt/etc/nixos /mnt/boot/grub
+          cp ${./brightbox-config.nix} /mnt/etc/nixos/configuration.nix
+
+          # Generate the GRUB menu.
+          ln -s vda /dev/sda
+          chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
+
+          umount /mnt/proc /mnt/dev /mnt/sys
+          umount /mnt
+        ''
+    );
+
+  fileSystems."/".label = "nixos";
+
+  # Generate a GRUB menu.  Amazon's pv-grub uses this to boot our kernel/initrd.
+  boot.loader.grub.device = "/dev/vda";
+  boot.loader.timeout = 0;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  # Allow root logins only using the SSH key that the user specified
+  # at instance creation time.
+  services.openssh.enable = true;
+  services.openssh.permitRootLogin = "prohibit-password";
+
+  # Force getting the hostname from Google Compute.
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  environment.systemPackages = [ pkgs.cryptsetup ];
+
+  systemd.services.fetch-ec2-data =
+    { description = "Fetch EC2 Data";
+
+      wantedBy = [ "multi-user.target" "sshd.service" ];
+      before = [ "sshd.service" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      path = [ pkgs.wget pkgs.iproute ];
+
+      script =
+        ''
+          wget="wget -q --retry-connrefused -O -"
+
+          ${optionalString (config.networking.hostName == "") ''
+            echo "setting host name..."
+            ${pkgs.nettools}/bin/hostname $($wget http://169.254.169.254/latest/meta-data/hostname)
+          ''}
+
+          # Don't download the SSH key if it has already been injected
+          # into the image (a Nova feature).
+          if ! [ -e /root/.ssh/authorized_keys ]; then
+              echo "obtaining SSH key..."
+              mkdir -m 0700 -p /root/.ssh
+              $wget http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key > /root/key.pub
+              if [ $? -eq 0 -a -e /root/key.pub ]; then
+                  if ! grep -q -f /root/key.pub /root/.ssh/authorized_keys; then
+                      cat /root/key.pub >> /root/.ssh/authorized_keys
+                      echo "new key added to authorized_keys"
+                  fi
+                  chmod 600 /root/.ssh/authorized_keys
+                  rm -f /root/key.pub
+              fi
+          fi
+
+          # Extract the intended SSH host key for this machine from
+          # the supplied user data, if available.  Otherwise sshd will
+          # generate one normally.
+          $wget http://169.254.169.254/2011-01-01/user-data > /root/user-data || true
+          key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' /root/user-data)"
+          key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' /root/user-data)"
+          if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
+              mkdir -m 0755 -p /etc/ssh
+              (umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
+              echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
+          fi
+        '';
+
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+    };
+
+}
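
For reference, a sketch of a configuration from which the Brightbox image can be built; the compressed nixos.qcow2 is exposed as config.system.build.brightboxImage, and brightbox-config.nix above ends up inside the image as /etc/nixos/configuration.nix:

    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/brightbox-image.nix" ];
    }
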
diff --git a/nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix b/nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix
new file mode 100644
index 000000000000..78afebdc5dd3
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix
@@ -0,0 +1,40 @@
+{ lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=tty0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    # Allow root logins
+    services.openssh = {
+      enable = true;
+      permitRootLogin = "prohibit-password";
+    };
+
+    # Cloud-init configuration.
+    services.cloud-init.enable = true;
+    # Wget is needed for setting password. This is of little use as
+    # root password login is disabled above.
+    environment.systemPackages = [ pkgs.wget ];
+    # Only enable CloudStack datasource for faster boot speed.
+    environment.etc."cloud/cloud.cfg.d/99_cloudstack.cfg".text = ''
+      datasource:
+        CloudStack: {}
+        None: {}
+      datasource_list: ["CloudStack"]
+    '';
+  };
+}
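
A sketch of a CloudStack guest configuration built on this module; the extra option is illustrative of layering site-specific settings on top:

    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/cloudstack-config.nix" ];
      time.timeZone = "UTC";  # illustrative
    }
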
diff --git a/nixpkgs/nixos/modules/virtualisation/container-config.nix b/nixpkgs/nixos/modules/virtualisation/container-config.nix
new file mode 100644
index 000000000000..6ff6bdd30c20
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/container-config.nix
@@ -0,0 +1,31 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  config = mkIf config.boot.isContainer {
+
+    # Disable some features that are not useful in a container.
+    nix.optimise.automatic = mkDefault false; # the store is host managed
+    services.udisks2.enable = mkDefault false;
+    powerManagement.enable = mkDefault false;
+    documentation.nixos.enable = mkDefault false;
+
+    networking.useHostResolvConf = mkDefault true;
+
+    # Containers should be light-weight, so start sshd on demand.
+    services.openssh.startWhenNeeded = mkDefault true;
+
+    # Shut up warnings about not having a boot loader.
+    system.build.installBootLoader = "${pkgs.coreutils}/bin/true";
+
+    # Not supported in systemd-nspawn containers.
+    security.audit.enable = false;
+
+    # Use the host's nix-daemon.
+    environment.variables.NIX_REMOTE = "daemon";
+
+  };
+
+}
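
These defaults apply automatically to any configuration with boot.isContainer set, as in this minimal sketch of a container-targeted system:

    {
      boot.isContainer = true;
    }
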
diff --git a/nixpkgs/nixos/modules/virtualisation/containers.nix b/nixpkgs/nixos/modules/virtualisation/containers.nix
new file mode 100644
index 000000000000..7d184575640b
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/containers.nix
@@ -0,0 +1,125 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.virtualisation.containers;
+
+  inherit (lib) mkOption types;
+
+  # Once https://github.com/NixOS/nixpkgs/pull/75584 is merged we can use the TOML generator
+  toTOML = name: value: pkgs.runCommandNoCC name {
+    nativeBuildInputs = [ pkgs.remarshal ];
+    value = builtins.toJSON value;
+    passAsFile = [ "value" ];
+  } ''
+    json2toml "$valuePath" "$out"
+  '';
+
+  # Copy configuration files to avoid having the entire sources in the system closure
+  copyFile = filePath: pkgs.runCommandNoCC (builtins.unsafeDiscardStringContext (builtins.baseNameOf filePath)) {} ''
+    cp ${filePath} $out
+  '';
+in
+{
+  meta = {
+    maintainers = [] ++ lib.teams.podman.members;
+  };
+
+  options.virtualisation.containers = {
+
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option enables the common /etc/containers configuration module.
+        '';
+      };
+
+    registries = {
+      search = mkOption {
+        type = types.listOf types.str;
+        default = [ "docker.io" "quay.io" ];
+        description = ''
+          List of repositories to search.
+        '';
+      };
+
+      insecure = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = ''
+          List of insecure repositories.
+        '';
+      };
+
+      block = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = ''
+          List of blocked repositories.
+        '';
+      };
+    };
+
+    policy = mkOption {
+      default = {};
+      type = types.attrs;
+      example = lib.literalExample ''
+        {
+          default = [ { type = "insecureAcceptAnything"; } ];
+          transports = {
+            docker-daemon = {
+              "" = [ { type = "insecureAcceptAnything"; } ];
+            };
+          };
+        }
+      '';
+      description = ''
+        Signature verification policy file.
+        If this option is empty the default policy file from
+        <literal>skopeo</literal> will be used.
+      '';
+    };
+
+    users = mkOption {
+      default = [];
+      type = types.listOf types.str;
+      description = ''
+        List of users to set up subuid/subgid mappings for.
+        This is a requirement for running rootless containers.
+      '';
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    environment.etc."containers/registries.conf".source = toTOML "registries.conf" {
+      registries = lib.mapAttrs (n: v: { registries = v; }) cfg.registries;
+    };
+
+    users.extraUsers = builtins.listToAttrs (
+      (
+        builtins.foldl' (
+          acc: user: {
+            values = acc.values ++ [
+              {
+                name = user;
+                value = {
+                  subUidRanges = [ { startUid = acc.offset; count = 65536; } ];
+                  subGidRanges = [ { startGid = acc.offset; count = 65536; } ];
+                };
+              }
+            ];
+            offset = acc.offset + 65536;
+          }
+        )
+        { values = []; offset = 100000; } (lib.unique cfg.users)
+      ).values
+    );
+
+    environment.etc."containers/policy.json".source =
+      if cfg.policy != {} then pkgs.writeText "policy.json" (builtins.toJSON cfg.policy)
+      else copyFile "${pkgs.skopeo.src}/default-policy.json";
+  };
+
+}
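
A hypothetical configuration exercising these options; the insecure registry and the user name are illustrative, and each listed user gets a 65536-wide subuid/subgid range as implemented above:

    {
      virtualisation.containers = {
        enable = true;
        registries.search = [ "docker.io" "quay.io" ];
        registries.insecure = [ "registry.internal.example" ];  # illustrative
        users = [ "alice" ];                                     # illustrative
      };
    }
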
diff --git a/nixpkgs/nixos/modules/virtualisation/cri-o.nix b/nixpkgs/nixos/modules/virtualisation/cri-o.nix
new file mode 100644
index 000000000000..f267c97b1788
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/cri-o.nix
@@ -0,0 +1,136 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.cri-o;
+
+  crioPackage = (pkgs.cri-o.override { inherit (cfg) extraPackages; });
+
+  # Copy configuration files to avoid having the entire sources in the system closure
+  copyFile = filePath: pkgs.runCommandNoCC (builtins.unsafeDiscardStringContext (builtins.baseNameOf filePath)) {} ''
+    cp ${filePath} $out
+  '';
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "virtualisation" "cri-o" "registries" ] [ "virtualisation" "containers" "registries" "search" ])
+  ];
+
+  meta = {
+    maintainers = lib.teams.podman.members;
+  };
+
+  options.virtualisation.cri-o = {
+    enable = mkEnableOption "Container Runtime Interface for OCI (CRI-O)";
+
+    storageDriver = mkOption {
+      type = types.enum [ "btrfs" "overlay" "vfs" ];
+      default = "overlay";
+      description = "Storage driver to be used";
+    };
+
+    logLevel = mkOption {
+      type = types.enum [ "trace" "debug" "info" "warn" "error" "fatal" ];
+      default = "info";
+      description = "Log level to be used";
+    };
+
+    pauseImage = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Override the default pause image for pod sandboxes";
+      example = [ "k8s.gcr.io/pause:3.2" ];
+    };
+
+    pauseCommand = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Override the default pause command";
+      example = [ "/pause" ];
+    };
+
+    runtime = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Override the default runtime";
+      example = [ "crun" ];
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = [ ];
+      example = lib.literalExample ''
+        [
+          pkgs.gvisor
+        ]
+      '';
+      description = ''
+        Extra packages to be installed in the CRI-O wrapper.
+      '';
+    };
+
+    package = lib.mkOption {
+      type = types.package;
+      default = crioPackage;
+      internal = true;
+      description = ''
+        The final CRI-O package (including extra packages).
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package pkgs.cri-tools ];
+
+    environment.etc."crictl.yaml".source = copyFile "${pkgs.cri-o-unwrapped.src}/crictl.yaml";
+
+    environment.etc."crio/crio.conf".text = ''
+      [crio]
+      storage_driver = "${cfg.storageDriver}"
+
+      [crio.image]
+      ${optionalString (cfg.pauseImage != null) ''pause_image = "${cfg.pauseImage}"''}
+      ${optionalString (cfg.pauseCommand != null) ''pause_command = "${cfg.pauseCommand}"''}
+
+      [crio.network]
+      plugin_dirs = ["${pkgs.cni-plugins}/bin/"]
+
+      [crio.runtime]
+      cgroup_manager = "systemd"
+      log_level = "${cfg.logLevel}"
+      manage_ns_lifecycle = true
+
+      ${optionalString (cfg.runtime != null) ''
+      default_runtime = "${cfg.runtime}"
+      [crio.runtime.runtimes]
+      [crio.runtime.runtimes.${cfg.runtime}]
+      ''}
+    '';
+
+    environment.etc."cni/net.d/10-crio-bridge.conf".source = copyFile "${pkgs.cri-o-unwrapped.src}/contrib/cni/10-crio-bridge.conf";
+
+    # Enable common /etc/containers configuration
+    virtualisation.containers.enable = true;
+
+    systemd.services.crio = {
+      description = "Container Runtime Interface for OCI (CRI-O)";
+      documentation = [ "https://github.com/cri-o/cri-o" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ cfg.package ];
+      serviceConfig = {
+        Type = "notify";
+        ExecStart = "${cfg.package}/bin/crio";
+        ExecReload = "/bin/kill -s HUP $MAINPID";
+        TasksMax = "infinity";
+        LimitNOFILE = "1048576";
+        LimitNPROC = "1048576";
+        LimitCORE = "infinity";
+        OOMScoreAdjust = "-999";
+        TimeoutStartSec = "0";
+        Restart = "on-abnormal";
+      };
+    };
+  };
+}
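
A sketch enabling CRI-O; the non-default values below reuse the examples from the option declarations above:

    { pkgs, ... }:
    {
      virtualisation.cri-o = {
        enable = true;
        storageDriver = "btrfs";               # default is "overlay"
        pauseImage = "k8s.gcr.io/pause:3.2";
        extraPackages = [ pkgs.gvisor ];
      };
    }
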
diff --git a/nixpkgs/nixos/modules/virtualisation/digital-ocean-config.nix b/nixpkgs/nixos/modules/virtualisation/digital-ocean-config.nix
new file mode 100644
index 000000000000..88cb0cd450e8
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/digital-ocean-config.nix
@@ -0,0 +1,197 @@
+{ config, pkgs, lib, modulesPath, ... }:
+with lib;
+{
+  imports = [
+    (modulesPath + "/profiles/qemu-guest.nix")
+    (modulesPath + "/virtualisation/digital-ocean-init.nix")
+  ];
+  options.virtualisation.digitalOcean = with types; {
+    setRootPassword = mkOption {
+      type = bool;
+      default = false;
+      example = true;
+      description = "Whether to set the root password from the Digital Ocean metadata";
+    };
+    setSshKeys = mkOption {
+      type = bool;
+      default = true;
+      example = true;
+      description = "Whether to fetch ssh keys from Digital Ocean";
+    };
+    seedEntropy = mkOption {
+      type = bool;
+      default = true;
+      example = true;
+      description = "Whether to run the kernel RNG entropy seeding script from the Digital Ocean vendor data";
+    };
+  };
+  config =
+    let
+      cfg = config.virtualisation.digitalOcean;
+      hostName = config.networking.hostName;
+      doMetadataFile = "/run/do-metadata/v1.json";
+    in mkMerge [{
+      fileSystems."/" = {
+        device = "/dev/disk/by-label/nixos";
+        autoResize = true;
+        fsType = "ext4";
+      };
+      boot = {
+        growPartition = true;
+        kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
+        initrd.kernelModules = [ "virtio_scsi" ];
+        kernelModules = [ "virtio_pci" "virtio_net" ];
+        loader = {
+          grub.device = "/dev/vda";
+          timeout = 0;
+          grub.configurationLimit = 0;
+        };
+      };
+      services.openssh = {
+        enable = mkDefault true;
+        passwordAuthentication = mkDefault false;
+      };
+      services.do-agent.enable = mkDefault true;
+      networking = {
+        hostName = mkDefault ""; # use Digital Ocean metadata server
+      };
+
+      /* Check for and wait for the metadata server to become reachable.
+       * This serves as a dependency for all the other metadata services. */
+      systemd.services.digitalocean-metadata = {
+        path = [ pkgs.curl ];
+        description = "Get host metadata provided by Digitalocean";
+        script = ''
+          set -eu
+          DO_DELAY_ATTEMPTS=0
+          while ! curl -fsSL -o $RUNTIME_DIRECTORY/v1.json http://169.254.169.254/metadata/v1.json; do
+            DO_DELAY_ATTEMPTS=$((DO_DELAY_ATTEMPTS + 1))
+            if (( $DO_DELAY_ATTEMPTS >= $DO_DELAY_ATTEMPTS_MAX )); then
+              echo "giving up"
+              exit 1
+            fi
+
+            echo "metadata unavailable, trying again in 1s..."
+            sleep 1
+          done
+          chmod 600 $RUNTIME_DIRECTORY/v1.json
+          '';
+        environment = {
+          DO_DELAY_ATTEMPTS_MAX = "10";
+        };
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          RuntimeDirectory = "do-metadata";
+          RuntimeDirectoryPreserve = "yes";
+        };
+        unitConfig = {
+          ConditionPathExists = "!${doMetadataFile}";
+          After = [ "network-pre.target" ] ++
+            optional config.networking.dhcpcd.enable "dhcpcd.service" ++
+            optional config.systemd.network.enable "systemd-networkd.service";
+        };
+      };
+
+      /* Fetch the root password from the digital ocean metadata.
+       * There is no specific route for this, so we use jq to get
+       * it from the One Big JSON metadata blob */
+      systemd.services.digitalocean-set-root-password = mkIf cfg.setRootPassword {
+        path = [ pkgs.shadow pkgs.jq ];
+        description = "Set root password provided by Digitalocean";
+        wantedBy = [ "multi-user.target" ];
+        script = ''
+          set -eo pipefail
+          ROOT_PASSWORD=$(jq -er '.auth_key' ${doMetadataFile})
+          echo "root:$ROOT_PASSWORD" | chpasswd
+          mkdir -p /etc/do-metadata/set-root-password
+          '';
+        unitConfig = {
+          ConditionPathExists = "!/etc/do-metadata/set-root-password";
+          Before = optional config.services.openssh.enable "sshd.service";
+          After = [ "digitalocean-metadata.service" ];
+          Requires = [ "digitalocean-metadata.service" ];
+        };
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+
+      /* Set the hostname from Digital Ocean, unless the user configured it in
+       * the NixOS configuration. The cached metadata file isn't used here
+       * because the hostname is a mutable part of the droplet. */
+      systemd.services.digitalocean-set-hostname = mkIf (hostName == "") {
+        path = [ pkgs.curl pkgs.nettools ];
+        description = "Set hostname provided by Digitalocean";
+        wantedBy = [ "network.target" ];
+        script = ''
+          set -e
+          DIGITALOCEAN_HOSTNAME=$(curl -fsSL http://169.254.169.254/metadata/v1/hostname)
+          hostname "$DIGITALOCEAN_HOSTNAME"
+          if [[ ! -e /etc/hostname || -w /etc/hostname ]]; then
+            printf "%s\n" "$DIGITALOCEAN_HOSTNAME" > /etc/hostname
+          fi
+        '';
+        unitConfig = {
+          Before = [ "network.target" ];
+          After = [ "digitalocean-metadata.service" ];
+          Wants = [ "digitalocean-metadata.service" ];
+        };
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+
+      /* Fetch the ssh keys for root from Digital Ocean */
+      systemd.services.digitalocean-ssh-keys = mkIf cfg.setSshKeys {
+        description = "Set root ssh keys provided by Digital Ocean";
+        wantedBy = [ "multi-user.target" ];
+        path = [ pkgs.jq ];
+        script = ''
+          set -e
+          mkdir -m 0700 -p /root/.ssh
+          jq -er '.public_keys[]' ${doMetadataFile} > /root/.ssh/authorized_keys
+          chmod 600 /root/.ssh/authorized_keys
+        '';
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+        unitConfig = {
+          ConditionPathExists = "!/root/.ssh/authorized_keys";
+          Before = optional config.services.openssh.enable "sshd.service";
+          After = [ "digitalocean-metadata.service" ];
+          Requires = [ "digitalocean-metadata.service" ];
+        };
+      };
+
+      /* Initialize the RNG by running the entropy-seed script from the
+       * Digital Ocean metadata
+       */
+      systemd.services.digitalocean-entropy-seed = mkIf cfg.seedEntropy {
+        description = "Run the kernel RNG entropy seeding script from the Digital Ocean vendor data";
+        wantedBy = [ "network.target" ];
+        path = [ pkgs.jq pkgs.mpack ];
+        script = ''
+          set -eo pipefail
+          TEMPDIR=$(mktemp -d)
+          jq -er '.vendor_data' ${doMetadataFile} | munpack -tC $TEMPDIR
+          ENTROPY_SEED=$(grep -rl "DigitalOcean Entropy Seed script" $TEMPDIR)
+          ${pkgs.runtimeShell} $ENTROPY_SEED
+          rm -rf $TEMPDIR
+          '';
+        unitConfig = {
+          Before = [ "network.target" ];
+          After = [ "digitalocean-metadata.service" ];
+          Requires = [ "digitalocean-metadata.service" ];
+        };
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+
+    }
+  ];
+  meta.maintainers = with maintainers; [ arianvp eamsden ];
+}
+
diff --git a/nixpkgs/nixos/modules/virtualisation/digital-ocean-image.nix b/nixpkgs/nixos/modules/virtualisation/digital-ocean-image.nix
new file mode 100644
index 000000000000..b582e235d435
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/digital-ocean-image.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.digitalOceanImage;
+in
+{
+
+  imports = [ ./digital-ocean-config.nix ];
+
+  options = {
+    virtualisation.digitalOceanImage.diskSize = mkOption {
+      type = with types; int;
+      default = 4096;
+      description = ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+
+    virtualisation.digitalOceanImage.configFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = ''
+        A path to a configuration file which will be placed at
+        <literal>/etc/nixos/configuration.nix</literal> and be used when switching
+        to a new configuration. If set to <literal>null</literal>, a default
+        configuration is used that imports
+        <literal>(modulesPath + "/virtualisation/digital-ocean-config.nix")</literal>.
+      '';
+    };
+
+    virtualisation.digitalOceanImage.compressionMethod = mkOption {
+      type = types.enum [ "gzip" "bzip2" ];
+      default = "gzip";
+      example = "bzip2";
+      description = ''
+        Disk image compression method. Choose bzip2 to produce smaller images that
+        take longer to build but consume less metered storage space on your
+        Digital Ocean account.
+      '';
+    };
+  };
+
+  #### implementation
+  config = {
+
+    system.build.digitalOceanImage = import ../../lib/make-disk-image.nix {
+      name = "digital-ocean-image";
+      format = "qcow2";
+      postVM = let
+        compress = {
+          "gzip" = "${pkgs.gzip}/bin/gzip";
+          "bzip2" = "${pkgs.bzip2}/bin/bzip2";
+        }.${cfg.compressionMethod};
+      in ''
+        ${compress} $diskImage
+      '';
+      configFile = if cfg.configFile == null
+        then config.virtualisation.digitalOcean.defaultConfigFile
+        else cfg.configFile;
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ arianvp eamsden ];
+
+}
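+
+# Example usage (a sketch, not part of the module: the nix-build invocation is
+# the standard NixOS pattern and ./configuration.nix is a hypothetical file
+# that imports this module):
+#
+#   $ nix-build '<nixpkgs/nixos>' \
+#       -I nixos-config=./configuration.nix \
+#       -A config.system.build.digitalOceanImage
+#
+# where ./configuration.nix contains something like:
+#
+#   { modulesPath, ... }:
+#   {
+#     imports = [ (modulesPath + "/virtualisation/digital-ocean-image.nix") ];
+#     virtualisation.digitalOceanImage.compressionMethod = "bzip2";
+#   }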
diff --git a/nixpkgs/nixos/modules/virtualisation/digital-ocean-init.nix b/nixpkgs/nixos/modules/virtualisation/digital-ocean-init.nix
new file mode 100644
index 000000000000..02f4de009fa8
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/digital-ocean-init.nix
@@ -0,0 +1,95 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.virtualisation.digitalOcean;
+  defaultConfigFile = pkgs.writeText "digitalocean-configuration.nix" ''
+    { modulesPath, lib, ... }:
+    {
+      imports = lib.optional (builtins.pathExists ./do-userdata.nix) ./do-userdata.nix ++ [
+        (modulesPath + "/virtualisation/digital-ocean-config.nix")
+      ];
+    }
+  '';
+in {
+  options.virtualisation.digitalOcean.rebuildFromUserData = mkOption {
+    type = types.bool;
+    default = true;
+    example = true;
+    description = "Whether to reconfigure the system from Digital Ocean user data";
+  };
+  options.virtualisation.digitalOcean.defaultConfigFile = mkOption {
+    type = types.path;
+    default = defaultConfigFile;
+    defaultText = ''
+      The default configuration imports user-data if applicable and
+      <literal>(modulesPath + "/virtualisation/digital-ocean-config.nix")</literal>.
+    '';
+    description = ''
+      A path to a configuration file which will be placed at
+      <literal>/etc/nixos/configuration.nix</literal> and be used when switching to
+      a new configuration.
+    '';
+  };
+
+  config = {
+    systemd.services.digitalocean-init = mkIf cfg.rebuildFromUserData {
+      description = "Reconfigure the system from Digital Ocean userdata on startup";
+      wantedBy = [ "network-online.target" ];
+      unitConfig = {
+        ConditionPathExists = "!/etc/nixos/do-userdata.nix";
+        After = [ "digitalocean-metadata.service" "network-online.target" ];
+        Requires = [ "digitalocean-metadata.service" ];
+        X-StopOnRemoval = false;
+      };
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+      restartIfChanged = false;
+      path = [ pkgs.jq pkgs.gnused pkgs.gnugrep pkgs.systemd config.nix.package config.system.build.nixos-rebuild ];
+      environment = {
+        HOME = "/root";
+        NIX_PATH = concatStringsSep ":" [
+          "/nix/var/nix/profiles/per-user/root/channels/nixos"
+          "nixos-config=/etc/nixos/configuration.nix"
+          "/nix/var/nix/profiles/per-user/root/channels"
+        ];
+      };
+      script = ''
+        set -e
+        echo "attempting to fetch configuration from Digital Ocean user data..."
+        userData=$(mktemp)
+        if jq -er '.user_data' /run/do-metadata/v1.json > $userData; then
+          # If the user-data looks like it could be a nix expression,
+          # copy it over. Also, look for a magic three-hash comment and set
+          # that as the channel.
+          if nix-instantiate --parse $userData > /dev/null; then
+            channels="$(grep '^###' "$userData" | sed 's|###\s*||')"
+            printf "%s" "$channels" | while read channel; do
+              echo "writing channel: $channel"
+            done
+
+            if [[ -n "$channels" ]]; then
+              printf "%s" "$channels" > /root/.nix-channels
+              nix-channel --update
+            fi
+
+            echo "setting configuration from Digital Ocean user data"
+            cp "$userData" /etc/nixos/do-userdata.nix
+            if [[ ! -e /etc/nixos/configuration.nix ]]; then
+              install -m0644 ${cfg.defaultConfigFile} /etc/nixos/configuration.nix
+            fi
+          else
+            echo "user data does not appear to be a Nix expression; ignoring"
+            exit
+          fi
+
+          nixos-rebuild switch
+        else
+          echo "no user data is available"
+        fi
+        '';
+    };
+  };
+  meta.maintainers = with maintainers; [ arianvp eamsden ];
+}
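+
+# Example Digital Ocean user data consumed by the service above (a sketch;
+# the channel URL and package are illustrative). Lines starting with ### are
+# written to /root/.nix-channels before nixos-rebuild switch runs, and the
+# whole file must parse as a Nix expression:
+#
+#   ### https://nixos.org/channels/nixos-20.03 nixos
+#   { pkgs, ... }:
+#   {
+#     environment.systemPackages = [ pkgs.vim ];
+#   }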
diff --git a/nixpkgs/nixos/modules/virtualisation/docker-image.nix b/nixpkgs/nixos/modules/virtualisation/docker-image.nix
new file mode 100644
index 000000000000..baac3a35a78e
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker-image.nix
@@ -0,0 +1,57 @@
+{ ... }:
+
+{
+  imports = [
+    ../profiles/docker-container.nix # FIXME, shouldn't include something from profiles/
+  ];
+
+  boot.postBootCommands =
+    ''
+      # Set virtualisation to docker
+      echo "docker" > /run/systemd/container
+    '';
+
+  # iptables does not work in Docker.
+  networking.firewall.enable = false;
+
+  # Socket-activated SSH presents problems in Docker.
+  services.openssh.startWhenNeeded = false;
+}
+
+# Example usage:
+#
+## default.nix
+# let
+#   nixos = import <nixpkgs/nixos> {
+#     configuration = ./configuration.nix;
+#     system = "x86_64-linux";
+#   };
+# in
+# nixos.config.system.build.tarball
+#
+## configuration.nix
+# { pkgs, config, lib, ... }:
+# {
+#   imports = [
+#     <nixpkgs/nixos/modules/virtualisation/docker-image.nix>
+#     <nixpkgs/nixos/modules/installer/cd-dvd/channel.nix>
+#   ];
+#
+#   documentation.doc.enable = false;
+#
+#   environment.systemPackages = with pkgs; [
+#     bashInteractive
+#     cacert
+#     nix
+#   ];
+# }
+#
+## Run
+# Build the tarball:
+# $ nix-build default.nix
+# Load into docker:
+# $ docker import result/tarball/nixos-system-*.tar.xz nixos-docker
+# Boot into systemd:
+# $ docker run --privileged -it nixos-docker /init
+# Log into the container:
+# $ docker exec -it <container-name> /run/current-system/sw/bin/bash
diff --git a/nixpkgs/nixos/modules/virtualisation/docker-preloader.nix b/nixpkgs/nixos/modules/virtualisation/docker-preloader.nix
new file mode 100644
index 000000000000..6ab83058dee1
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker-preloader.nix
@@ -0,0 +1,134 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+with builtins;
+
+let
+  cfg = config.virtualisation;
+
+  sanitizeImageName = image: replaceStrings ["/"] ["-"] image.imageName;
+  hash = drv: head (split "-" (baseNameOf drv.outPath));
+  # The label of an ext4 FS is limited to 16 bytes
+  labelFromImage = image: substring 0 16 (hash image);
+
+  # The Docker image is loaded and some files from /var/lib/docker/
+  # are written into a qcow image.
+  preload = image: pkgs.vmTools.runInLinuxVM (
+    pkgs.runCommand "docker-preload-image-${sanitizeImageName image}" {
+      buildInputs = with pkgs; [ docker e2fsprogs utillinux curl kmod ];
+      preVM = pkgs.vmTools.createEmptyImage {
+        size = cfg.dockerPreloader.qcowSize;
+        fullName = "docker-deamon-image.qcow2";
+      };
+    }
+    ''
+      mkfs.ext4 /dev/vda
+      e2label /dev/vda ${labelFromImage image}
+      mkdir -p /var/lib/docker
+      mount -t ext4 /dev/vda /var/lib/docker
+
+      modprobe overlay
+
+      # from https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
+      mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
+      cd /sys/fs/cgroup
+      for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
+        mkdir -p $sys
+        if ! mountpoint -q $sys; then
+          if ! mount -n -t cgroup -o $sys cgroup $sys; then
+            rmdir $sys || true
+          fi
+        fi
+      done
+
+      dockerd -H tcp://127.0.0.1:5555 -H unix:///var/run/docker.sock &
+
+      until $(curl --output /dev/null --silent --connect-timeout 2 http://127.0.0.1:5555); do
+        printf '.'
+        sleep 1
+      done
+
+      docker load -i ${image}
+
+      kill %1
+      find /var/lib/docker/ -maxdepth 1 -mindepth 1 -not -name "image" -not -name "overlay2" | xargs rm -rf
+    '');
+
+  preloadedImages = map preload cfg.dockerPreloader.images;
+
+in
+
+{
+  options.virtualisation.dockerPreloader = {
+    images = mkOption {
+      default = [ ];
+      type = types.listOf types.package;
+      description =
+      ''
+        A list of Docker images to preload (in the /var/lib/docker directory).
+      '';
+    };
+    qcowSize = mkOption {
+      default = 1024;
+      type = types.int;
+      description =
+      ''
+        The size (MB) of qcow files.
+      '';
+    };
+  };
+
+  config = mkIf (cfg.dockerPreloader.images != []) {
+    assertions = [{
+      # If docker.storageDriver is null, Docker chooses the storage
+      # driver itself, so in that case we cannot be sure overlay2 is used.
+      assertion = cfg.docker.storageDriver == "overlay2"
+        || cfg.docker.storageDriver == "overlay"
+        || cfg.docker.storageDriver == null;
+      message = "The Docker image Preloader only works with overlay2 storage driver!";
+    }];
+
+    virtualisation.qemu.options =
+      map (path: "-drive if=virtio,file=${path}/disk-image.qcow2,readonly,media=cdrom,format=qcow2")
+      preloadedImages;
+
+
+    # All attached QCOW files are mounted and their contents are linked
+    # to /var/lib/docker/ in order to make the images available.
+    systemd.services.docker-preloader = {
+      description = "Preloaded Docker images";
+      wantedBy = ["docker.service"];
+      after = ["network.target"];
+      path = with pkgs; [ mount rsync jq ];
+      script = ''
+        mkdir -p /var/lib/docker/overlay2/l /var/lib/docker/image/overlay2
+        echo '{}' > /tmp/repositories.json
+
+        for i in ${concatStringsSep " " (map labelFromImage cfg.dockerPreloader.images)}; do
+          mkdir -p /mnt/docker-images/$i
+
+          # The ext4 label is limited to 16 bytes
+          mount /dev/disk/by-label/$(echo $i | cut -c1-16) -o ro,noload /mnt/docker-images/$i
+
+          find /mnt/docker-images/$i/overlay2/ -maxdepth 1 -mindepth 1 -not -name l\
+             -exec ln -s '{}' /var/lib/docker/overlay2/ \;
+          cp -P /mnt/docker-images/$i/overlay2/l/* /var/lib/docker/overlay2/l/
+
+          rsync -a /mnt/docker-images/$i/image/ /var/lib/docker/image/
+
+          # Accumulate image definitions
+          cp /tmp/repositories.json /tmp/repositories.json.tmp
+          jq -s '.[0] * .[1]' \
+            /tmp/repositories.json.tmp \
+            /mnt/docker-images/$i/image/overlay2/repositories.json \
+            > /tmp/repositories.json
+        done
+
+        mv /tmp/repositories.json /var/lib/docker/image/overlay2/repositories.json
+      '';
+      serviceConfig = {
+        Type = "oneshot";
+      };
+    };
+  };
+}
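+
+# Example usage in a qemu-vm based NixOS configuration (a sketch; the image
+# attribute pkgs.dockerTools.examples.nginx is only illustrative):
+#
+#   virtualisation.dockerPreloader = {
+#     images = [ pkgs.dockerTools.examples.nginx ];
+#     qcowSize = 2048;
+#   };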
diff --git a/nixpkgs/nixos/modules/virtualisation/docker.nix b/nixpkgs/nixos/modules/virtualisation/docker.nix
new file mode 100644
index 000000000000..7d196a46276a
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker.nix
@@ -0,0 +1,223 @@
+# Systemd services for docker.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.docker;
+  proxy_env = config.networking.proxy.envVars;
+
+in
+
+{
+  ###### interface
+
+  options.virtualisation.docker = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            This option enables docker, a daemon that manages
+            Linux containers. Users in the "docker" group can interact with
+            the daemon (e.g. to start or stop containers) using the
+            <command>docker</command> command line tool.
+          '';
+      };
+
+    listenOptions =
+      mkOption {
+        type = types.listOf types.str;
+        default = ["/run/docker.sock"];
+        description =
+          ''
+            A list of unix and tcp sockets that docker should listen on. The format follows
+            ListenStream as described in systemd.socket(5).
+          '';
+      };
+
+    enableOnBoot =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            When enabled, dockerd is started on boot. This is required for
+            containers which are created with the
+            <literal>--restart=always</literal> flag to work. If this option is
+            disabled, docker might be started on demand by socket activation.
+          '';
+      };
+
+    enableNvidia =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable nvidia-docker wrapper, supporting NVIDIA GPUs inside docker containers.
+        '';
+      };
+
+    liveRestore =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            Allow dockerd to be restarted without affecting running containers.
+            This option is incompatible with docker swarm.
+          '';
+      };
+
+    storageDriver =
+      mkOption {
+        type = types.nullOr (types.enum ["aufs" "btrfs" "devicemapper" "overlay" "overlay2" "zfs"]);
+        default = null;
+        description =
+          ''
+            This option determines which Docker storage driver to use. By default
+            it lets docker automatically choose the preferred storage driver.
+          '';
+      };
+
+    logDriver =
+      mkOption {
+        type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs"];
+        default = "journald";
+        description =
+          ''
+            This option determines which Docker log driver to use.
+          '';
+      };
+
+    extraOptions =
+      mkOption {
+        type = types.separatedString " ";
+        default = "";
+        description =
+          ''
+            The extra command-line options to pass to
+            <command>docker</command> daemon.
+          '';
+      };
+
+    autoPrune = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to periodically prune Docker resources. If enabled, a
+          systemd timer will run <literal>docker system prune -f</literal>
+          as specified by the <literal>dates</literal> option.
+        '';
+      };
+
+      flags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--all" ];
+        description = ''
+          Any additional flags passed to <command>docker system prune</command>.
+        '';
+      };
+
+      dates = mkOption {
+        default = "weekly";
+        type = types.str;
+        description = ''
+          Specification (in the format described by
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>7</manvolnum></citerefentry>) of the time at
+          which the prune will occur.
+        '';
+      };
+    };
+
+    package = mkOption {
+      default = pkgs.docker;
+      type = types.package;
+      example = pkgs.docker-edge;
+      description = ''
+        Docker package to be used in the module.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [{
+      environment.systemPackages = [ cfg.package ]
+        ++ optional cfg.enableNvidia pkgs.nvidia-docker;
+      users.groups.docker.gid = config.ids.gids.docker;
+      systemd.packages = [ cfg.package ];
+
+      systemd.services.docker = {
+        wantedBy = optional cfg.enableOnBoot "multi-user.target";
+        environment = proxy_env;
+        serviceConfig = {
+          ExecStart = [
+            ""
+            ''
+              ${cfg.package}/bin/dockerd \
+                --group=docker \
+                --host=fd:// \
+                --log-driver=${cfg.logDriver} \
+                ${optionalString (cfg.storageDriver != null) "--storage-driver=${cfg.storageDriver}"} \
+                ${optionalString cfg.liveRestore "--live-restore" } \
+                ${optionalString cfg.enableNvidia "--add-runtime nvidia=${pkgs.nvidia-docker}/bin/nvidia-container-runtime" } \
+                ${cfg.extraOptions}
+            ''];
+          ExecReload=[
+            ""
+            "${pkgs.procps}/bin/kill -s HUP $MAINPID"
+          ];
+        };
+
+        path = [ pkgs.kmod ] ++ optional (cfg.storageDriver == "zfs") pkgs.zfs
+          ++ optional cfg.enableNvidia pkgs.nvidia-docker;
+      };
+
+      systemd.sockets.docker = {
+        description = "Docker Socket for the API";
+        wantedBy = [ "sockets.target" ];
+        socketConfig = {
+          ListenStream = cfg.listenOptions;
+          SocketMode = "0660";
+          SocketUser = "root";
+          SocketGroup = "docker";
+        };
+      };
+
+      systemd.services.docker-prune = {
+        description = "Prune docker resources";
+
+        restartIfChanged = false;
+        unitConfig.X-StopOnRemoval = false;
+
+        serviceConfig.Type = "oneshot";
+
+        script = ''
+          ${cfg.package}/bin/docker system prune -f ${toString cfg.autoPrune.flags}
+        '';
+
+        startAt = optional cfg.autoPrune.enable cfg.autoPrune.dates;
+      };
+
+      assertions = [
+        { assertion = cfg.enableNvidia -> config.hardware.opengl.driSupport32Bit or false;
+          message = "Option enableNvidia requires 32bit support libraries";
+        }];
+    }
+    (mkIf cfg.enableNvidia {
+      environment.etc."nvidia-container-runtime/config.toml".source = "${pkgs.nvidia-docker}/etc/config.toml";
+    })
+  ]);
+
+  imports = [
+    (mkRemovedOptionModule ["virtualisation" "docker" "socketActivation"] "This option was removed in favor of starting docker at boot")
+  ];
+
+}
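+
+# Example usage (a sketch; all values are illustrative):
+#
+#   virtualisation.docker = {
+#     enable = true;
+#     storageDriver = "overlay2";
+#     autoPrune.enable = true;
+#     autoPrune.dates = "weekly";
+#     extraOptions = "--insecure-registry registry.local:5000";
+#   };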
diff --git a/nixpkgs/nixos/modules/virtualisation/ec2-amis.nix b/nixpkgs/nixos/modules/virtualisation/ec2-amis.nix
new file mode 100644
index 000000000000..24de8cf1afbf
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ec2-amis.nix
@@ -0,0 +1,333 @@
+let self = {
+  "14.04".ap-northeast-1.hvm-ebs = "ami-71c6f470";
+  "14.04".ap-northeast-1.pv-ebs = "ami-4dcbf84c";
+  "14.04".ap-northeast-1.pv-s3 = "ami-8fc4f68e";
+  "14.04".ap-southeast-1.hvm-ebs = "ami-da280888";
+  "14.04".ap-southeast-1.pv-ebs = "ami-7a9dbc28";
+  "14.04".ap-southeast-1.pv-s3 = "ami-c4290996";
+  "14.04".ap-southeast-2.hvm-ebs = "ami-ab523e91";
+  "14.04".ap-southeast-2.pv-ebs = "ami-6769055d";
+  "14.04".ap-southeast-2.pv-s3 = "ami-15533f2f";
+  "14.04".eu-central-1.hvm-ebs = "ami-ba0234a7";
+  "14.04".eu-west-1.hvm-ebs = "ami-96cb63e1";
+  "14.04".eu-west-1.pv-ebs = "ami-b48c25c3";
+  "14.04".eu-west-1.pv-s3 = "ami-06cd6571";
+  "14.04".sa-east-1.hvm-ebs = "ami-01b90e1c";
+  "14.04".sa-east-1.pv-ebs = "ami-69e35474";
+  "14.04".sa-east-1.pv-s3 = "ami-61b90e7c";
+  "14.04".us-east-1.hvm-ebs = "ami-58ba3a30";
+  "14.04".us-east-1.pv-ebs = "ami-9e0583f6";
+  "14.04".us-east-1.pv-s3 = "ami-9cbe3ef4";
+  "14.04".us-west-1.hvm-ebs = "ami-0bc3d74e";
+  "14.04".us-west-1.pv-ebs = "ami-8b1703ce";
+  "14.04".us-west-1.pv-s3 = "ami-27ccd862";
+  "14.04".us-west-2.hvm-ebs = "ami-3bf1bf0b";
+  "14.04".us-west-2.pv-ebs = "ami-259bd515";
+  "14.04".us-west-2.pv-s3 = "ami-07094037";
+
+  "14.12".ap-northeast-1.hvm-ebs = "ami-24435f25";
+  "14.12".ap-northeast-1.pv-ebs = "ami-b0425eb1";
+  "14.12".ap-northeast-1.pv-s3 = "ami-fed3c6ff";
+  "14.12".ap-southeast-1.hvm-ebs = "ami-6c765d3e";
+  "14.12".ap-southeast-1.pv-ebs = "ami-6a765d38";
+  "14.12".ap-southeast-1.pv-s3 = "ami-d1bf9183";
+  "14.12".ap-southeast-2.hvm-ebs = "ami-af86f395";
+  "14.12".ap-southeast-2.pv-ebs = "ami-b386f389";
+  "14.12".ap-southeast-2.pv-s3 = "ami-69c5ae53";
+  "14.12".eu-central-1.hvm-ebs = "ami-4a497a57";
+  "14.12".eu-central-1.pv-ebs = "ami-4c497a51";
+  "14.12".eu-central-1.pv-s3 = "ami-60f2c27d";
+  "14.12".eu-west-1.hvm-ebs = "ami-d126a5a6";
+  "14.12".eu-west-1.pv-ebs = "ami-0126a576";
+  "14.12".eu-west-1.pv-s3 = "ami-deda5fa9";
+  "14.12".sa-east-1.hvm-ebs = "ami-2d239e30";
+  "14.12".sa-east-1.pv-ebs = "ami-35239e28";
+  "14.12".sa-east-1.pv-s3 = "ami-81e3519c";
+  "14.12".us-east-1.hvm-ebs = "ami-0c463a64";
+  "14.12".us-east-1.pv-ebs = "ami-ac473bc4";
+  "14.12".us-east-1.pv-s3 = "ami-00e18a68";
+  "14.12".us-west-1.hvm-ebs = "ami-ca534a8f";
+  "14.12".us-west-1.pv-ebs = "ami-3e534a7b";
+  "14.12".us-west-1.pv-s3 = "ami-2905196c";
+  "14.12".us-west-2.hvm-ebs = "ami-fb9dc3cb";
+  "14.12".us-west-2.pv-ebs = "ami-899dc3b9";
+  "14.12".us-west-2.pv-s3 = "ami-cb7f2dfb";
+
+  "15.09".ap-northeast-1.hvm-ebs = "ami-58cac236";
+  "15.09".ap-northeast-1.hvm-s3 = "ami-39c8c057";
+  "15.09".ap-northeast-1.pv-ebs = "ami-5ac9c134";
+  "15.09".ap-northeast-1.pv-s3 = "ami-03cec66d";
+  "15.09".ap-southeast-1.hvm-ebs = "ami-2fc2094c";
+  "15.09".ap-southeast-1.hvm-s3 = "ami-9ec308fd";
+  "15.09".ap-southeast-1.pv-ebs = "ami-95c00bf6";
+  "15.09".ap-southeast-1.pv-s3 = "ami-bfc00bdc";
+  "15.09".ap-southeast-2.hvm-ebs = "ami-996c4cfa";
+  "15.09".ap-southeast-2.hvm-s3 = "ami-3f6e4e5c";
+  "15.09".ap-southeast-2.pv-ebs = "ami-066d4d65";
+  "15.09".ap-southeast-2.pv-s3 = "ami-cc6e4eaf";
+  "15.09".eu-central-1.hvm-ebs = "ami-3f8c6b50";
+  "15.09".eu-central-1.hvm-s3 = "ami-5b836434";
+  "15.09".eu-central-1.pv-ebs = "ami-118c6b7e";
+  "15.09".eu-central-1.pv-s3 = "ami-2c977043";
+  "15.09".eu-west-1.hvm-ebs = "ami-9cf04aef";
+  "15.09".eu-west-1.hvm-s3 = "ami-2bea5058";
+  "15.09".eu-west-1.pv-ebs = "ami-c9e852ba";
+  "15.09".eu-west-1.pv-s3 = "ami-c6f64cb5";
+  "15.09".sa-east-1.hvm-ebs = "ami-6e52df02";
+  "15.09".sa-east-1.hvm-s3 = "ami-1852df74";
+  "15.09".sa-east-1.pv-ebs = "ami-4368e52f";
+  "15.09".sa-east-1.pv-s3 = "ami-f15ad79d";
+  "15.09".us-east-1.hvm-ebs = "ami-84a6a0ee";
+  "15.09".us-east-1.hvm-s3 = "ami-06a7a16c";
+  "15.09".us-east-1.pv-ebs = "ami-a4a1a7ce";
+  "15.09".us-east-1.pv-s3 = "ami-5ba8ae31";
+  "15.09".us-west-1.hvm-ebs = "ami-22c8bb42";
+  "15.09".us-west-1.hvm-s3 = "ami-a2ccbfc2";
+  "15.09".us-west-1.pv-ebs = "ami-10cebd70";
+  "15.09".us-west-1.pv-s3 = "ami-fa30429a";
+  "15.09".us-west-2.hvm-ebs = "ami-ce57b9ae";
+  "15.09".us-west-2.hvm-s3 = "ami-2956b849";
+  "15.09".us-west-2.pv-ebs = "ami-005fb160";
+  "15.09".us-west-2.pv-s3 = "ami-cd55bbad";
+
+  "16.03".ap-northeast-1.hvm-ebs = "ami-40619d21";
+  "16.03".ap-northeast-1.hvm-s3 = "ami-ce629eaf";
+  "16.03".ap-northeast-1.pv-ebs = "ami-ef639f8e";
+  "16.03".ap-northeast-1.pv-s3 = "ami-a1609cc0";
+  "16.03".ap-northeast-2.hvm-ebs = "ami-deca00b0";
+  "16.03".ap-northeast-2.hvm-s3 = "ami-a3b77dcd";
+  "16.03".ap-northeast-2.pv-ebs = "ami-7bcb0115";
+  "16.03".ap-northeast-2.pv-s3 = "ami-a2b77dcc";
+  "16.03".ap-south-1.hvm-ebs = "ami-0dff9562";
+  "16.03".ap-south-1.hvm-s3 = "ami-13f69c7c";
+  "16.03".ap-south-1.pv-ebs = "ami-0ef39961";
+  "16.03".ap-south-1.pv-s3 = "ami-e0c8a28f";
+  "16.03".ap-southeast-1.hvm-ebs = "ami-5e964a3d";
+  "16.03".ap-southeast-1.hvm-s3 = "ami-4d964a2e";
+  "16.03".ap-southeast-1.pv-ebs = "ami-ec9b478f";
+  "16.03".ap-southeast-1.pv-s3 = "ami-999b47fa";
+  "16.03".ap-southeast-2.hvm-ebs = "ami-9f7359fc";
+  "16.03".ap-southeast-2.hvm-s3 = "ami-987359fb";
+  "16.03".ap-southeast-2.pv-ebs = "ami-a2705ac1";
+  "16.03".ap-southeast-2.pv-s3 = "ami-a3705ac0";
+  "16.03".eu-central-1.hvm-ebs = "ami-17a45178";
+  "16.03".eu-central-1.hvm-s3 = "ami-f9a55096";
+  "16.03".eu-central-1.pv-ebs = "ami-c8a550a7";
+  "16.03".eu-central-1.pv-s3 = "ami-6ea45101";
+  "16.03".eu-west-1.hvm-ebs = "ami-b5b3d5c6";
+  "16.03".eu-west-1.hvm-s3 = "ami-c986e0ba";
+  "16.03".eu-west-1.pv-ebs = "ami-b083e5c3";
+  "16.03".eu-west-1.pv-s3 = "ami-3c83e54f";
+  "16.03".sa-east-1.hvm-ebs = "ami-f6eb7f9a";
+  "16.03".sa-east-1.hvm-s3 = "ami-93e773ff";
+  "16.03".sa-east-1.pv-ebs = "ami-cbb82ca7";
+  "16.03".sa-east-1.pv-s3 = "ami-abb82cc7";
+  "16.03".us-east-1.hvm-ebs = "ami-c123a3d6";
+  "16.03".us-east-1.hvm-s3 = "ami-bc25a5ab";
+  "16.03".us-east-1.pv-ebs = "ami-bd25a5aa";
+  "16.03".us-east-1.pv-s3 = "ami-a325a5b4";
+  "16.03".us-west-1.hvm-ebs = "ami-748bcd14";
+  "16.03".us-west-1.hvm-s3 = "ami-a68dcbc6";
+  "16.03".us-west-1.pv-ebs = "ami-048acc64";
+  "16.03".us-west-1.pv-s3 = "ami-208dcb40";
+  "16.03".us-west-2.hvm-ebs = "ami-8263a0e2";
+  "16.03".us-west-2.hvm-s3 = "ami-925c9ff2";
+  "16.03".us-west-2.pv-ebs = "ami-5e61a23e";
+  "16.03".us-west-2.pv-s3 = "ami-734c8f13";
+
+  # 16.09.1508.3909827
+  "16.09".ap-northeast-1.hvm-ebs = "ami-68453b0f";
+  "16.09".ap-northeast-1.hvm-s3 = "ami-f9bec09e";
+  "16.09".ap-northeast-1.pv-ebs = "ami-254a3442";
+  "16.09".ap-northeast-1.pv-s3 = "ami-ef473988";
+  "16.09".ap-northeast-2.hvm-ebs = "ami-18ae7f76";
+  "16.09".ap-northeast-2.hvm-s3 = "ami-9eac7df0";
+  "16.09".ap-northeast-2.pv-ebs = "ami-57aa7b39";
+  "16.09".ap-northeast-2.pv-s3 = "ami-5cae7f32";
+  "16.09".ap-south-1.hvm-ebs = "ami-b3f98fdc";
+  "16.09".ap-south-1.hvm-s3 = "ami-98e690f7";
+  "16.09".ap-south-1.pv-ebs = "ami-aef98fc1";
+  "16.09".ap-south-1.pv-s3 = "ami-caf88ea5";
+  "16.09".ap-southeast-1.hvm-ebs = "ami-80fb51e3";
+  "16.09".ap-southeast-1.hvm-s3 = "ami-2df3594e";
+  "16.09".ap-southeast-1.pv-ebs = "ami-37f05a54";
+  "16.09".ap-southeast-1.pv-s3 = "ami-27f35944";
+  "16.09".ap-southeast-2.hvm-ebs = "ami-57ece834";
+  "16.09".ap-southeast-2.hvm-s3 = "ami-87f4f0e4";
+  "16.09".ap-southeast-2.pv-ebs = "ami-d8ede9bb";
+  "16.09".ap-southeast-2.pv-s3 = "ami-a6ebefc5";
+  "16.09".ca-central-1.hvm-ebs = "ami-9f863bfb";
+  "16.09".ca-central-1.hvm-s3 = "ami-ea85388e";
+  "16.09".ca-central-1.pv-ebs = "ami-ce8a37aa";
+  "16.09".ca-central-1.pv-s3 = "ami-448a3720";
+  "16.09".eu-central-1.hvm-ebs = "ami-1b884774";
+  "16.09".eu-central-1.hvm-s3 = "ami-b08c43df";
+  "16.09".eu-central-1.pv-ebs = "ami-888946e7";
+  "16.09".eu-central-1.pv-s3 = "ami-06874869";
+  "16.09".eu-west-1.hvm-ebs = "ami-1ed3e76d";
+  "16.09".eu-west-1.hvm-s3 = "ami-73d1e500";
+  "16.09".eu-west-1.pv-ebs = "ami-44c0f437";
+  "16.09".eu-west-1.pv-s3 = "ami-f3d8ec80";
+  "16.09".eu-west-2.hvm-ebs = "ami-2c9c9648";
+  "16.09".eu-west-2.hvm-s3 = "ami-6b9e940f";
+  "16.09".eu-west-2.pv-ebs = "ami-f1999395";
+  "16.09".eu-west-2.pv-s3 = "ami-bb9f95df";
+  "16.09".sa-east-1.hvm-ebs = "ami-a11882cd";
+  "16.09".sa-east-1.hvm-s3 = "ami-7726bc1b";
+  "16.09".sa-east-1.pv-ebs = "ami-9725bffb";
+  "16.09".sa-east-1.pv-s3 = "ami-b027bddc";
+  "16.09".us-east-1.hvm-ebs = "ami-854ca593";
+  "16.09".us-east-1.hvm-s3 = "ami-2241a834";
+  "16.09".us-east-1.pv-ebs = "ami-a441a8b2";
+  "16.09".us-east-1.pv-s3 = "ami-e841a8fe";
+  "16.09".us-east-2.hvm-ebs = "ami-3f41645a";
+  "16.09".us-east-2.hvm-s3 = "ami-804065e5";
+  "16.09".us-east-2.pv-ebs = "ami-f1466394";
+  "16.09".us-east-2.pv-s3 = "ami-05426760";
+  "16.09".us-west-1.hvm-ebs = "ami-c2efbca2";
+  "16.09".us-west-1.hvm-s3 = "ami-d71042b7";
+  "16.09".us-west-1.pv-ebs = "ami-04e8bb64";
+  "16.09".us-west-1.pv-s3 = "ami-31e9ba51";
+  "16.09".us-west-2.hvm-ebs = "ami-6449f504";
+  "16.09".us-west-2.hvm-s3 = "ami-344af654";
+  "16.09".us-west-2.pv-ebs = "ami-6d4af60d";
+  "16.09".us-west-2.pv-s3 = "ami-de48f4be";
+
+  # 17.03.885.6024dd4067
+  "17.03".ap-northeast-1.hvm-ebs = "ami-dbd0f7bc";
+  "17.03".ap-northeast-1.hvm-s3 = "ami-7cdff81b";
+  "17.03".ap-northeast-2.hvm-ebs = "ami-c59a48ab";
+  "17.03".ap-northeast-2.hvm-s3 = "ami-0b944665";
+  "17.03".ap-south-1.hvm-ebs = "ami-4f413220";
+  "17.03".ap-south-1.hvm-s3 = "ami-864033e9";
+  "17.03".ap-southeast-1.hvm-ebs = "ami-e08c3383";
+  "17.03".ap-southeast-1.hvm-s3 = "ami-c28f30a1";
+  "17.03".ap-southeast-2.hvm-ebs = "ami-fca9a69f";
+  "17.03".ap-southeast-2.hvm-s3 = "ami-3daaa55e";
+  "17.03".ca-central-1.hvm-ebs = "ami-9b00bdff";
+  "17.03".ca-central-1.hvm-s3 = "ami-e800bd8c";
+  "17.03".eu-central-1.hvm-ebs = "ami-5450803b";
+  "17.03".eu-central-1.hvm-s3 = "ami-6e2efe01";
+  "17.03".eu-west-1.hvm-ebs = "ami-10754c76";
+  "17.03".eu-west-1.hvm-s3 = "ami-11734a77";
+  "17.03".eu-west-2.hvm-ebs = "ami-ff1d099b";
+  "17.03".eu-west-2.hvm-s3 = "ami-fe1d099a";
+  "17.03".sa-east-1.hvm-ebs = "ami-d95d3eb5";
+  "17.03".sa-east-1.hvm-s3 = "ami-fca2c190";
+  "17.03".us-east-1.hvm-ebs = "ami-0940c61f";
+  "17.03".us-east-1.hvm-s3 = "ami-674fc971";
+  "17.03".us-east-2.hvm-ebs = "ami-afc2e6ca";
+  "17.03".us-east-2.hvm-s3 = "ami-a1cde9c4";
+  "17.03".us-west-1.hvm-ebs = "ami-587b2138";
+  "17.03".us-west-1.hvm-s3 = "ami-70411b10";
+  "17.03".us-west-2.hvm-ebs = "ami-a93daac9";
+  "17.03".us-west-2.hvm-s3 = "ami-5139ae31";
+
+  # 17.09.2681.59661f21be6
+  "17.09".eu-west-1.hvm-ebs = "ami-a30192da";
+  "17.09".eu-west-2.hvm-ebs = "ami-295a414d";
+  "17.09".eu-west-3.hvm-ebs = "ami-8c0eb9f1";
+  "17.09".eu-central-1.hvm-ebs = "ami-266cfe49";
+  "17.09".us-east-1.hvm-ebs = "ami-40bee63a";
+  "17.09".us-east-2.hvm-ebs = "ami-9d84aff8";
+  "17.09".us-west-1.hvm-ebs = "ami-d14142b1";
+  "17.09".us-west-2.hvm-ebs = "ami-3eb40346";
+  "17.09".ca-central-1.hvm-ebs = "ami-ca8207ae";
+  "17.09".ap-southeast-1.hvm-ebs = "ami-84bccff8";
+  "17.09".ap-southeast-2.hvm-ebs = "ami-0dc5386f";
+  "17.09".ap-northeast-1.hvm-ebs = "ami-89b921ef";
+  "17.09".ap-northeast-2.hvm-ebs = "ami-179b3b79";
+  "17.09".sa-east-1.hvm-ebs = "ami-4762202b";
+  "17.09".ap-south-1.hvm-ebs = "ami-4e376021";
+
+  # 18.03.132946.1caae7247b8
+  "18.03".eu-west-1.hvm-ebs = "ami-065c46ec";
+  "18.03".eu-west-2.hvm-ebs = "ami-64f31903";
+  "18.03".eu-west-3.hvm-ebs = "ami-5a8d3d27";
+  "18.03".eu-central-1.hvm-ebs = "ami-09faf9e2";
+  "18.03".us-east-1.hvm-ebs = "ami-8b3538f4";
+  "18.03".us-east-2.hvm-ebs = "ami-150b3170";
+  "18.03".us-west-1.hvm-ebs = "ami-ce06ebad";
+  "18.03".us-west-2.hvm-ebs = "ami-586c3520";
+  "18.03".ca-central-1.hvm-ebs = "ami-aca72ac8";
+  "18.03".ap-southeast-1.hvm-ebs = "ami-aa0b4d40";
+  "18.03".ap-southeast-2.hvm-ebs = "ami-d0f254b2";
+  "18.03".ap-northeast-1.hvm-ebs = "ami-456511a8";
+  "18.03".ap-northeast-2.hvm-ebs = "ami-3366d15d";
+  "18.03".sa-east-1.hvm-ebs = "ami-163e1f7a";
+  "18.03".ap-south-1.hvm-ebs = "ami-6a390b05";
+
+  # 18.09.910.c15e342304a
+  "18.09".eu-west-1.hvm-ebs = "ami-0f412186fb8a0ec97";
+  "18.09".eu-west-2.hvm-ebs = "ami-0dada3805ce43c55e";
+  "18.09".eu-west-3.hvm-ebs = "ami-074df85565f2e02e2";
+  "18.09".eu-central-1.hvm-ebs = "ami-07c9b884e679df4f8";
+  "18.09".us-east-1.hvm-ebs = "ami-009c9c3f1af480ff3";
+  "18.09".us-east-2.hvm-ebs = "ami-08199961085ea8bc6";
+  "18.09".us-west-1.hvm-ebs = "ami-07aa7f56d612ddd38";
+  "18.09".us-west-2.hvm-ebs = "ami-01c84b7c368ac24d1";
+  "18.09".ca-central-1.hvm-ebs = "ami-04f66113f76198f6c";
+  "18.09".ap-southeast-1.hvm-ebs = "ami-0892c7e24ebf2194f";
+  "18.09".ap-southeast-2.hvm-ebs = "ami-010730f36424b0a2c";
+  "18.09".ap-northeast-1.hvm-ebs = "ami-0cdba8e998f076547";
+  "18.09".ap-northeast-2.hvm-ebs = "ami-0400a698e6a9f4a15";
+  "18.09".sa-east-1.hvm-ebs = "ami-0e4a8a47fd6db6112";
+  "18.09".ap-south-1.hvm-ebs = "ami-0880a678d3f555313";
+
+  # 19.03.172286.8ea36d73256
+  "19.03".eu-west-1.hvm-ebs = "ami-0fe40176548ff0940";
+  "19.03".eu-west-2.hvm-ebs = "ami-03a40fd3a02fe95ba";
+  "19.03".eu-west-3.hvm-ebs = "ami-0436f9da0f20a638e";
+  "19.03".eu-central-1.hvm-ebs = "ami-0022b8ea9efde5de4";
+  "19.03".us-east-1.hvm-ebs = "ami-0efc58fb70ae9a217";
+  "19.03".us-east-2.hvm-ebs = "ami-0abf711b1b34da1af";
+  "19.03".us-west-1.hvm-ebs = "ami-07d126e8838c40ec5";
+  "19.03".us-west-2.hvm-ebs = "ami-03f8a737546e47fb0";
+  "19.03".ca-central-1.hvm-ebs = "ami-03f9fd0ef2e035ede";
+  "19.03".ap-southeast-1.hvm-ebs = "ami-0cff66114c652c262";
+  "19.03".ap-southeast-2.hvm-ebs = "ami-054c73a7f8d773ea9";
+  "19.03".ap-northeast-1.hvm-ebs = "ami-00db62688900456a4";
+  "19.03".ap-northeast-2.hvm-ebs = "ami-0485cdd1a5fdd2117";
+  "19.03".sa-east-1.hvm-ebs = "ami-0c6a43c6e0ad1f4e2";
+  "19.03".ap-south-1.hvm-ebs = "ami-0303deb1b5890f878";
+
+  # 19.09.2243.84af403f54f
+  "19.09".eu-west-1.hvm-ebs = "ami-071082f0fa035374f";
+  "19.09".eu-west-2.hvm-ebs = "ami-0d9dc33c54d1dc4c3";
+  "19.09".eu-west-3.hvm-ebs = "ami-09566799591d1bfed";
+  "19.09".eu-central-1.hvm-ebs = "ami-015f8efc2be419b79";
+  "19.09".eu-north-1.hvm-ebs = "ami-07fc0a32d885e01ed";
+  "19.09".us-east-1.hvm-ebs = "ami-03330d8b51287412f";
+  "19.09".us-east-2.hvm-ebs = "ami-0518b4c84972e967f";
+  "19.09".us-west-1.hvm-ebs = "ami-06ad07e61a353b4a6";
+  "19.09".us-west-2.hvm-ebs = "ami-0e31e30925cf3ce4e";
+  "19.09".ca-central-1.hvm-ebs = "ami-07df50fc76702a36d";
+  "19.09".ap-southeast-1.hvm-ebs = "ami-0f71ae5d4b0b78d95";
+  "19.09".ap-southeast-2.hvm-ebs = "ami-057bbf2b4bd62d210";
+  "19.09".ap-northeast-1.hvm-ebs = "ami-02a62555ca182fb5b";
+  "19.09".ap-northeast-2.hvm-ebs = "ami-0219dde0e6b7b7b93";
+  "19.09".ap-south-1.hvm-ebs = "ami-066f7f2a895c821a1";
+  "19.09".ap-east-1.hvm-ebs = "ami-055b2348db2827ff1";
+  "19.09".sa-east-1.hvm-ebs = "ami-018aab68377227e06";
+
+  # 20.03.1554.94e39623a49
+  "20.03".eu-west-1.hvm-ebs = "ami-02c34db5766cc7013";
+  "20.03".eu-west-2.hvm-ebs = "ami-0e32bd8c7853883f1";
+  "20.03".eu-west-3.hvm-ebs = "ami-061edb1356c1d69fd";
+  "20.03".eu-central-1.hvm-ebs = "ami-0a1a94722dcbff94c";
+  "20.03".eu-north-1.hvm-ebs = "ami-02699abfacbb6464b";
+  "20.03".us-east-1.hvm-ebs = "ami-0c5e7760748b74e85";
+  "20.03".us-east-2.hvm-ebs = "ami-030296bb256764655";
+  "20.03".us-west-1.hvm-ebs = "ami-050be818e0266b741";
+  "20.03".us-west-2.hvm-ebs = "ami-06562f78dca68eda2";
+  "20.03".ca-central-1.hvm-ebs = "ami-02365684a173255c7";
+  "20.03".ap-southeast-1.hvm-ebs = "ami-0dbf353e168d155f7";
+  "20.03".ap-southeast-2.hvm-ebs = "ami-04c0f3a75f63daddd";
+  "20.03".ap-northeast-1.hvm-ebs = "ami-093d9cc49c191eb6c";
+  "20.03".ap-northeast-2.hvm-ebs = "ami-0087df91a7b6ebd45";
+  "20.03".ap-south-1.hvm-ebs = "ami-0a1a6b569af04af9d";
+  "20.03".ap-east-1.hvm-ebs = "ami-0d18fdd309cdefa86";
+  "20.03".sa-east-1.hvm-ebs = "ami-09859378158ae971d";
+
+  latest = self."20.03";
+}; in self
diff --git a/nixpkgs/nixos/modules/virtualisation/ec2-data.nix b/nixpkgs/nixos/modules/virtualisation/ec2-data.nix
new file mode 100644
index 000000000000..629125350182
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ec2-data.nix
@@ -0,0 +1,91 @@
+# This module defines a systemd service that sets the SSH host key,
+# authorized client key, and host name of virtual machines running on
+# Amazon EC2, Eucalyptus and OpenStack Compute (Nova).
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "ec2" "metadata" ] "")
+  ];
+
+  config = {
+
+    systemd.services.apply-ec2-data =
+      { description = "Apply EC2 Data";
+
+        wantedBy = [ "multi-user.target" "sshd.service" ];
+        before = [ "sshd.service" ];
+
+        path = [ pkgs.iproute ];
+
+        script =
+          ''
+            ${optionalString (config.networking.hostName == "") ''
+              echo "setting host name..."
+              if [ -s /etc/ec2-metadata/hostname ]; then
+                  ${pkgs.nettools}/bin/hostname $(cat /etc/ec2-metadata/hostname)
+              fi
+            ''}
+
+            if ! [ -e /root/.ssh/authorized_keys ]; then
+                echo "obtaining SSH key..."
+                mkdir -m 0700 -p /root/.ssh
+                if [ -s /etc/ec2-metadata/public-keys-0-openssh-key ]; then
+                    cat /etc/ec2-metadata/public-keys-0-openssh-key >> /root/.ssh/authorized_keys
+                    echo "new key added to authorized_keys"
+                    chmod 600 /root/.ssh/authorized_keys
+                fi
+            fi
+
+            # Extract the intended SSH host key for this machine from
+            # the supplied user data, if available.  Otherwise sshd will
+            # generate one normally.
+            userData=/etc/ec2-metadata/user-data
+
+            mkdir -m 0755 -p /etc/ssh
+
+            if [ -s "$userData" ]; then
+              key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' $userData)"
+              key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' $userData)"
+              if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
+                  (umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
+                  echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
+              fi
+
+              key="$(sed 's/|/\n/g; s/SSH_HOST_ED25519_KEY://; t; d' $userData)"
+              key_pub="$(sed 's/SSH_HOST_ED25519_KEY_PUB://; t; d' $userData)"
+              if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_ed25519_key ]; then
+                  (umask 077; echo "$key" > /etc/ssh/ssh_host_ed25519_key)
+                  echo "$key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
+              fi
+            fi
+          '';
+
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+      };
+
+    systemd.services.print-host-key =
+      { description = "Print SSH Host Key";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "sshd.service" ];
+        script =
+          ''
+            # Print the host public key on the console so that the user
+            # can obtain it securely by parsing the output of
+            # ec2-get-console-output.
+            echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----" > /dev/console
+            for i in /etc/ssh/ssh_host_*_key.pub; do
+                ${config.programs.ssh.package}/bin/ssh-keygen -l -f $i > /dev/console
+            done
+            echo "-----END SSH HOST KEY FINGERPRINTS-----" > /dev/console
+          '';
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+      };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.nix b/nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.nix
new file mode 100644
index 000000000000..b531787c31a2
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.nix
@@ -0,0 +1,23 @@
+{ targetRoot, wgetExtraOptions }:
+''
+  metaDir=${targetRoot}etc/ec2-metadata
+  mkdir -m 0755 -p "$metaDir"
+
+  echo "getting EC2 instance metadata..."
+
+  if ! [ -e "$metaDir/ami-manifest-path" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path
+  fi
+
+  if ! [ -e "$metaDir/user-data" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/user-data" http://169.254.169.254/1.0/user-data && chmod 600 "$metaDir/user-data"
+  fi
+
+  if ! [ -e "$metaDir/hostname" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname
+  fi
+
+  if ! [ -e "$metaDir/public-keys-0-openssh-key" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key
+  fi
+''
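+
+# This file evaluates to a shell fragment; callers import it with the target
+# root and extra wget flags they need, roughly like this (a sketch; the
+# argument values are illustrative):
+#
+#   metadataFetcher = import ./ec2-metadata-fetcher.nix {
+#     targetRoot = "/";
+#     wgetExtraOptions = "-q";
+#   };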
diff --git a/nixpkgs/nixos/modules/virtualisation/ecs-agent.nix b/nixpkgs/nixos/modules/virtualisation/ecs-agent.nix
new file mode 100644
index 000000000000..93fefe56d1a5
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ecs-agent.nix
@@ -0,0 +1,45 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ecs-agent;
+in {
+  options.services.ecs-agent = {
+    enable = mkEnableOption "Amazon ECS agent";
+
+    package = mkOption {
+      type = types.path;
+      description = "The ECS agent package to use";
+      default = pkgs.ecs-agent;
+      defaultText = "pkgs.ecs-agent";
+    };
+
+    extra-environment = mkOption {
+      type = types.attrsOf types.str;
+      description = "The environment the ECS agent should run with. See the ECS agent documentation for keys that work here.";
+      default = {};
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    # This service doesn't run if docker isn't running, and unlike potentially remote services such as postgresql,
+    # docker has to be running locally, so `docker.enable` is always set when the ECS agent is enabled.
+    virtualisation.docker.enable = true;
+
+    systemd.services.ecs-agent = {
+      inherit (cfg.package.meta) description;
+      after    = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = cfg.extra-environment;
+
+      script = ''
+        if [ ! -z "$ECS_DATADIR" ]; then
+          mkdir -p "$ECS_DATADIR"
+        fi
+        ${cfg.package}/bin/agent
+      '';
+    };
+  };
+}
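+
+# Example usage (a sketch; the cluster name is a placeholder, and the
+# environment keys are standard ECS agent variables):
+#
+#   services.ecs-agent = {
+#     enable = true;
+#     extra-environment = {
+#       ECS_CLUSTER = "my-cluster";
+#       ECS_DATADIR = "/var/lib/ecs-agent";
+#     };
+#   };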
diff --git a/nixpkgs/nixos/modules/virtualisation/gce-images.nix b/nixpkgs/nixos/modules/virtualisation/gce-images.nix
new file mode 100644
index 000000000000..5354d91deb93
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/gce-images.nix
@@ -0,0 +1,9 @@
+let self = {
+  "14.12" = "gs://nixos-cloud-images/nixos-14.12.471.1f09b77-x86_64-linux.raw.tar.gz";
+  "15.09" = "gs://nixos-cloud-images/nixos-15.09.425.7870f20-x86_64-linux.raw.tar.gz";
+  "16.03" = "gs://nixos-cloud-images/nixos-image-16.03.847.8688c17-x86_64-linux.raw.tar.gz";
+  "17.03" = "gs://nixos-cloud-images/nixos-image-17.03.1082.4aab5c5798-x86_64-linux.raw.tar.gz";
+  "18.03" = "gs://nixos-cloud-images/nixos-image-18.03.132536.fdb5ba4cdf9-x86_64-linux.raw.tar.gz";
+  "18.09" = "gs://nixos-cloud-images/nixos-image-18.09.1228.a4c4cbb613c-x86_64-linux.raw.tar.gz";
+  latest = self."18.09";
+}; in self
diff --git a/nixpkgs/nixos/modules/virtualisation/google-compute-config.nix b/nixpkgs/nixos/modules/virtualisation/google-compute-config.nix
new file mode 100644
index 000000000000..327324f2921d
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/google-compute-config.nix
@@ -0,0 +1,148 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  gce = pkgs.google-compute-engine;
+in
+{
+  imports = [
+    ../profiles/headless.nix
+    ../profiles/qemu-guest.nix
+  ];
+
+
+  fileSystems."/" = {
+    fsType = "ext4";
+    device = "/dev/disk/by-label/nixos";
+    autoResize = true;
+  };
+
+  boot.growPartition = true;
+  boot.kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
+  boot.initrd.kernelModules = [ "virtio_scsi" ];
+  boot.kernelModules = [ "virtio_pci" "virtio_net" ];
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/sda";
+  boot.loader.timeout = 0;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  # Allow root logins only using SSH keys
+  # and disable password authentication in general
+  services.openssh.enable = true;
+  services.openssh.permitRootLogin = "prohibit-password";
+  services.openssh.passwordAuthentication = mkDefault false;
+
+  # enable OS Login. This also requires setting enable-oslogin=TRUE metadata on
+  # instance or project level
+  security.googleOsLogin.enable = true;
+
+  # Use GCE udev rules for dynamic disk volumes
+  services.udev.packages = [ gce ];
+
+  # Force getting the hostname from Google Compute.
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  environment.systemPackages = [ pkgs.cryptsetup ];
+
+  # Make sure GCE image does not replace host key that NixOps sets
+  environment.etc."default/instance_configs.cfg".text = lib.mkDefault ''
+    [InstanceSetup]
+    set_host_keys = false
+  '';
+
+  # Rely on GCP's firewall instead
+  networking.firewall.enable = mkDefault false;
+
+  # Configure default metadata hostnames
+  networking.extraHosts = ''
+    169.254.169.254 metadata.google.internal metadata
+  '';
+
+  networking.timeServers = [ "metadata.google.internal" ];
+
+  networking.usePredictableInterfaceNames = false;
+
+  # GCE uses an MTU of 1460
+  networking.interfaces.eth0.mtu = 1460;
+
+  systemd.services.google-instance-setup = {
+    description = "Google Compute Engine Instance Setup";
+    after = [ "network-online.target" "network.target" "rsyslog.service" ];
+    before = [ "sshd.service" ];
+    path = with pkgs; [ coreutils ethtool openssh ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_instance_setup";
+      StandardOutput="journal+console";
+      Type = "oneshot";
+    };
+    wantedBy = [ "sshd.service" "multi-user.target" ];
+  };
+
+  systemd.services.google-network-daemon = {
+    description = "Google Compute Engine Network Daemon";
+    after = [ "network-online.target" "network.target" "google-instance-setup.service" ];
+    path = with pkgs; [ iproute ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_network_daemon";
+      StandardOutput="journal+console";
+      Type="simple";
+    };
+    wantedBy = [ "multi-user.target" ];
+  };
+
+  systemd.services.google-clock-skew-daemon = {
+    description = "Google Compute Engine Clock Skew Daemon";
+    after = [ "network.target" "google-instance-setup.service" "google-network-daemon.service" ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_clock_skew_daemon";
+      StandardOutput="journal+console";
+      Type = "simple";
+    };
+    wantedBy = ["multi-user.target"];
+  };
+
+
+  systemd.services.google-shutdown-scripts = {
+    description = "Google Compute Engine Shutdown Scripts";
+    after = [
+      "network-online.target"
+      "network.target"
+      "rsyslog.service"
+      "google-instance-setup.service"
+      "google-network-daemon.service"
+    ];
+    serviceConfig = {
+      ExecStart = "${pkgs.coreutils}/bin/true";
+      ExecStop = "${gce}/bin/google_metadata_script_runner --script-type shutdown";
+      RemainAfterExit = true;
+      StandardOutput="journal+console";
+      TimeoutStopSec = "0";
+      Type = "oneshot";
+    };
+    wantedBy = [ "multi-user.target" ];
+  };
+
+  systemd.services.google-startup-scripts = {
+    description = "Google Compute Engine Startup Scripts";
+    after = [
+      "network-online.target"
+      "network.target"
+      "rsyslog.service"
+      "google-instance-setup.service"
+      "google-network-daemon.service"
+    ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_metadata_script_runner --script-type startup";
+      KillMode = "process";
+      StandardOutput = "journal+console";
+      Type = "oneshot";
+    };
+    wantedBy = [ "multi-user.target" ];
+  };
+
+  environment.etc."sysctl.d/11-gce-network-security.conf".source = "${gce}/sysctl.d/11-gce-network-security.conf";
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/google-compute-image.nix b/nixpkgs/nixos/modules/virtualisation/google-compute-image.nix
new file mode 100644
index 000000000000..d172ae38fdcf
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/google-compute-image.nix
@@ -0,0 +1,61 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.googleComputeImage;
+  defaultConfigFile = pkgs.writeText "configuration.nix" ''
+    { ... }:
+    {
+      imports = [
+        <nixpkgs/nixos/modules/virtualisation/google-compute-image.nix>
+      ];
+    }
+  '';
+in
+{
+
+  imports = [ ./google-compute-config.nix ];
+
+  options = {
+    virtualisation.googleComputeImage.diskSize = mkOption {
+      type = with types; int;
+      default = 1536;
+      description = ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+
+    virtualisation.googleComputeImage.configFile = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      description = ''
+        A path to a configuration file which will be placed at `/etc/nixos/configuration.nix`
+        and be used when switching to a new configuration.
+        If set to `null`, a default configuration is used, where the only import is
+        `<nixpkgs/nixos/modules/virtualisation/google-compute-image.nix>`.
+      '';
+    };
+  };
+
+  #### implementation
+  config = {
+
+    system.build.googleComputeImage = import ../../lib/make-disk-image.nix {
+      name = "google-compute-image";
+      postVM = ''
+        PATH=$PATH:${with pkgs; stdenv.lib.makeBinPath [ gnutar gzip ]}
+        pushd $out
+        mv $diskImage disk.raw
+        tar -Szcf nixos-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.raw.tar.gz disk.raw
+        rm $out/disk.raw
+        popd
+      '';
+      format = "raw";
+      configFile = if cfg.configFile == null then defaultConfigFile else cfg.configFile;
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+
+  };
+
+}
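+
+# Example build invocation (a sketch; ./configuration.nix is a hypothetical
+# file that imports this module):
+#
+#   $ nix-build '<nixpkgs/nixos>' \
+#       -I nixos-config=./configuration.nix \
+#       -A config.system.build.googleComputeImage
+#
+# The resulting nixos-image-*.raw.tar.gz in ./result can be uploaded to a GCS
+# bucket and registered with `gcloud compute images create`.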
diff --git a/nixpkgs/nixos/modules/virtualisation/grow-partition.nix b/nixpkgs/nixos/modules/virtualisation/grow-partition.nix
new file mode 100644
index 000000000000..444c0bc1630e
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/grow-partition.nix
@@ -0,0 +1,3 @@
+# This profile is deprecated, use boot.growPartition directly.
+builtins.trace "the profile <nixos/modules/virtualisation/grow-partition.nix> is deprecated, use boot.growPartition instead"
+{ }
diff --git a/nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix b/nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix
new file mode 100644
index 000000000000..adc2810a9939
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.hypervGuest;
+
+in {
+  options = {
+    virtualisation.hypervGuest = {
+      enable = mkEnableOption "Hyper-V Guest Support";
+
+      videoMode = mkOption {
+        type = types.str;
+        default = "1152x864";
+        example = "1024x768";
+        description = ''
+          Resolution at which to initialize the video adapter.
+
+          Supports screen resolution up to Full HD 1920x1080 with 32 bit color
+          on Windows Server 2012, and 1600x1200 with 16 bit color on Windows
+          Server 2008 R2 or earlier.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot = {
+      initrd.kernelModules = [
+        "hv_balloon" "hv_netvsc" "hv_storvsc" "hv_utils" "hv_vmbus"
+      ];
+
+      kernelParams = [
+        "video=hyperv_fb:${cfg.videoMode} elevator=noop"
+      ];
+    };
+
+    environment.systemPackages = [ config.boot.kernelPackages.hyperv-daemons.bin ];
+
+    security.rngd.enable = false;
+
+    # enable hotadding cpu/memory
+    services.udev.packages = lib.singleton (pkgs.writeTextFile {
+      name = "hyperv-cpu-and-memory-hotadd-udev-rules";
+      destination = "/etc/udev/rules.d/99-hyperv-cpu-and-memory-hotadd.rules";
+      text = ''
+        # Memory hotadd
+        SUBSYSTEM=="memory", ACTION=="add", DEVPATH=="/devices/system/memory/memory[0-9]*", TEST=="state", ATTR{state}="online"
+
+        # CPU hotadd
+        SUBSYSTEM=="cpu", ACTION=="add", DEVPATH=="/devices/system/cpu/cpu[0-9]*", TEST=="online", ATTR{online}="1"
+      '';
+    });
+
+    systemd = {
+      packages = [ config.boot.kernelPackages.hyperv-daemons.lib ];
+
+      targets.hyperv-daemons = {
+        wantedBy = [ "multi-user.target" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix b/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix
new file mode 100644
index 000000000000..fabc9113dfc4
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix
@@ -0,0 +1,70 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.hyperv;
+
+in {
+  options = {
+    hyperv = {
+      baseImageSize = mkOption {
+        type = types.int;
+        default = 2048;
+        description = ''
+          The size of the hyper-v base image in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-hyperv-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = ''
+          The name of the derivation for the hyper-v appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.vhdx";
+        description = ''
+          The file name of the hyper-v appliance.
+        '';
+      };
+    };
+  };
+
+  config = {
+    system.build.hypervImage = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+      postVM = ''
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o subformat=dynamic -O vhdx $diskImage $out/${cfg.vmFileName}
+        rm $diskImage
+      '';
+      format = "raw";
+      diskSize = cfg.baseImageSize;
+      partitionTableType = "efi";
+      inherit config lib pkgs;
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    fileSystems."/boot" = {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    boot.growPartition = true;
+
+    boot.loader.grub = {
+      version = 2;
+      device = "nodev";
+      efiSupport = true;
+      efiInstallAsRemovable = true;
+    };
+
+    virtualisation.hypervGuest.enable = true;
+  };
+}
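+
+# Example build invocation (a sketch; ./configuration.nix is a hypothetical
+# file that imports this module):
+#
+#   $ nix-build '<nixpkgs/nixos>' \
+#       -I nixos-config=./configuration.nix \
+#       -A config.system.build.hypervImage
+#
+# The resulting .vhdx in ./result boots via EFI, so it is intended for a
+# generation 2 Hyper-V virtual machine.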
diff --git a/nixpkgs/nixos/modules/virtualisation/kvmgt.nix b/nixpkgs/nixos/modules/virtualisation/kvmgt.nix
new file mode 100644
index 000000000000..e08ad3446281
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/kvmgt.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.kvmgt;
+
+  kernelPackages = config.boot.kernelPackages;
+
+  vgpuOptions = {
+    uuid = mkOption {
+      type = with types; listOf str;
+      description = "UUID(s) of VGPU device. You can generate one with <package>libossp_uuid</package>.";
+    };
+  };
+
+in {
+  options = {
+    virtualisation.kvmgt = {
+      enable = mkEnableOption ''
+        KVMGT (iGVT-g) VGPU support. Allows Qemu/KVM guests to share the host's Intel integrated graphics card.
+        Currently only one graphical device can be shared. To allow users to access the device without root, add them
+        to the kvm group: <literal>users.extraUsers.&lt;yourusername&gt;.extraGroups = [ "kvm" ];</literal>
+      '';
+      # multi-GPU support is still an open question
+      device = mkOption {
+        type = types.str;
+        default = "0000:00:02.0";
+        description = "PCI ID of graphics card. You can figure it with <command>ls /sys/class/mdev_bus</command>.";
+      };
+      vgpus = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule [ { options = vgpuOptions; } ]);
+        description = ''
+          Virtual GPUs to be used in Qemu. You can list the available device types via <command>ls /sys/bus/pci/devices/*/mdev_supported_types</command>
+          and inspect a type via <command>cat /sys/bus/pci/devices/*/mdev_supported_types/i915-GVTg_V5_4/description</command>.
+        '';
+        example = {
+          i915-GVTg_V5_8.uuid = [ "a297db4a-f4c2-11e6-90f6-d3b88d6c9525" ];
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = singleton {
+      assertion = versionAtLeast kernelPackages.kernel.version "4.16";
+      message = "KVMGT is not properly supported for kernels older than 4.16";
+    };
+
+    boot.kernelModules = [ "kvmgt" ];
+    boot.kernelParams = [ "i915.enable_gvt=1" ];
+
+    services.udev.extraRules = ''
+      SUBSYSTEM=="vfio", OWNER="root", GROUP="kvm"
+    '';
+
+    systemd = let
+      vgpus = listToAttrs (flatten (mapAttrsToList
+        (mdev: opt: map (id: nameValuePair "kvmgt-${id}" { inherit mdev; uuid = id; }) opt.uuid)
+        cfg.vgpus));
+    in {
+      paths = mapAttrs (_: opt:
+        {
+          description = "KVMGT VGPU ${opt.uuid} path";
+          wantedBy = [ "multi-user.target" ];
+          pathConfig = {
+            PathExists = "/sys/bus/pci/devices/${cfg.device}/mdev_supported_types/${opt.mdev}/create";
+          };
+        }) vgpus;
+
+      services = mapAttrs (_: opt:
+        {
+          description = "KVMGT VGPU ${opt.uuid}";
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+            ExecStart = "${pkgs.runtimeShell} -c 'echo ${opt.uuid} > /sys/bus/pci/devices/${cfg.device}/mdev_supported_types/${opt.mdev}/create'";
+            ExecStop = "${pkgs.runtimeShell} -c 'echo 1 > /sys/bus/pci/devices/${cfg.device}/${opt.uuid}/remove'";
+          };
+        }) vgpus;
+    };
+  };
+
+  meta.maintainers = with maintainers; [ gnidorah ];
+}
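+
+# A usage sketch (the mdev type and UUID below are placeholders; check the
+# types your GPU actually exposes under mdev_supported_types):
+#
+#   virtualisation.kvmgt = {
+#     enable = true;
+#     vgpus."i915-GVTg_V5_8".uuid = [ "a297db4a-f4c2-11e6-90f6-d3b88d6c9525" ];
+#   };
+#
+# The resulting mediated device can then be handed to QEMU, for example with
+# "-device vfio-pci,sysfsdev=/sys/bus/mdev/devices/<uuid>" (adjust to your
+# QEMU invocation).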
diff --git a/nixpkgs/nixos/modules/virtualisation/libvirtd.nix b/nixpkgs/nixos/modules/virtualisation/libvirtd.nix
new file mode 100644
index 000000000000..43b5fcfa8fae
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/libvirtd.nix
@@ -0,0 +1,280 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.libvirtd;
+  vswitch = config.virtualisation.vswitch;
+  configFile = pkgs.writeText "libvirtd.conf" ''
+    auth_unix_ro = "polkit"
+    auth_unix_rw = "polkit"
+    ${cfg.extraConfig}
+  '';
+  qemuConfigFile = pkgs.writeText "qemu.conf" ''
+    ${optionalString cfg.qemuOvmf ''
+      nvram = ["/run/libvirt/nix-ovmf/OVMF_CODE.fd:/run/libvirt/nix-ovmf/OVMF_VARS.fd"]
+    ''}
+    ${optionalString (!cfg.qemuRunAsRoot) ''
+      user = "qemu-libvirtd"
+      group = "qemu-libvirtd"
+    ''}
+    ${cfg.qemuVerbatimConfig}
+  '';
+  dirName = "libvirt";
+  subDirs = list: [ dirName ] ++ map (e: "${dirName}/${e}") list;
+
+in {
+
+  imports = [
+    (mkRemovedOptionModule [ "virtualisation" "libvirtd" "enableKVM" ]
+      "Set the option `virtualisation.libvirtd.qemuPackage' instead.")
+  ];
+
+  ###### interface
+
+  options.virtualisation.libvirtd = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        This option enables libvirtd, a daemon that manages
+        virtual machines. Users in the "libvirtd" group can interact with
+        the daemon (e.g. to start or stop VMs) using the
+        <command>virsh</command> command line tool, among others.
+      '';
+    };
+
+    qemuPackage = mkOption {
+      type = types.package;
+      default = pkgs.qemu;
+      description = ''
+        Qemu package to use with libvirt.
+        `pkgs.qemu` can emulate alien architectures (e.g. aarch64 on x86),
+        while `pkgs.qemu_kvm` saves disk space by only supporting the host architecture.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Extra contents appended to the libvirtd configuration file,
+        libvirtd.conf.
+      '';
+    };
+
+    qemuRunAsRoot = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        If true,  libvirtd runs qemu as root.
+        If false, libvirtd runs qemu as unprivileged user qemu-libvirtd.
+        Changing this option to false may cause file permission issues
+        for existing guests. To fix these, manually change ownership
+        of affected files in /var/lib/libvirt/qemu to qemu-libvirtd.
+      '';
+    };
+
+    qemuVerbatimConfig = mkOption {
+      type = types.lines;
+      default = ''
+        namespaces = []
+      '';
+      description = ''
+        Contents written to the qemu configuration file, qemu.conf.
+        Make sure to include a proper namespace configuration when
+        supplying custom configuration.
+      '';
+    };
+
+    qemuOvmf = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Allows libvirtd to take advantage of OVMF when creating new
+        QEMU VMs with UEFI boot.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--verbose" ];
+      description = ''
+        Extra command line arguments passed to libvirtd on startup.
+      '';
+    };
+
+    onBoot = mkOption {
+      type = types.enum ["start" "ignore" ];
+      default = "start";
+      description = ''
+        Specifies the action to be taken on guests when the host boots.
+        The "start" option starts all guests that were running prior to shutdown
+        regardless of their autostart settings. The "ignore" option will not
+        start formerly running guests on boot. However, any guest marked as
+        autostart will still be automatically started by libvirtd.
+      '';
+    };
+
+    onShutdown = mkOption {
+      type = types.enum ["shutdown" "suspend" ];
+      default = "suspend";
+      description = ''
+        When shutting down or restarting the host, the method used to
+        gracefully halt the guests. Setting this to "shutdown" will
+        cause an ACPI shutdown of each guest, while "suspend" will
+        attempt to save the state of the guests so it can be restored on boot.
+      '';
+    };
+
+    allowedBridges = mkOption {
+      type = types.listOf types.str;
+      default = [ "virbr0" ];
+      description = ''
+        List of bridge devices that can be used by qemu:///session.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment = {
+      # this file is expected in /etc/qemu and not sysconfdir (/var/lib)
+      etc."qemu/bridge.conf".text = lib.concatMapStringsSep "\n" (e:
+        "allow ${e}") cfg.allowedBridges;
+      systemPackages = with pkgs; [ libvirt libressl.nc iptables cfg.qemuPackage ];
+      etc.ethertypes.source = "${pkgs.iptables}/etc/ethertypes";
+    };
+
+    boot.kernelModules = [ "tun" ];
+
+    users.groups.libvirtd.gid = config.ids.gids.libvirtd;
+
+    # libvirtd runs qemu as this user and group by default
+    users.extraGroups.qemu-libvirtd.gid = config.ids.gids.qemu-libvirtd;
+    users.extraUsers.qemu-libvirtd = {
+      uid = config.ids.uids.qemu-libvirtd;
+      isNormalUser = false;
+      group = "qemu-libvirtd";
+    };
+
+    security.wrappers.qemu-bridge-helper = {
+      source = "/run/${dirName}/nix-helpers/qemu-bridge-helper";
+    };
+
+    systemd.packages = [ pkgs.libvirt ];
+
+    systemd.services.libvirtd-config = {
+      description = "Libvirt Virtual Machine Management Daemon - configuration";
+      script = ''
+        # Copy default libvirt network config .xml files to /var/lib
+        # Files modified by the user will not be overwritten
+        for i in $(cd ${pkgs.libvirt}/var/lib && echo \
+            libvirt/qemu/networks/*.xml libvirt/qemu/networks/autostart/*.xml \
+            libvirt/nwfilter/*.xml );
+        do
+            mkdir -p /var/lib/$(dirname $i) -m 755
+            cp -npd ${pkgs.libvirt}/var/lib/$i /var/lib/$i
+        done
+
+        # Copy generated qemu config to libvirt directory
+        cp -f ${qemuConfigFile} /var/lib/${dirName}/qemu.conf
+
+        # stable paths (not GC'able like those in /nix/store) for use in the <emulator> section of xml configs
+        for emulator in ${pkgs.libvirt}/libexec/libvirt_lxc ${cfg.qemuPackage}/bin/qemu-kvm ${cfg.qemuPackage}/bin/qemu-system-*; do
+          ln -s --force "$emulator" /run/${dirName}/nix-emulators/
+        done
+
+        for helper in libexec/qemu-bridge-helper bin/qemu-pr-helper; do
+          ln -s --force ${cfg.qemuPackage}/$helper /run/${dirName}/nix-helpers/
+        done
+
+        ${optionalString cfg.qemuOvmf ''
+          ln -s --force ${pkgs.OVMF.fd}/FV/OVMF_CODE.fd /run/${dirName}/nix-ovmf/
+          ln -s --force ${pkgs.OVMF.fd}/FV/OVMF_VARS.fd /run/${dirName}/nix-ovmf/
+        ''}
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        RuntimeDirectoryPreserve = "yes";
+        LogsDirectory = subDirs [ "qemu" ];
+        RuntimeDirectory = subDirs [ "nix-emulators" "nix-helpers" "nix-ovmf" ];
+        StateDirectory = subDirs [ "dnsmasq" ];
+      };
+    };
+
+    systemd.services.libvirtd = {
+      requires = [ "libvirtd-config.service" ];
+      after = [ "systemd-udev-settle.service" "libvirtd-config.service" ]
+              ++ optional vswitch.enable "ovs-vswitchd.service";
+
+      environment.LIBVIRTD_ARGS = escapeShellArgs (
+        [ "--config" configFile
+          "--timeout" "120"     # from ${libvirt}/var/lib/sysconfig/libvirtd
+        ] ++ cfg.extraOptions);
+
+      path = [ cfg.qemuPackage ] # libvirtd requires qemu-img to manage disk images
+             ++ optional vswitch.enable vswitch.package;
+
+      serviceConfig = {
+        Type = "notify";
+        KillMode = "process"; # when stopping, leave the VMs alone
+        Restart = "no";
+      };
+      restartIfChanged = false;
+    };
+
+    systemd.services.libvirt-guests = {
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [ coreutils libvirt gawk ];
+      restartIfChanged = false;
+
+      environment.ON_BOOT = "${cfg.onBoot}";
+      environment.ON_SHUTDOWN = "${cfg.onShutdown}";
+    };
+
+    systemd.sockets.virtlogd = {
+      description = "Virtual machine log manager socket";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "/run/${dirName}/virtlogd-sock" ];
+    };
+
+    systemd.services.virtlogd = {
+      description = "Virtual machine log manager";
+      serviceConfig.ExecStart = "@${pkgs.libvirt}/sbin/virtlogd virtlogd";
+      restartIfChanged = false;
+    };
+
+    systemd.sockets.virtlockd = {
+      description = "Virtual machine lock manager socket";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "/run/${dirName}/virtlockd-sock" ];
+    };
+
+    systemd.services.virtlockd = {
+      description = "Virtual machine lock manager";
+      serviceConfig.ExecStart = "@${pkgs.libvirt}/sbin/virtlockd virtlockd";
+      restartIfChanged = false;
+    };
+
+    systemd.sockets.libvirtd    .wantedBy = [ "sockets.target" ];
+    systemd.sockets.libvirtd-tcp.wantedBy = [ "sockets.target" ];
+
+    security.polkit.extraConfig = ''
+      polkit.addRule(function(action, subject) {
+        if (action.id == "org.libvirt.unix.manage" &&
+          subject.isInGroup("libvirtd")) {
+          return polkit.Result.YES;
+        }
+      });
+    '';
+  };
+}
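+
+# A minimal usage sketch (the user name "alice" is a placeholder):
+#
+#   virtualisation.libvirtd.enable = true;
+#   users.users.alice.extraGroups = [ "libvirtd" ];
+#
+# With qemuOvmf left at its default of true, UEFI guests can reference the
+# firmware exposed under /run/libvirt/nix-ovmf.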
diff --git a/nixpkgs/nixos/modules/virtualisation/lxc-container.nix b/nixpkgs/nixos/modules/virtualisation/lxc-container.nix
new file mode 100644
index 000000000000..d49364840187
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxc-container.nix
@@ -0,0 +1,26 @@
+{ lib, ... }:
+
+with lib;
+
+{
+  imports = [
+    ../profiles/docker-container.nix # FIXME, shouldn't include something from profiles/
+  ];
+
+  # Allow the user to login as root without password.
+  users.users.root.initialHashedPassword = mkOverride 150 "";
+
+  # Some more help text.
+  services.mingetty.helpLine =
+    ''
+
+      Log in as "root" with an empty password.
+    '';
+
+  # Containers should be light-weight, so start sshd on demand.
+  services.openssh.enable = mkDefault true;
+  services.openssh.startWhenNeeded = mkDefault true;
+
+  # Allow ssh connections
+  networking.firewall.allowedTCPPorts = [ 22 ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxc.nix b/nixpkgs/nixos/modules/virtualisation/lxc.nix
new file mode 100644
index 000000000000..f484d5ee59a8
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxc.nix
@@ -0,0 +1,82 @@
+# LXC Configuration
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.lxc;
+
+in
+
+{
+  ###### interface
+
+  options.virtualisation.lxc = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            This enables Linux Containers (LXC), which provides tools
+            for creating and managing system or application containers
+            on Linux.
+          '';
+      };
+
+    systemConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          ''
+            This is the system-wide LXC config. See
+            <citerefentry><refentrytitle>lxc.system.conf</refentrytitle>
+            <manvolnum>5</manvolnum></citerefentry>.
+          '';
+      };
+
+    defaultConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          ''
+            Default config (default.conf) for new containers, i.e. for
+            network config. See <citerefentry><refentrytitle>lxc.container.conf
+            </refentrytitle><manvolnum>5</manvolnum></citerefentry>.
+          '';
+      };
+
+    usernetConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          ''
+            This is the config file for managing unprivileged user network
+            administration access in LXC. See <citerefentry>
+            <refentrytitle>lxc-usernet</refentrytitle><manvolnum>5</manvolnum>
+            </citerefentry>.
+          '';
+      };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.lxc ];
+    environment.etc."lxc/lxc.conf".text = cfg.systemConfig;
+    environment.etc."lxc/lxc-usernet".text = cfg.usernetConfig;
+    environment.etc."lxc/default.conf".text = cfg.defaultConfig;
+    systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
+
+    security.apparmor.packages = [ pkgs.lxc ];
+    security.apparmor.profiles = [
+      "${pkgs.lxc}/etc/apparmor.d/lxc-containers"
+      "${pkgs.lxc}/etc/apparmor.d/usr.bin.lxc-start"
+    ];
+  };
+}
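+
+# A usage sketch (the bridge "br0" is a placeholder; keys follow
+# lxc.container.conf(5) and may differ between LXC versions):
+#
+#   virtualisation.lxc.enable = true;
+#   virtualisation.lxc.defaultConfig = ''
+#     lxc.net.0.type = veth
+#     lxc.net.0.link = br0
+#     lxc.net.0.flags = up
+#   '';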
diff --git a/nixpkgs/nixos/modules/virtualisation/lxcfs.nix b/nixpkgs/nixos/modules/virtualisation/lxcfs.nix
new file mode 100644
index 000000000000..b2457403463a
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxcfs.nix
@@ -0,0 +1,45 @@
+# LXC Configuration
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.lxc.lxcfs;
+in {
+  meta.maintainers = [ maintainers.mic92 ];
+
+  ###### interface
+  options.virtualisation.lxc.lxcfs = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This enables LXCFS, a FUSE filesystem for LXC.
+          To use lxcfs, include the following configuration in your
+          container configuration:
+          <code>
+            virtualisation.lxc.defaultConfig = "lxc.include = ''${pkgs.lxcfs}/share/lxc/config/common.conf.d/00-lxcfs.conf";
+          </code>
+        '';
+      };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.lxcfs = {
+      description = "FUSE filesystem for LXC";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "lxc.service" ];
+      restartIfChanged = false;
+      serviceConfig = {
+        ExecStartPre="${pkgs.coreutils}/bin/mkdir -p /var/lib/lxcfs";
+        ExecStart="${pkgs.lxcfs}/bin/lxcfs /var/lib/lxcfs";
+        ExecStopPost="-${pkgs.fuse}/bin/fusermount -u /var/lib/lxcfs";
+        KillMode="process";
+        Restart="on-failure";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxd.nix b/nixpkgs/nixos/modules/virtualisation/lxd.nix
new file mode 100644
index 000000000000..3958fc2c1d7c
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxd.nix
@@ -0,0 +1,150 @@
+# Systemd services for lxd.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.lxd;
+  zfsCfg = config.boot.zfs;
+
+in
+
+{
+  ###### interface
+
+  options = {
+    virtualisation.lxd = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option enables lxd, a daemon that manages
+          containers. Users in the "lxd" group can interact with
+          the daemon (e.g. to start or stop containers) using the
+          <command>lxc</command> command line tool, among others.
+
+          Most of the time, you'll also want to start lxcfs, so
+          that containers can "see" the limits:
+          <code>
+            virtualisation.lxc.lxcfs.enable = true;
+          </code>
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.lxd.override { nftablesSupport = config.networking.nftables.enable; };
+        defaultText = "pkgs.lxd";
+        description = ''
+          The LXD package to use.
+        '';
+      };
+
+      lxcPackage = mkOption {
+        type = types.package;
+        default = pkgs.lxc;
+        defaultText = "pkgs.lxc";
+        description = ''
+          The LXC package to use with LXD (required for AppArmor profiles).
+        '';
+      };
+
+      zfsPackage = mkOption {
+        type = types.package;
+        default = with pkgs; if zfsCfg.enableUnstable then zfsUnstable else zfs;
+        defaultText = "pkgs.zfs";
+        description = ''
+          The ZFS package to use with LXD.
+        '';
+      };
+
+      zfsSupport = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enables lxd to use ZFS as a storage backend for containers.
+
+          This option is enabled by default if a ZFS pool is configured
+          with NixOS.
+        '';
+      };
+
+      recommendedSysctlSettings = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enables various settings to avoid common pitfalls when
+          running containers that require many file operations.
+          Fixes errors like "Too many open files" or
+          "neighbour: ndisc_cache: neighbor table overflow!".
+          See https://lxd.readthedocs.io/en/latest/production-setup/
+          for details.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    security.apparmor = {
+      enable = true;
+      profiles = [
+        "${cfg.lxcPackage}/etc/apparmor.d/usr.bin.lxc-start"
+        "${cfg.lxcPackage}/etc/apparmor.d/lxc-containers"
+      ];
+      packages = [ cfg.lxcPackage ];
+    };
+
+    systemd.services.lxd = {
+      description = "LXD Container Management Daemon";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+
+      path = lib.optional cfg.zfsSupport cfg.zfsPackage;
+
+      preStart = ''
+        mkdir -m 0755 -p /var/lib/lxc/rootfs
+      '';
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/bin/lxd lxd --group lxd";
+        Type = "simple";
+        KillMode = "process"; # when stopping, leave the containers alone
+        LimitMEMLOCK = "infinity";
+        LimitNOFILE = "1048576";
+        LimitNPROC = "infinity";
+        TasksMax = "infinity";
+
+        # By default, `lxd` loads configuration files from hard-coded
+        # `/usr/share/lxc/config` - since this is a no-go for us, we have to
+        # explicitly tell it where the actual configuration files are
+        Environment = mkIf (config.virtualisation.lxc.lxcfs.enable)
+          "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
+      };
+    };
+
+    users.groups.lxd.gid = config.ids.gids.lxd;
+
+    users.users.root = {
+      subUidRanges = [ { startUid = 1000000; count = 65536; } ];
+      subGidRanges = [ { startGid = 1000000; count = 65536; } ];
+    };
+
+    boot.kernel.sysctl = mkIf cfg.recommendedSysctlSettings {
+      "fs.inotify.max_queued_events" = 1048576;
+      "fs.inotify.max_user_instances" = 1048576;
+      "fs.inotify.max_user_watches" = 1048576;
+      "vm.max_map_count" = 262144;
+      "kernel.dmesg_restrict" = 1;
+      "net.ipv4.neigh.default.gc_thresh3" = 8192;
+      "net.ipv6.neigh.default.gc_thresh3" = 8192;
+      "kernel.keys.maxkeys" = 2000;
+    };
+  };
+}
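+
+# A minimal usage sketch (the user name "alice" is a placeholder):
+#
+#   virtualisation.lxd.enable = true;
+#   virtualisation.lxc.lxcfs.enable = true;           # let containers see their limits
+#   virtualisation.lxd.recommendedSysctlSettings = true;
+#   users.users.alice.extraGroups = [ "lxd" ];        # grants access to the daemon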
diff --git a/nixpkgs/nixos/modules/virtualisation/nixos-containers.nix b/nixpkgs/nixos/modules/virtualisation/nixos-containers.nix
new file mode 100644
index 000000000000..b0fa03917c82
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/nixos-containers.nix
@@ -0,0 +1,844 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  # The container's init script, a small wrapper around the regular
+  # NixOS stage-2 init script.
+  containerInit = (cfg:
+    let
+      renderExtraVeth = (name: cfg:
+        ''
+        echo "Bringing ${name} up"
+        ip link set dev ${name} up
+        ${optionalString (cfg.localAddress != null) ''
+          echo "Setting ip for ${name}"
+          ip addr add ${cfg.localAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.localAddress6 != null) ''
+          echo "Setting ip6 for ${name}"
+          ip -6 addr add ${cfg.localAddress6} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress != null) ''
+          echo "Setting route to host for ${name}"
+          ip route add ${cfg.hostAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress6 != null) ''
+          echo "Setting route6 to host for ${name}"
+          ip -6 route add ${cfg.hostAddress6} dev ${name}
+        ''}
+        ''
+        );
+    in
+      pkgs.writeScript "container-init"
+      ''
+        #! ${pkgs.runtimeShell} -e
+
+        # Initialise the container side of the veth pair.
+        if [ -n "$HOST_ADDRESS" ]   || [ -n "$HOST_ADDRESS6" ]  ||
+           [ -n "$LOCAL_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS6" ] ||
+           [ -n "$HOST_BRIDGE" ]; then
+          ip link set host0 name eth0
+          ip link set dev eth0 up
+
+          if [ -n "$LOCAL_ADDRESS" ]; then
+            ip addr add $LOCAL_ADDRESS dev eth0
+          fi
+          if [ -n "$LOCAL_ADDRESS6" ]; then
+            ip -6 addr add $LOCAL_ADDRESS6 dev eth0
+          fi
+          if [ -n "$HOST_ADDRESS" ]; then
+            ip route add $HOST_ADDRESS dev eth0
+            ip route add default via $HOST_ADDRESS
+          fi
+          if [ -n "$HOST_ADDRESS6" ]; then
+            ip -6 route add $HOST_ADDRESS6 dev eth0
+            ip -6 route add default via $HOST_ADDRESS6
+          fi
+
+          ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+        fi
+
+        # Start the regular stage 1 script.
+        exec "$1"
+      ''
+    );
+
+  nspawnExtraVethArgs = (name: cfg: "--network-veth-extra=${name}");
+
+  startScript = cfg:
+    ''
+      mkdir -p -m 0755 "$root/etc" "$root/var/lib"
+      mkdir -p -m 0700 "$root/var/lib/private" "$root/root" /run/containers
+      if ! [ -e "$root/etc/os-release" ]; then
+        touch "$root/etc/os-release"
+      fi
+
+      if ! [ -e "$root/etc/machine-id" ]; then
+        touch "$root/etc/machine-id"
+      fi
+
+      mkdir -p -m 0755 \
+        "/nix/var/nix/profiles/per-container/$INSTANCE" \
+        "/nix/var/nix/gcroots/per-container/$INSTANCE"
+
+      cp --remove-destination /etc/resolv.conf "$root/etc/resolv.conf"
+
+      if [ "$PRIVATE_NETWORK" = 1 ]; then
+        extraFlags+=" --private-network"
+      fi
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        extraFlags+=" --network-veth"
+      fi
+
+      if [ -n "$HOST_PORT" ]; then
+        OIFS=$IFS
+        IFS=","
+        for i in $HOST_PORT
+        do
+            extraFlags+=" --port=$i"
+        done
+        IFS=$OIFS
+      fi
+
+      if [ -n "$HOST_BRIDGE" ]; then
+        extraFlags+=" --network-bridge=$HOST_BRIDGE"
+      fi
+
+      extraFlags+=" ${concatStringsSep " " (mapAttrsToList nspawnExtraVethArgs cfg.extraVeths)}"
+
+      for iface in $INTERFACES; do
+        extraFlags+=" --network-interface=$iface"
+      done
+
+      for iface in $MACVLANS; do
+        extraFlags+=" --network-macvlan=$iface"
+      done
+
+      # If the host is 64-bit and the container is 32-bit, add a
+      # --personality flag.
+      ${optionalString (config.nixpkgs.localSystem.system == "x86_64-linux") ''
+        if [ "$(< ''${SYSTEM_PATH:-/nix/var/nix/profiles/per-container/$INSTANCE/system}/system)" = i686-linux ]; then
+          extraFlags+=" --personality=x86"
+        fi
+      ''}
+
+      # Run systemd-nspawn without startup notification (we'll
+      # wait for the container systemd to signal readiness).
+      exec ${config.systemd.package}/bin/systemd-nspawn \
+        --keep-unit \
+        -M "$INSTANCE" -D "$root" $extraFlags \
+        $EXTRA_NSPAWN_FLAGS \
+        --notify-ready=yes \
+        --bind-ro=/nix/store \
+        --bind-ro=/nix/var/nix/db \
+        --bind-ro=/nix/var/nix/daemon-socket \
+        --bind="/nix/var/nix/profiles/per-container/$INSTANCE:/nix/var/nix/profiles" \
+        --bind="/nix/var/nix/gcroots/per-container/$INSTANCE:/nix/var/nix/gcroots" \
+        ${optionalString (!cfg.ephemeral) "--link-journal=try-guest"} \
+        --setenv PRIVATE_NETWORK="$PRIVATE_NETWORK" \
+        --setenv HOST_BRIDGE="$HOST_BRIDGE" \
+        --setenv HOST_ADDRESS="$HOST_ADDRESS" \
+        --setenv LOCAL_ADDRESS="$LOCAL_ADDRESS" \
+        --setenv HOST_ADDRESS6="$HOST_ADDRESS6" \
+        --setenv LOCAL_ADDRESS6="$LOCAL_ADDRESS6" \
+        --setenv HOST_PORT="$HOST_PORT" \
+        --setenv PATH="$PATH" \
+        ${optionalString cfg.ephemeral "--ephemeral"} \
+        ${if cfg.additionalCapabilities != null && cfg.additionalCapabilities != [] then
+          ''--capability="${concatStringsSep "," cfg.additionalCapabilities}"'' else ""
+        } \
+        ${if cfg.tmpfs != null && cfg.tmpfs != [] then
+          ''--tmpfs=${concatStringsSep " --tmpfs=" cfg.tmpfs}'' else ""
+        } \
+        ${containerInit cfg} "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/init"
+    '';
+
+  preStartScript = cfg:
+    ''
+      # Clean up existing machined registration and interfaces.
+      machinectl terminate "$INSTANCE" 2> /dev/null || true
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        ip link del dev "ve-$INSTANCE" 2> /dev/null || true
+        ip link del dev "vb-$INSTANCE" 2> /dev/null || true
+      fi
+
+      ${concatStringsSep "\n" (
+        mapAttrsToList (name: cfg:
+          ''ip link del dev ${name} 2> /dev/null || true ''
+        ) cfg.extraVeths
+      )}
+   '';
+
+  postStartScript = (cfg:
+    let
+      ipcall = cfg: ipcmd: variable: attribute:
+        if cfg.${attribute} == null then
+          ''
+            if [ -n "${variable}" ]; then
+              ${ipcmd} add ${variable} dev $ifaceHost
+            fi
+          ''
+        else
+          ''${ipcmd} add ${cfg.${attribute}} dev $ifaceHost'';
+      renderExtraVeth = name: cfg:
+        if cfg.hostBridge != null then
+          ''
+            # Add ${name} to bridge ${cfg.hostBridge}
+            ip link set dev ${name} master ${cfg.hostBridge} up
+          ''
+        else
+          ''
+            echo "Bring ${name} up"
+            ip link set dev ${name} up
+            # Set IPs and routes for ${name}
+            ${optionalString (cfg.hostAddress != null) ''
+              ip addr add ${cfg.hostAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.hostAddress6 != null) ''
+              ip -6 addr add ${cfg.hostAddress6} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress != null) ''
+              ip route add ${cfg.localAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress6 != null) ''
+              ip -6 route add ${cfg.localAddress6} dev ${name}
+            ''}
+          '';
+    in
+      ''
+        if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+           [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+          if [ -z "$HOST_BRIDGE" ]; then
+            ifaceHost=ve-$INSTANCE
+            ip link set dev $ifaceHost up
+
+            ${ipcall cfg "ip addr" "$HOST_ADDRESS" "hostAddress"}
+            ${ipcall cfg "ip -6 addr" "$HOST_ADDRESS6" "hostAddress6"}
+            ${ipcall cfg "ip route" "$LOCAL_ADDRESS" "localAddress"}
+            ${ipcall cfg "ip -6 route" "$LOCAL_ADDRESS6" "localAddress6"}
+          fi
+          ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+        fi
+      ''
+  );
+
+  serviceDirectives = cfg: {
+    ExecReload = pkgs.writeScript "reload-container"
+      ''
+        #! ${pkgs.runtimeShell} -e
+        ${pkgs.nixos-container}/bin/nixos-container run "$INSTANCE" -- \
+          bash --login -c "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/bin/switch-to-configuration test"
+      '';
+
+    SyslogIdentifier = "container %i";
+
+    EnvironmentFile = "-/etc/containers/%i.conf";
+
+    Type = "notify";
+
+    RuntimeDirectory = lib.optional cfg.ephemeral "containers/%i";
+
+    # Note that on reboot, systemd-nspawn returns 133, so this
+    # unit will be restarted. On poweroff, it returns 0, so the
+    # unit won't be restarted.
+    RestartForceExitStatus = "133";
+    SuccessExitStatus = "133";
+
+    # Some containers take long to start
+    # especially when you automatically start many at once
+    TimeoutStartSec = cfg.timeoutStartSec;
+
+    Restart = "on-failure";
+
+    Slice = "machine.slice";
+    Delegate = true;
+
+    # Hack: we don't want to kill systemd-nspawn, since we call
+    # "machinectl poweroff" in preStop to shut down the
+    # container cleanly. But systemd requires sending a signal
+    # (at least if we want remaining processes to be killed
+    # after the timeout). So send an ignored signal.
+    KillMode = "mixed";
+    KillSignal = "WINCH";
+
+    DevicePolicy = "closed";
+    DeviceAllow = map (d: "${d.node} ${d.modifier}") cfg.allowedDevices;
+  };
+
+
+  system = config.nixpkgs.localSystem.system;
+
+  bindMountOpts = { name, ... }: {
+
+    options = {
+      mountPoint = mkOption {
+        example = "/mnt/usb";
+        type = types.str;
+        description = "Mount point on the container file system.";
+      };
+      hostPath = mkOption {
+        default = null;
+        example = "/home/alice";
+        type = types.nullOr types.str;
+        description = "Location of the host path to be mounted.";
+      };
+      isReadOnly = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Determine whether the mounted path will be accessed in read-only mode.";
+      };
+    };
+
+    config = {
+      mountPoint = mkDefault name;
+    };
+
+  };
+
+  allowedDeviceOpts = { ... }: {
+    options = {
+      node = mkOption {
+        example = "/dev/net/tun";
+        type = types.str;
+        description = "Path to device node";
+      };
+      modifier = mkOption {
+        example = "rw";
+        type = types.str;
+        description = ''
+          Device node access modifier. Takes a combination
+          <literal>r</literal> (read), <literal>w</literal> (write), and
+          <literal>m</literal> (mknod). See the
+          <literal>systemd.resource-control(5)</literal> man page for more
+          information.'';
+      };
+    };
+  };
+
+
+  mkBindFlag = d:
+               let flagPrefix = if d.isReadOnly then " --bind-ro=" else " --bind=";
+                   mountstr = if d.hostPath != null then "${d.hostPath}:${d.mountPoint}" else "${d.mountPoint}";
+               in flagPrefix + mountstr ;
+
+  mkBindFlags = bs: concatMapStrings mkBindFlag (lib.attrValues bs);
+
+  networkOptions = {
+    hostBridge = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "br0";
+      description = ''
+        Put the host-side of the veth-pair into the named bridge.
+        Only one of hostAddress* or hostBridge can be given.
+      '';
+    };
+
+    forwardPorts = mkOption {
+      type = types.listOf (types.submodule {
+        options = {
+          protocol = mkOption {
+            type = types.str;
+            default = "tcp";
+            description = "The protocol specifier for port forwarding between host and container";
+          };
+          hostPort = mkOption {
+            type = types.int;
+            description = "Source port of the external interface on host";
+          };
+          containerPort = mkOption {
+            type = types.nullOr types.int;
+            default = null;
+            description = "Target port of container";
+          };
+        };
+      });
+      default = [];
+      example = [ { protocol = "tcp"; hostPort = 8080; containerPort = 80; } ];
+      description = ''
+        List of forwarded ports from host to container. Each forwarded port
+        is specified by protocol, hostPort and containerPort. By default,
+        protocol is tcp and hostPort and containerPort are assumed to be
+        the same if containerPort is not explicitly given.
+      '';
+    };
+
+
+    hostAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.1";
+      description = ''
+        The IPv4 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    hostAddress6 = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "fc00::1";
+      description = ''
+        The IPv6 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    localAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.2";
+      description = ''
+        The IPv4 address assigned to the interface in the container.
+        If a hostBridge is used, this should be given with netmask to access
+        the whole network. Otherwise the default netmask is /32 and routing is
+        set up from localAddress to hostAddress and back.
+      '';
+    };
+
+    localAddress6 = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "fc00::2";
+      description = ''
+        The IPv6 address assigned to the interface in the container.
+        If a hostBridge is used, this should be given with netmask to access
+        the whole network. Otherwise the default netmask is /128 and routing is
+        set up from localAddress6 to hostAddress6 and back.
+      '';
+    };
+
+  };
+
+  dummyConfig =
+    {
+      extraVeths = {};
+      additionalCapabilities = [];
+      ephemeral = false;
+      timeoutStartSec = "15s";
+      allowedDevices = [];
+      hostAddress = null;
+      hostAddress6 = null;
+      localAddress = null;
+      localAddress6 = null;
+      tmpfs = null;
+    };
+
+in
+
+{
+  options = {
+
+    boot.isContainer = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether this NixOS machine is a lightweight container running
+        in another NixOS system. If set to true, support for nested
+        containers is disabled by default, but can be reenabled by
+        setting <option>boot.enableContainers</option> to true.
+      '';
+    };
+
+    boot.enableContainers = mkOption {
+      type = types.bool;
+      default = !config.boot.isContainer;
+      description = ''
+        Whether to enable support for NixOS containers. Defaults to true
+        (at no cost if containers are not actually used), but only if the
+        system is not itself a lightweight container running on a host.
+        To enable support for nested containers, this option has to be
+        explicitly set to true (in the outer container).
+      '';
+    };
+
+    containers = mkOption {
+      type = types.attrsOf (types.submodule (
+        { config, options, name, ... }:
+        {
+          options = {
+
+            config = mkOption {
+              description = ''
+                A specification of the desired configuration of this
+                container, as a NixOS module.
+              '';
+              type = let
+                confPkgs = if config.pkgs == null then pkgs else config.pkgs;
+              in lib.mkOptionType {
+                name = "Toplevel NixOS config";
+                merge = loc: defs: (import (confPkgs.path + "/nixos/lib/eval-config.nix") {
+                  inherit system;
+                  pkgs = confPkgs;
+                  baseModules = import (confPkgs.path + "/nixos/modules/module-list.nix");
+                  inherit (confPkgs) lib;
+                  modules =
+                    let
+                      extraConfig = {
+                        _file = "module at ${__curPos.file}:${toString __curPos.line}";
+                        config = {
+                          boot.isContainer = true;
+                          networking.hostName = mkDefault name;
+                          networking.useDHCP = false;
+                          assertions = [
+                            {
+                              assertion =  config.privateNetwork -> stringLength name < 12;
+                              message = ''
+                                Container name `${name}` is too long: when `privateNetwork` is enabled, container names
+                                cannot be longer than 11 characters, because the container's interface name is derived from it.
+                                This might be fixed in the future. See https://github.com/NixOS/nixpkgs/issues/38509
+                              '';
+                            }
+                          ];
+                        };
+                      };
+                    in [ extraConfig ] ++ (map (x: x.value) defs);
+                  prefix = [ "containers" name ];
+                }).config;
+              };
+            };
+
+            path = mkOption {
+              type = types.path;
+              example = "/nix/var/nix/profiles/containers/webserver";
+              description = ''
+                As an alternative to specifying
+                <option>config</option>, you can specify the path to
+                the evaluated NixOS system configuration, typically a
+                symlink to a system profile.
+              '';
+            };
+
+            additionalCapabilities = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "CAP_NET_ADMIN" "CAP_MKNOD" ];
+              description = ''
+                Grant additional capabilities to the container.  See the
+                capabilities(7) and systemd-nspawn(1) man pages for more
+                information.
+              '';
+            };
+
+            pkgs = mkOption {
+              type = types.nullOr types.attrs;
+              default = null;
+              example = literalExample "pkgs";
+              description = ''
+                Customise which nixpkgs to use for this container.
+              '';
+            };
+
+            ephemeral = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Runs the container in ephemeral mode with an empty root filesystem at boot.
+                This way the container will be bootstrapped from scratch on each boot
+                and cleaned up on shutdown, leaving no traces behind.
+                Useful for completely stateless, reproducible containers.
+
+                Note that this option might require some adjustments to the container configuration,
+                e.g. you might want to set
+                <varname>systemd.network.networks.$interface.dhcpV4Config.ClientIdentifier</varname> to "mac"
+                if you use the <varname>macvlans</varname> option.
+                This way the DHCP client identifier will be stable across container restarts.
+
+                Note that the container journal will not be linked to the host if this option is enabled.
+              '';
+            };
+
+            enableTun = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Allows the container to create and setup tunnel interfaces
+                by granting the <literal>NET_ADMIN</literal> capability and
+                enabling access to <literal>/dev/net/tun</literal>.
+              '';
+            };
+
+            privateNetwork = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Whether to give the container its own private virtual
+                Ethernet interface.  The interface is called
+                <literal>eth0</literal>, and is hooked up to the interface
+                <literal>ve-<replaceable>container-name</replaceable></literal>
+                on the host.  If this option is not set, then the
+                container shares the network interfaces of the host,
+                and can bind to any port on any interface.
+              '';
+            };
+
+            interfaces = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = ''
+                The list of interfaces to be moved into the container.
+              '';
+            };
+
+            macvlans = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = ''
+                The list of host interfaces from which macvlans will be
+                created. For each interface specified, a macvlan interface
+                will be created and moved to the container.
+              '';
+            };
+
+            extraVeths = mkOption {
+              type = with types; attrsOf (submodule { options = networkOptions; });
+              default = {};
+              description = ''
+                Extra veth-pairs to be created for the container.
+              '';
+            };
+
+            autoStart = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Whether the container is automatically started at boot-time.
+              '';
+            };
+
+            timeoutStartSec = mkOption {
+              type = types.str;
+              default = "1min";
+              description = ''
+                Time for the container to start. In case of a timeout,
+                the container processes get killed.
+                See <citerefentry><refentrytitle>systemd.time</refentrytitle>
+                <manvolnum>7</manvolnum></citerefentry>
+                for more information about the format.
+              '';
+            };
+
+            bindMounts = mkOption {
+              type = with types; loaOf (submodule bindMountOpts);
+              default = {};
+              example = literalExample ''
+                { "/home" = { hostPath = "/home/alice";
+                              isReadOnly = false; };
+                }
+              '';
+
+              description =
+                ''
+                  Extra directories on the host that are bind-mounted into the container.
+                '';
+            };
+
+            allowedDevices = mkOption {
+              type = with types; listOf (submodule allowedDeviceOpts);
+              default = [];
+              example = [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              description = ''
+                A list of device nodes to which the container has access.
+              '';
+            };
+
+            tmpfs = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "/var" ];
+              description = ''
+                Mounts a set of tmpfs file systems into the container.
+                Multiple paths can be specified.
+                Valid items must conform to the --tmpfs argument
+                of systemd-nspawn. See systemd-nspawn(1) for details.
+              '';
+            };
+
+            extraFlags = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "--drop-capability=CAP_SYS_CHROOT" ];
+              description = ''
+                Extra flags passed to the systemd-nspawn command.
+                See systemd-nspawn(1) for details.
+              '';
+            };
+
+          } // networkOptions;
+
+          config = mkMerge
+            [
+              (mkIf options.config.isDefined {
+                path = config.config.system.build.toplevel;
+              })
+            ];
+        }));
+
+      default = {};
+      example = literalExample
+        ''
+          { webserver =
+              { path = "/nix/var/nix/profiles/webserver";
+              };
+            database =
+              { config =
+                  { config, pkgs, ... }:
+                  { services.postgresql.enable = true;
+                    services.postgresql.package = pkgs.postgresql_9_6;
+
+                    system.stateVersion = "17.03";
+                  };
+              };
+          }
+        '';
+      description = ''
+        A set of NixOS system configurations to be run as lightweight
+        containers.  Each container appears as a service
+        <literal>container-<replaceable>name</replaceable></literal>
+        on the host system, allowing it to be started and stopped via
+        <command>systemctl</command>.
+      '';
+    };
+
+  };
+
+
+  config = mkIf (config.boot.enableContainers) (let
+
+    unit = {
+      description = "Container '%i'";
+
+      unitConfig.RequiresMountsFor = "/var/lib/containers/%i";
+
+      path = [ pkgs.iproute ];
+
+      environment = {
+        root = "/var/lib/containers/%i";
+        INSTANCE = "%i";
+      };
+
+      preStart = preStartScript dummyConfig;
+
+      script = startScript dummyConfig;
+
+      postStart = postStartScript dummyConfig;
+
+      preStop = "machinectl poweroff $INSTANCE";
+
+      restartIfChanged = false;
+
+      serviceConfig = serviceDirectives dummyConfig;
+    };
+  in {
+    systemd.targets.multi-user.wants = [ "machines.target" ];
+
+    systemd.services = listToAttrs (filter (x: x.value != null) (
+      # The generic container template used by imperative containers
+      [{ name = "container@"; value = unit; }]
+      # declarative containers
+      ++ (mapAttrsToList (name: cfg: nameValuePair "container@${name}" (let
+          containerConfig = cfg // (
+          if cfg.enableTun then
+            {
+              allowedDevices = cfg.allowedDevices
+                ++ [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              additionalCapabilities = cfg.additionalCapabilities
+                ++ [ "CAP_NET_ADMIN" ];
+            }
+          else {});
+        in
+          recursiveUpdate unit {
+            preStart = preStartScript containerConfig;
+            script = startScript containerConfig;
+            postStart = postStartScript containerConfig;
+            serviceConfig = serviceDirectives containerConfig;
+            unitConfig.RequiresMountsFor = lib.optional (!containerConfig.ephemeral) "/var/lib/containers/%i";
+            environment.root = if containerConfig.ephemeral then "/run/containers/%i" else "/var/lib/containers/%i";
+          } // (
+          if containerConfig.autoStart then
+            {
+              wantedBy = [ "machines.target" ];
+              wants = [ "network.target" ];
+              after = [ "network.target" ];
+              restartTriggers = [
+                containerConfig.path
+                config.environment.etc."containers/${name}.conf".source
+              ];
+              restartIfChanged = true;
+            }
+          else {})
+      )) config.containers)
+    ));
+
+    # Generate a configuration file in /etc/containers for each
+    # container so that container@.target can get the container
+    # configuration.
+    environment.etc =
+      let mkPortStr = p: p.protocol + ":" + (toString p.hostPort) + ":" + (if p.containerPort == null then toString p.hostPort else toString p.containerPort);
+      in mapAttrs' (name: cfg: nameValuePair "containers/${name}.conf"
+      { text =
+          ''
+            SYSTEM_PATH=${cfg.path}
+            ${optionalString cfg.privateNetwork ''
+              PRIVATE_NETWORK=1
+              ${optionalString (cfg.hostBridge != null) ''
+                HOST_BRIDGE=${cfg.hostBridge}
+              ''}
+              ${optionalString (length cfg.forwardPorts > 0) ''
+                HOST_PORT=${concatStringsSep "," (map mkPortStr cfg.forwardPorts)}
+              ''}
+              ${optionalString (cfg.hostAddress != null) ''
+                HOST_ADDRESS=${cfg.hostAddress}
+              ''}
+              ${optionalString (cfg.hostAddress6 != null) ''
+                HOST_ADDRESS6=${cfg.hostAddress6}
+              ''}
+              ${optionalString (cfg.localAddress != null) ''
+                LOCAL_ADDRESS=${cfg.localAddress}
+              ''}
+              ${optionalString (cfg.localAddress6 != null) ''
+                LOCAL_ADDRESS6=${cfg.localAddress6}
+              ''}
+            ''}
+            INTERFACES="${toString cfg.interfaces}"
+            MACVLANS="${toString cfg.macvlans}"
+            ${optionalString cfg.autoStart ''
+              AUTO_START=1
+            ''}
+            EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts +
+              optionalString (cfg.extraFlags != [])
+                (" " + concatStringsSep " " cfg.extraFlags)}"
+          '';
+      }) config.containers;
+
+    # Generate /etc/hosts entries for the containers.
+    networking.extraHosts = concatStrings (mapAttrsToList (name: cfg: optionalString (cfg.localAddress != null)
+      ''
+        ${head (splitString "/" cfg.localAddress)} ${name}.containers
+      '') config.containers);
+
+    networking.dhcpcd.denyInterfaces = [ "ve-*" "vb-*" ];
+
+    services.udev.extraRules = optionalString config.networking.networkmanager.enable ''
+      # Don't manage interfaces created by nixos-container.
+      ENV{INTERFACE}=="v[eb]-*", ENV{NM_UNMANAGED}="1"
+    '';
+
+    environment.systemPackages = [ pkgs.nixos-container ];
+
+    boot.kernelModules = [
+      "bridge"
+      "macvlan"
+      "tap"
+      "tun"
+    ];
+  });
+}
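+
+# A usage sketch for a declarative container with a private veth pair, a
+# forwarded port and a read-only bind mount (addresses and paths are
+# placeholders):
+#
+#   containers.webserver = {
+#     autoStart = true;
+#     privateNetwork = true;
+#     hostAddress = "10.231.136.1";
+#     localAddress = "10.231.136.2";
+#     forwardPorts = [ { protocol = "tcp"; hostPort = 8080; containerPort = 80; } ];
+#     bindMounts."/var/www" = { hostPath = "/srv/www"; };
+#     config = { config, pkgs, ... }: {
+#       services.nginx.enable = true;
+#       networking.firewall.allowedTCPPorts = [ 80 ];
+#     };
+#   };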
diff --git a/nixpkgs/nixos/modules/virtualisation/oci-containers.nix b/nixpkgs/nixos/modules/virtualisation/oci-containers.nix
new file mode 100644
index 000000000000..a46dd65eb491
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/oci-containers.nix
@@ -0,0 +1,323 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.oci-containers;
+  proxy_env = config.networking.proxy.envVars;
+
+  defaultBackend = options.virtualisation.oci-containers.backend.default;
+
+  containerOptions =
+    { ... }: {
+
+      options = {
+
+        image = mkOption {
+          type = with types; str;
+          description = "OCI image to run.";
+          example = "library/hello-world";
+        };
+
+        imageFile = mkOption {
+          type = with types; nullOr package;
+          default = null;
+          description = ''
+            Path to an image file to load instead of pulling from a registry.
+            If defined, do not pull from registry.
+
+            You still need to set the <literal>image</literal> attribute, as it
+            will be used as the image name for docker to start a container.
+          '';
+          example = literalExample "pkgs.dockerTools.buildDockerImage {...};";
+        };
+
+        cmd = mkOption {
+          type =  with types; listOf str;
+          default = [];
+          description = "Commandline arguments to pass to the image's entrypoint.";
+          example = literalExample ''
+            ["--port=9000"]
+          '';
+        };
+
+        entrypoint = mkOption {
+          type = with types; nullOr str;
+          description = "Override the default entrypoint of the image.";
+          default = null;
+          example = "/bin/my-app";
+        };
+
+        environment = mkOption {
+          type = with types; attrsOf str;
+          default = {};
+          description = "Environment variables to set for this container.";
+          example = literalExample ''
+            {
+              DATABASE_HOST = "db.example.com";
+              DATABASE_PORT = "3306";
+            }
+        '';
+        };
+
+        log-driver = mkOption {
+          type = types.str;
+          default = "journald";
+          description = ''
+            Logging driver for the container.  The default of
+            <literal>"journald"</literal> means that the container's logs will be
+            handled as part of the systemd unit.
+
+            For more details and a full list of logging drivers, refer to the respective backend's documentation.
+
+            For Docker:
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver">Docker engine documentation</link>
+
+            For Podman:
+            Refer to the podman-run(1) man page.
+          '';
+        };
+
+        ports = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = ''
+            Network ports to publish from the container to the outer host.
+
+            Valid formats:
+
+            <itemizedlist>
+              <listitem>
+                <para>
+                  <literal>&lt;ip&gt;:&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <literal>&lt;ip&gt;::&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <literal>&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <literal>&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+            </itemizedlist>
+
+            Both <literal>hostPort</literal> and
+            <literal>containerPort</literal> can be specified as a range of
+            ports.  When specifying ranges for both, the number of container
+            ports in the range must match the number of host ports in the
+            range.  Example: <literal>1234-1236:1234-1236/tcp</literal>
+
+            When specifying a range for <literal>hostPort</literal> only, the
+            <literal>containerPort</literal> must <emphasis>not</emphasis> be a
+            range.  In this case, the container port is published somewhere
+            within the specified <literal>hostPort</literal> range.  Example:
+            <literal>1234-1236:1234/tcp</literal>
+
+            Refer to the
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#expose-incoming-ports">
+            Docker engine documentation</link> for full details.
+          '';
+          example = literalExample ''
+            [
+              "8080:9000"
+            ]
+          '';
+        };
+
+        user = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = ''
+            Override the username or UID (and optionally groupname or GID) used
+            in the container.
+          '';
+          example = "nobody:nogroup";
+        };
+
+        volumes = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = ''
+            List of volumes to attach to this container.
+
+            Note that this is a list of <literal>"src:dst"</literal> strings to
+            allow for <literal>src</literal> to refer to
+            <literal>/nix/store</literal> paths, which would be difficult with an
+            attribute set.  There are also a variety of mount options available
+            as a third field; please refer to the
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#volume-shared-filesystems">
+            docker engine documentation</link> for details.
+          '';
+          example = literalExample ''
+            [
+              "volume_name:/path/inside/container"
+              "/path/on/host:/path/inside/container"
+            ]
+          '';
+        };
+
+        workdir = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = "Override the default working directory for the container.";
+          example = "/var/lib/hello_world";
+        };
+
+        dependsOn = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = ''
+            Define which other containers this one depends on. They will be added
+            to both <literal>After</literal> and <literal>Requires</literal> of the
+            generated systemd unit.
+
+            Use the same name as the attribute under
+            <literal>virtualisation.oci-containers.containers</literal>.
+          '';
+          example = literalExample ''
+            virtualisation.oci-containers.containers = {
+              node1 = { };
+              node2 = {
+                dependsOn = [ "node1" ];
+              };
+            };
+          '';
+        };
+
+        extraOptions = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = "Extra options for <command>${defaultBackend} run</command>.";
+          example = literalExample ''
+            ["--network=host"]
+          '';
+        };
+
+        autoStart = mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            When enabled, the container is automatically started on boot.
+            If this option is set to false, the container has to be started on-demand via its service.
+          '';
+        };
+      };
+    };
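+
+  # Illustrative (hypothetical) use of the options above from a host's
+  # configuration.nix; the image name and paths are placeholders:
+  #
+  #   virtualisation.oci-containers.containers.webserver = {
+  #     image = "nginx:1.19";
+  #     ports = [ "127.0.0.1:8080:80" ];
+  #     volumes = [ "/var/www:/usr/share/nginx/html:ro" ];
+  #     environment = { TZ = "UTC"; };
+  #   };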
+
+  mkService = name: container: let
+    dependsOn = map (x: "${cfg.backend}-${x}.service") container.dependsOn;
+  in {
+    wantedBy = [] ++ optional (container.autoStart) "multi-user.target";
+    after = lib.optionals (cfg.backend == "docker") [ "docker.service" "docker.socket" ] ++ dependsOn;
+    requires = dependsOn;
+    environment = proxy_env;
+
+    path =
+      if cfg.backend == "docker" then [ pkgs.docker ]
+      else if cfg.backend == "podman" then [ config.virtualisation.podman.package ]
+      else throw "Unhandled backend: ${cfg.backend}";
+
+    preStart = ''
+      ${cfg.backend} rm -f ${name} || true
+      ${optionalString (container.imageFile != null) ''
+        ${cfg.backend} load -i ${container.imageFile}
+        ''}
+      '';
+    postStop = "${cfg.backend} rm -f ${name} || true";
+
+    serviceConfig = {
+      StandardOutput = "null";
+      StandardError = "null";
+      ExecStart = concatStringsSep " \\\n  " ([
+        "${config.system.path}/bin/${cfg.backend} run"
+        "--rm"
+        "--name=${name}"
+        "--log-driver=${container.log-driver}"
+      ] ++ optional (container.entrypoint != null)
+        "--entrypoint=${escapeShellArg container.entrypoint}"
+        ++ (mapAttrsToList (k: v: "-e ${escapeShellArg k}=${escapeShellArg v}") container.environment)
+        ++ map (p: "-p ${escapeShellArg p}") container.ports
+        ++ optional (container.user != null) "-u ${escapeShellArg container.user}"
+        ++ map (v: "-v ${escapeShellArg v}") container.volumes
+        ++ optional (container.workdir != null) "-w ${escapeShellArg container.workdir}"
+        ++ map escapeShellArg container.extraOptions
+        ++ [container.image]
+        ++ map escapeShellArg container.cmd
+      );
+
+      ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || ${cfg.backend} stop ${name}"'';
+
+      ### There is no generalized way of supporting `reload` for docker
+      ### containers. Some containers may respond well to SIGHUP sent to their
+      ### init process, but it is not guaranteed; some apps have other reload
+      ### mechanisms, some don't have a reload signal at all, and some docker
+      ### images just have broken signal handling.  The best compromise in this
+      ### case is probably to leave ExecReload undefined, so `systemctl reload`
+      ### will at least result in an error instead of potentially undefined
+      ### behaviour.
+      ###
+      ### Advanced users can still override this part of the unit to implement
+      ### a custom reload handler, since the result of all this is a normal
+      ### systemd service from the perspective of the NixOS module system.
+      ###
+      # ExecReload = ...;
+      ###
+
+      TimeoutStartSec = 0;
+      TimeoutStopSec = 120;
+      Restart = "always";
+    };
+  };
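+
+  # For illustration only (a sketch, not generated output): with the "docker"
+  # backend, a container declared as
+  #   containers.hello = { image = "hello-world:latest"; ports = [ "8080:80" ]; };
+  # yields an ExecStart roughly of the form
+  #   .../bin/docker run --rm --name=hello --log-driver=journald -p '8080:80' hello-world:latest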
+
+in {
+  imports = [
+    (
+      lib.mkChangedOptionModule
+      [ "docker-containers"  ]
+      [ "virtualisation" "oci-containers" ]
+      (oldcfg: {
+        backend = "docker";
+        containers = lib.mapAttrs (n: v: builtins.removeAttrs (v // {
+          extraOptions = v.extraDockerOptions or [];
+        }) [ "extraDockerOptions" ]) oldcfg.docker-containers;
+      })
+    )
+  ];
+
+  options.virtualisation.oci-containers = {
+
+    backend = mkOption {
+      type = types.enum [ "podman" "docker" ];
+      default =
+        # TODO: Once https://github.com/NixOS/nixpkgs/issues/77925 is resolved default to podman
+        # if versionAtLeast config.system.stateVersion "20.09" then "podman"
+        # else "docker";
+        "docker";
+      description = "The underlying Docker implementation to use.";
+    };
+
+    containers = mkOption {
+      default = {};
+      type = types.attrsOf (types.submodule containerOptions);
+      description = "OCI (Docker) containers to run as systemd services.";
+    };
+
+  };
+
+  config = lib.mkIf (cfg.containers != {}) (lib.mkMerge [
+    {
+      systemd.services = mapAttrs' (n: v: nameValuePair "${cfg.backend}-${n}" (mkService n v)) cfg.containers;
+    }
+    (lib.mkIf (cfg.backend == "podman") {
+      virtualisation.podman.enable = true;
+    })
+    (lib.mkIf (cfg.backend == "docker") {
+      virtualisation.docker.enable = true;
+    })
+  ]);
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/openstack-config.nix b/nixpkgs/nixos/modules/virtualisation/openstack-config.nix
new file mode 100644
index 000000000000..c2da5d0d2301
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/openstack-config.nix
@@ -0,0 +1,58 @@
+{ pkgs, lib, ... }:
+
+with lib;
+
+let
+  metadataFetcher = import ./ec2-metadata-fetcher.nix {
+    targetRoot = "/";
+    wgetExtraOptions = "--retry-connrefused";
+  };
+in
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+    ../profiles/headless.nix
+    # The OpenStack metadata service also exposes data via an EC2-compatible API.
+    ./ec2-data.nix
+    ./amazon-init.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=ttyS0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    # Allow root logins
+    services.openssh = {
+      enable = true;
+      permitRootLogin = "prohibit-password";
+      passwordAuthentication = mkDefault false;
+    };
+
+    # Force getting the hostname from the OpenStack metadata service.
+    networking.hostName = mkDefault "";
+
+    systemd.services.openstack-init = {
+      path = [ pkgs.wget ];
+      description = "Fetch Metadata on startup";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "apply-ec2-data.service" "amazon-init.service"];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      script = metadataFetcher;
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/openvswitch.nix b/nixpkgs/nixos/modules/virtualisation/openvswitch.nix
new file mode 100644
index 000000000000..c6a3ceddc3e0
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/openvswitch.nix
@@ -0,0 +1,190 @@
+# Systemd services for openvswitch
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.vswitch;
+
+in {
+
+  options.virtualisation.vswitch = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to enable Open vSwitch. The configuration database daemon
+        (ovsdb-server) will be started.
+        '';
+    };
+
+    resetOnStart = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to reset the Open vSwitch configuration database to a default
+        configuration on every start of the systemd <literal>ovsdb.service</literal>.
+        '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.openvswitch;
+      defaultText = "pkgs.openvswitch";
+      description = ''
+        Open vSwitch package to use.
+      '';
+    };
+
+    ipsec = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to start the racoon IPsec service for Open vSwitch.
+        Only supported if the Open vSwitch version is less than 2.6.0.
+        Use <literal>virtualisation.vswitch.package = pkgs.openvswitch-lts</literal>
+        for a version that supports IPsec over GRE.
+      '';
+    };
+  };
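+
+  # Illustrative (hypothetical) usage from a system configuration; values are
+  # placeholders:
+  #   virtualisation.vswitch.enable = true;
+  #   virtualisation.vswitch.resetOnStart = false;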
+
+  config = mkIf cfg.enable (let
+
+    # Where the communication sockets live
+    runDir = "/run/openvswitch";
+
+    # The path to an initialized version of the database
+    db = pkgs.stdenv.mkDerivation {
+      name = "vswitch.db";
+      dontUnpack = true;
+      buildPhase = "true";
+      buildInputs = with pkgs; [
+        cfg.package
+      ];
+      installPhase = "mkdir -p $out";
+    };
+
+  in (mkMerge [{
+
+    environment.systemPackages = [ cfg.package pkgs.ipsecTools ];
+
+    boot.kernelModules = [ "tun" "openvswitch" ];
+
+    boot.extraModulePackages = [ cfg.package ];
+
+    systemd.services.ovsdb = {
+      description = "Open_vSwitch Database Server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+      path = [ cfg.package ];
+      restartTriggers = [ db cfg.package ];
+      # Create the config database
+      preStart =
+        ''
+        mkdir -p ${runDir}
+        mkdir -p /var/db/openvswitch
+        chmod +w /var/db/openvswitch
+        ${optionalString cfg.resetOnStart "rm -f /var/db/openvswitch/conf.db"}
+        if [[ ! -e /var/db/openvswitch/conf.db ]]; then
+          ${cfg.package}/bin/ovsdb-tool create \
+            "/var/db/openvswitch/conf.db" \
+            "${cfg.package}/share/openvswitch/vswitch.ovsschema"
+        fi
+        chmod -R +w /var/db/openvswitch
+        if ${cfg.package}/bin/ovsdb-tool needs-conversion /var/db/openvswitch/conf.db | grep -q "yes"
+        then
+          echo "Performing database upgrade"
+          ${cfg.package}/bin/ovsdb-tool convert /var/db/openvswitch/conf.db
+        else
+          echo "Database already up to date"
+        fi
+        '';
+      serviceConfig = {
+        ExecStart =
+          ''
+          ${cfg.package}/bin/ovsdb-server \
+            --remote=punix:${runDir}/db.sock \
+            --private-key=db:Open_vSwitch,SSL,private_key \
+            --certificate=db:Open_vSwitch,SSL,certificate \
+            --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert \
+            --unixctl=ovsdb.ctl.sock \
+            --pidfile=/run/openvswitch/ovsdb.pid \
+            --detach \
+            /var/db/openvswitch/conf.db
+          '';
+        Restart = "always";
+        RestartSec = 3;
+        PIDFile = "/run/openvswitch/ovsdb.pid";
+        # Use service type 'forking' to correctly determine when ovsdb-server is ready.
+        Type = "forking";
+      };
+      postStart = ''
+        ${cfg.package}/bin/ovs-vsctl --timeout 3 --retry --no-wait init
+      '';
+    };
+
+    systemd.services.ovs-vswitchd = {
+      description = "Open_vSwitch Daemon";
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "ovsdb.service" ];
+      after = [ "ovsdb.service" ];
+      path = [ cfg.package ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/ovs-vswitchd \
+          --pidfile=/run/openvswitch/ovs-vswitchd.pid \
+          --detach
+        '';
+        PIDFile = "/run/openvswitch/ovs-vswitchd.pid";
+        # Use service type 'forking' to correctly determine when vswitchd is ready.
+        Type = "forking";
+        Restart = "always";
+        RestartSec = 3;
+      };
+    };
+
+  }
+  (mkIf (cfg.ipsec && (versionOlder cfg.package.version "2.6.0")) {
+    services.racoon.enable = true;
+    services.racoon.configPath = "${runDir}/ipsec/etc/racoon/racoon.conf";
+
+    networking.firewall.extraCommands = ''
+      iptables -I INPUT -t mangle -p esp -j MARK --set-mark 1/1
+      iptables -I INPUT -t mangle -p udp --dport 4500 -j MARK --set-mark 1/1
+    '';
+
+    systemd.services.ovs-monitor-ipsec = {
+      description = "Open_vSwitch Ipsec Daemon";
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "ovsdb.service" ];
+      before = [ "vswitchd.service" "racoon.service" ];
+      environment.UNIXCTLPATH = "/tmp/ovsdb.ctl.sock";
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/ovs-monitor-ipsec \
+            --root-prefix ${runDir}/ipsec \
+            --pidfile /run/openvswitch/ovs-monitor-ipsec.pid \
+            --monitor --detach \
+            unix:/run/openvswitch/db.sock
+        '';
+        PIDFile = "/run/openvswitch/ovs-monitor-ipsec.pid";
+        # Use service type 'forking' to correctly determine when ovs-monitor-ipsec is ready.
+        Type = "forking";
+      };
+
+      preStart = ''
+        rm -r ${runDir}/ipsec/etc/racoon/certs || true
+        mkdir -p ${runDir}/ipsec/{etc/racoon,etc/init.d/,usr/sbin/}
+        ln -fs ${pkgs.ipsecTools}/bin/setkey ${runDir}/ipsec/usr/sbin/setkey
+        ln -fs ${pkgs.writeScript "racoon-restart" ''
+        #!${pkgs.runtimeShell}
+        /run/current-system/sw/bin/systemctl $1 racoon
+        ''} ${runDir}/ipsec/etc/init.d/racoon
+      '';
+    };
+  })]));
+
+  meta.maintainers = with maintainers; [ netixx ];
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/parallels-guest.nix b/nixpkgs/nixos/modules/virtualisation/parallels-guest.nix
new file mode 100644
index 000000000000..828419fb4b9d
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/parallels-guest.nix
@@ -0,0 +1,156 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  prl-tools = config.hardware.parallels.package;
+in
+
+{
+
+  options = {
+    hardware.parallels = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This enables Parallels Tools for Linux guests, along with provided
+          video, mouse and other hardware drivers.
+        '';
+      };
+
+      autoMountShares = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Control the prlfsmountd service. While this service is running, shares cannot be
+          mounted manually through `mount -t prl_fs ...`, because the service will remount
+          them and discard any options that were set. Enable this for simple file sharing;
+          for more involved use of shares (for example, source code), disable it and mount
+          the shares manually.
+        '';
+      };
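+
+      # A minimal sketch of a manual mount when autoMountShares is disabled
+      # (share name and mount point are placeholders):
+      #   mount -t prl_fs SHARE_NAME /mnt/SHARE_NAME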
+
+      package = mkOption {
+        type = types.package;
+        default = config.boot.kernelPackages.prl-tools;
+        defaultText = "config.boot.kernelPackages.prl-tools";
+        example = literalExample "config.boot.kernelPackages.prl-tools";
+        description = ''
+          Defines which package to use for prl-tools. Override to change the version.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf config.hardware.parallels.enable {
+    services.xserver = {
+      drivers = singleton
+        { name = "prlvideo"; modules = [ prl-tools ]; };
+
+      screenSection = ''
+        Option "NoMTRR"
+      '';
+
+      config = ''
+        Section "InputClass"
+          Identifier "prlmouse"
+          MatchIsPointer "on"
+          MatchTag "prlmouse"
+          Driver "prlmouse"
+        EndSection
+      '';
+    };
+
+    hardware.opengl.package = prl-tools;
+    hardware.opengl.package32 = pkgs.pkgsi686Linux.linuxPackages.prl-tools.override { libsOnly = true; kernel = null; };
+    hardware.opengl.setLdLibraryPath = true;
+
+    services.udev.packages = [ prl-tools ];
+
+    environment.systemPackages = [ prl-tools ];
+
+    boot.extraModulePackages = [ prl-tools ];
+
+    boot.kernelModules = [ "prl_tg" "prl_eth" "prl_fs" "prl_fs_freeze" ];
+
+    services.timesyncd.enable = false;
+
+    systemd.services.prltoolsd = {
+      description = "Parallels Tools' service";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${prl-tools}/bin/prltoolsd -f";
+        PIDFile = "/var/run/prltoolsd.pid";
+      };
+    };
+
+    systemd.services.prlfsmountd = mkIf config.hardware.parallels.autoMountShares {
+      description = "Parallels Shared Folders Daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = rec {
+        ExecStart = "${prl-tools}/sbin/prlfsmountd ${PIDFile}";
+        ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /media";
+        ExecStopPost = "${prl-tools}/sbin/prlfsmountd -u";
+        PIDFile = "/run/prlfsmountd.pid";
+      };
+    };
+
+    systemd.services.prlshprint = {
+      description = "Parallels Shared Printer Tool";
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "cups.service" ];
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${prl-tools}/bin/prlshprint";
+      };
+    };
+
+    systemd.user.services = {
+      prlcc = {
+        description = "Parallels Control Center";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcc";
+        };
+      };
+      prldnd = {
+        description = "Parallels Control Center";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prldnd";
+        };
+      };
+      prl_wmouse_d  = {
+        description = "Parallels Walking Mouse Daemon";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prl_wmouse_d";
+        };
+      };
+      prlcp = {
+        description = "Parallels CopyPaste Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcp";
+        };
+      };
+      prlsga = {
+        description = "Parallels Shared Guest Applications Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlsga";
+        };
+      };
+      prlshprof = {
+        description = "Parallels Shared Profile Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlshprof";
+        };
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/podman.nix b/nixpkgs/nixos/modules/virtualisation/podman.nix
new file mode 100644
index 000000000000..652850bf5006
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/podman.nix
@@ -0,0 +1,123 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.virtualisation.podman;
+
+  inherit (lib) mkOption types;
+
+  podmanPackage = (pkgs.podman.override { inherit (cfg) extraPackages; });
+
+  # Provides a fake "docker" binary mapping to podman
+  dockerCompat = pkgs.runCommandNoCC "${podmanPackage.pname}-docker-compat-${podmanPackage.version}" {
+    outputs = [ "out" "man" ];
+    inherit (podmanPackage) meta;
+  } ''
+    mkdir -p $out/bin
+    ln -s ${podmanPackage}/bin/podman $out/bin/docker
+
+    mkdir -p $man/share/man/man1
+    for f in ${podmanPackage.man}/share/man/man1/*; do
+      basename=$(basename $f | sed s/podman/docker/g)
+      ln -s $f $man/share/man/man1/$basename
+    done
+  '';
+
+  # Copy configuration files to avoid having the entire sources in the system closure
+  copyFile = filePath: pkgs.runCommandNoCC (builtins.unsafeDiscardStringContext (builtins.baseNameOf filePath)) {} ''
+    cp ${filePath} $out
+  '';
+
+in
+{
+  meta = {
+    maintainers = lib.teams.podman.members;
+  };
+
+  options.virtualisation.podman = {
+
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option enables Podman, a daemonless container engine for
+          developing, managing, and running OCI containers on your Linux system.
+
+          It is a drop-in replacement for the <command>docker</command> command.
+        '';
+      };
+
+    dockerCompat = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Create an alias mapping <command>docker</command> to <command>podman</command>.
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = [ ];
+      example = lib.literalExample ''
+        [
+          pkgs.gvisor
+        ]
+      '';
+      description = ''
+        Extra packages to be installed in the Podman wrapper.
+      '';
+    };
+
+    libpod = mkOption {
+      default = {};
+      description = "Libpod configuration";
+      type = types.submodule {
+        options = {
+
+          extraConfig = mkOption {
+            type = types.lines;
+            default = "";
+            description = ''
+              Extra configuration that should be put in the libpod.conf
+              configuration file.
+            '';
+
+          };
+        };
+      };
+    };
+
+    package = lib.mkOption {
+      type = types.package;
+      default = podmanPackage;
+      internal = true;
+      description = ''
+        The final Podman package (including extra packages).
+      '';
+    };
+
+
+  };
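+
+  # Illustrative (hypothetical) usage from a system configuration:
+  #   virtualisation.podman = {
+  #     enable = true;
+  #     dockerCompat = true;              # alias `docker` to podman
+  #     extraPackages = [ pkgs.gvisor ];  # optional extra runtime
+  #   };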
+
+  config = lib.mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ]
+      ++ lib.optional cfg.dockerCompat dockerCompat;
+
+    environment.etc."containers/libpod.conf".text = ''
+      cni_plugin_dir = ["${pkgs.cni-plugins}/bin/"]
+
+    '' + cfg.libpod.extraConfig;
+
+    environment.etc."cni/net.d/87-podman-bridge.conflist".source = copyFile "${pkgs.podman-unwrapped.src}/cni/87-podman-bridge.conflist";
+
+    # Enable common /etc/containers configuration
+    virtualisation.containers.enable = true;
+
+    assertions = [{
+      assertion = cfg.dockerCompat -> !config.virtualisation.docker.enable;
+      message = "Option dockerCompat conflicts with docker";
+    }];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix b/nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix
new file mode 100644
index 000000000000..665224e35d8c
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.qemuGuest;
+in {
+
+  options.services.qemuGuest = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable the qemu guest agent.";
+      };
+  };
+
+  config = mkIf cfg.enable (
+      mkMerge [
+    {
+
+      services.udev.extraRules = ''
+        SUBSYSTEM=="virtio-ports", ATTR{name}=="org.qemu.guest_agent.0", TAG+="systemd" ENV{SYSTEMD_WANTS}="qemu-guest-agent.service"
+      '';
+
+      systemd.services.qemu-guest-agent = {
+        description = "Run the QEMU Guest Agent";
+        serviceConfig = {
+          ExecStart = "${pkgs.qemu.ga}/bin/qemu-ga";
+          Restart = "always";
+          RestartSec = 0;
+        };
+      };
+    }
+  ]
+  );
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix b/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix
new file mode 100644
index 000000000000..be06d6feb11f
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix
@@ -0,0 +1,672 @@
+# This module creates a virtual machine from the NixOS configuration.
+# Building the `config.system.build.vm' attribute gives you a command
+# that starts a KVM/QEMU VM running the NixOS configuration defined in
+# `config'.  The Nix store is shared read-only with the host, which
+# makes (re)building VMs very efficient.  However, it also means you
+# can't reconfigure the guest inside the guest - you need to rebuild
+# the VM in the host.  On the other hand, the root filesystem is a
+# read/writable disk image persistent across VM reboots.
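+#
+# A minimal sketch of typical usage (one common way; assumes nixos-rebuild is
+# available on the host):
+#   nixos-rebuild build-vm                # builds config.system.build.vm
+#   ./result/bin/run-*-vm                 # starts the QEMU VM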
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+with import ../../lib/qemu-flags.nix { inherit pkgs; };
+
+let
+
+  qemu = config.system.build.qemu or pkgs.qemu_test;
+
+  vmName =
+    if config.networking.hostName == ""
+    then "noname"
+    else config.networking.hostName;
+
+  cfg = config.virtualisation;
+
+  consoles = lib.concatMapStringsSep " " (c: "console=${c}") cfg.qemu.consoles;
+
+  driveOpts = { ... }: {
+
+    options = {
+
+      file = mkOption {
+        type = types.str;
+        description = "The file image used for this drive.";
+      };
+
+      driveExtraOpts = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = "Extra options passed to drive flag.";
+      };
+
+      deviceExtraOpts = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = "Extra options passed to device flag.";
+      };
+
+    };
+
+  };
+
+  driveCmdline = idx: { file, driveExtraOpts, deviceExtraOpts, ... }:
+    let
+      drvId = "drive${toString idx}";
+      mkKeyValue = generators.mkKeyValueDefault {} "=";
+      mkOpts = opts: concatStringsSep "," (mapAttrsToList mkKeyValue opts);
+      driveOpts = mkOpts (driveExtraOpts // {
+        index = idx;
+        id = drvId;
+        "if" = "none";
+        inherit file;
+      });
+      deviceOpts = mkOpts (deviceExtraOpts // {
+        drive = drvId;
+      });
+      device =
+        if cfg.qemu.diskInterface == "scsi" then
+          "-device lsi53c895a -device scsi-hd,${deviceOpts}"
+        else
+          "-device virtio-blk-pci,${deviceOpts}";
+    in
+      "-drive ${driveOpts} ${device}";
+
+  drivesCmdLine = drives: concatStringsSep " " (imap1 driveCmdline drives);
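+
+  # For illustration only (a sketch; QEMU option order may differ): a drive
+  # { file = "disk.qcow2"; driveExtraOpts.cache = "writeback"; } at index 1,
+  # with the default "virtio" disk interface, expands to roughly
+  #   -drive cache=writeback,file=disk.qcow2,id=drive1,if=none,index=1
+  #   -device virtio-blk-pci,drive=drive1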
+
+  # Shell script to start the VM.
+  startVM =
+    ''
+      #! ${pkgs.runtimeShell}
+
+      NIX_DISK_IMAGE=$(readlink -f ''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}})
+
+      if ! test -e "$NIX_DISK_IMAGE"; then
+          ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \
+            ${toString config.virtualisation.diskSize}M || exit 1
+      fi
+
+      # Create a directory for storing temporary data of the running VM.
+      if [ -z "$TMPDIR" -o -z "$USE_TMPDIR" ]; then
+          TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
+      fi
+
+      # Create a directory for exchanging data with the VM.
+      mkdir -p $TMPDIR/xchg
+
+      ${if cfg.useBootLoader then ''
+        # Create a writable copy/snapshot of the boot disk.
+        # A writable boot disk can be booted from automatically.
+        ${qemu}/bin/qemu-img create -f qcow2 -b ${bootDisk}/disk.img $TMPDIR/disk.img || exit 1
+
+        ${if cfg.useEFIBoot then ''
+          # VM needs a writable flash BIOS.
+          cp ${bootDisk}/bios.bin $TMPDIR || exit 1
+          chmod 0644 $TMPDIR/bios.bin || exit 1
+        '' else ''
+        ''}
+      '' else ''
+      ''}
+
+      cd $TMPDIR
+      idx=0
+      ${flip concatMapStrings cfg.emptyDiskImages (size: ''
+        if ! test -e "empty$idx.qcow2"; then
+            ${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${toString size}M"
+        fi
+        idx=$((idx + 1))
+      '')}
+
+      # Start QEMU.
+      exec ${qemuBinary qemu} \
+          -name ${vmName} \
+          -m ${toString config.virtualisation.memorySize} \
+          -smp ${toString config.virtualisation.cores} \
+          -device virtio-rng-pci \
+          ${concatStringsSep " " config.virtualisation.qemu.networkingOptions} \
+          -virtfs local,path=/nix/store,security_model=none,mount_tag=store \
+          -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \
+          -virtfs local,path=''${SHARED_DIR:-$TMPDIR/xchg},security_model=none,mount_tag=shared \
+          ${drivesCmdLine config.virtualisation.qemu.drives} \
+          ${toString config.virtualisation.qemu.options} \
+          $QEMU_OPTS \
+          "$@"
+    '';
+
+
+  regInfo = pkgs.closureInfo { rootPaths = config.virtualisation.pathsInNixDB; };
+
+
+  # Generate a hard disk image containing a /boot partition and GRUB
+  # in the MBR.  Used when the `useBootLoader' option is set.
+  # FIXME: use nixos/lib/make-disk-image.nix.
+  bootDisk =
+    pkgs.vmTools.runInLinuxVM (
+      pkgs.runCommand "nixos-boot-disk"
+        { preVM =
+            ''
+              mkdir $out
+              diskImage=$out/disk.img
+              bootFlash=$out/bios.bin
+              ${qemu}/bin/qemu-img create -f qcow2 $diskImage "40M"
+              ${if cfg.useEFIBoot then ''
+                cp ${pkgs.OVMF-CSM.fd}/FV/OVMF.fd $bootFlash
+                chmod 0644 $bootFlash
+              '' else ''
+              ''}
+            '';
+          buildInputs = [ pkgs.utillinux ];
+          QEMU_OPTS = if cfg.useEFIBoot
+                      then "-pflash $out/bios.bin -nographic -serial pty"
+                      else "-nographic -serial pty";
+        }
+        ''
+          # Create a /boot EFI partition with 40M and arbitrary but fixed GUIDs for reproducibility
+          ${pkgs.gptfdisk}/bin/sgdisk \
+            --set-alignment=1 --new=1:34:2047 --change-name=1:BIOSBootPartition --typecode=1:ef02 \
+            --set-alignment=512 --largest-new=2 --change-name=2:EFISystem --typecode=2:ef00 \
+            --attributes=1:set:1 \
+            --attributes=2:set:2 \
+            --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C1 \
+            --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \
+            --partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \
+            --hybrid 2 \
+            --recompute-chs /dev/vda
+          ${pkgs.dosfstools}/bin/mkfs.fat -F16 /dev/vda2
+          export MTOOLS_SKIP_CHECK=1
+          ${pkgs.mtools}/bin/mlabel -i /dev/vda2 ::boot
+
+          # Mount /boot; load necessary modules first.
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_cp437.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_iso8859-1.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/fat.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/vfat.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/efivarfs/efivarfs.ko.xz || true
+          mkdir /boot
+          mount /dev/vda2 /boot
+
+          # This is needed for GRUB 0.97, which doesn't know about virtio devices.
+          mkdir /boot/grub
+          echo '(hd0) /dev/vda' > /boot/grub/device.map
+
+          # This is needed for systemd-boot to find ESP, and udev is not available here to create this
+          mkdir -p /dev/block
+          ln -s /dev/vda2 /dev/block/254:2
+
+          # Set up system profile (normally done by nixos-rebuild / nix-env --set)
+          mkdir -p /nix/var/nix/profiles
+          ln -s ${config.system.build.toplevel} /nix/var/nix/profiles/system-1-link
+          ln -s /nix/var/nix/profiles/system-1-link /nix/var/nix/profiles/system
+
+          # Install bootloader
+          touch /etc/NIXOS
+          export NIXOS_INSTALL_BOOTLOADER=1
+          ${config.system.build.toplevel}/bin/switch-to-configuration boot
+
+          umount /boot
+        '' # */
+    );
+
+in
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+   ./docker-preloader.nix
+  ];
+
+  options = {
+
+    virtualisation.memorySize =
+      mkOption {
+        default = 384;
+        description =
+          ''
+            Memory size (M) of virtual machine.
+          '';
+      };
+
+    virtualisation.diskSize =
+      mkOption {
+        default = 512;
+        description =
+          ''
+            Disk size (M) of virtual machine.
+          '';
+      };
+
+    virtualisation.diskImage =
+      mkOption {
+        default = "./${vmName}.qcow2";
+        description =
+          ''
+            Path to the disk image containing the root filesystem.
+            The image will be created on startup if it does not
+            exist.
+          '';
+      };
+
+    virtualisation.bootDevice =
+      mkOption {
+        type = types.str;
+        example = "/dev/vda";
+        description =
+          ''
+            The disk to be used for the root filesystem.
+          '';
+      };
+
+    virtualisation.emptyDiskImages =
+      mkOption {
+        default = [];
+        type = types.listOf types.int;
+        description =
+          ''
+            Additional disk images to provide to the VM. The value is
+            a list of sizes in megabytes, one for each disk. These disks are
+            writable by the VM.
+          '';
+      };
+
+    virtualisation.graphics =
+      mkOption {
+        default = true;
+        description =
+          ''
+            Whether to run QEMU with a graphics window, or in nographic mode.
+            The serial console is enabled in both cases, but this option
+            changes which console is preferred.
+            '';
+      };
+
+    virtualisation.cores =
+      mkOption {
+        default = 1;
+        type = types.int;
+        description =
+          ''
+            Specify the number of cores the guest is permitted to use.
+            The number can be higher than the available cores on the
+            host system.
+          '';
+      };
+
+    virtualisation.pathsInNixDB =
+      mkOption {
+        default = [];
+        description =
+          ''
+            The list of paths whose closure is registered in the Nix
+            database in the VM.  All other paths in the host Nix store
+            appear in the guest Nix store as well, but are considered
+            garbage (because they are not registered in the Nix
+            database in the guest).
+          '';
+      };
+
+    virtualisation.vlans =
+      mkOption {
+        default = [ 1 ];
+        example = [ 1 2 ];
+        description =
+          ''
+            Virtual networks to which the VM is connected.  Each
+            number <replaceable>N</replaceable> in this list causes
+            the VM to have a virtual Ethernet interface attached to a
+            separate virtual network on which it will be assigned IP
+            address
+            <literal>192.168.<replaceable>N</replaceable>.<replaceable>M</replaceable></literal>,
+            where <replaceable>M</replaceable> is the index of this VM
+            in the list of VMs.
+          '';
+      };
+
+    virtualisation.writableStore =
+      mkOption {
+        default = true; # FIXME
+        description =
+          ''
+            If enabled, the Nix store in the VM is made writable by
+            layering an overlay filesystem on top of the host's Nix
+            store.
+          '';
+      };
+
+    virtualisation.writableStoreUseTmpfs =
+      mkOption {
+        default = true;
+        description =
+          ''
+            Use a tmpfs for the writable store instead of writing to the VM's
+            own filesystem.
+          '';
+      };
+
+    networking.primaryIPAddress =
+      mkOption {
+        default = "";
+        internal = true;
+        description = "Primary IP address used in /etc/hosts.";
+      };
+
+    virtualisation.qemu = {
+      options =
+        mkOption {
+          type = types.listOf types.unspecified;
+          default = [];
+          example = [ "-vga std" ];
+          description = "Options passed to QEMU.";
+        };
+
+      consoles = mkOption {
+        type = types.listOf types.str;
+        default = let
+          consoles = [ "${qemuSerialDevice},115200n8" "tty0" ];
+        in if cfg.graphics then consoles else reverseList consoles;
+        example = [ "console=tty1" ];
+        description = ''
+          The output console devices to pass to the kernel command line via the
+          <literal>console</literal> parameter, the primary console is the last
+          item of this list.
+
+          By default it enables both serial console and
+          <literal>tty0</literal>. The preferred console (last one) is based on
+          the value of <option>virtualisation.graphics</option>.
+        '';
+      };
+
+      networkingOptions =
+        mkOption {
+          default = [
+            "-net nic,netdev=user.0,model=virtio"
+            "-netdev user,id=user.0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
+          ];
+          type = types.listOf types.str;
+          description = ''
+            Networking-related command-line options that should be passed to qemu.
+            The default is to use userspace networking (slirp).
+
+            If you override this option, be advised to keep
+            ''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS} (as seen in the default)
+            to keep the default runtime behaviour.
+          '';
+        };
+
+      drives =
+        mkOption {
+          type = types.listOf (types.submodule driveOpts);
+          description = "Drives passed to qemu.";
+        };
+
+      diskInterface =
+        mkOption {
+          default = "virtio";
+          example = "scsi";
+          type = types.enum [ "virtio" "scsi" "ide" ];
+          description = "The interface used for the virtual hard disks.";
+        };
+
+      guestAgent.enable =
+        mkOption {
+          default = true;
+          type = types.bool;
+          description = ''
+            Enable the QEMU guest agent.
+          '';
+        };
+    };
+
+    virtualisation.useBootLoader =
+      mkOption {
+        default = false;
+        description =
+          ''
+            If enabled, the virtual machine will be booted using the
+            regular boot loader (e.g., GRUB 1 or 2).  This allows
+            testing of the boot loader.  If
+            disabled (the default), the VM directly boots the NixOS
+            kernel and initial ramdisk, bypassing the boot loader
+            altogether.
+          '';
+      };
+
+    virtualisation.useEFIBoot =
+      mkOption {
+        default = false;
+        description =
+          ''
+            If enabled, the virtual machine will provide an EFI boot
+            manager.
+            useEFIBoot is ignored if useBootLoader == false.
+          '';
+      };
+
+    virtualisation.bios =
+      mkOption {
+        default = null;
+        type = types.nullOr types.package;
+        description =
+          ''
+            An alternate BIOS (such as <package>qboot</package>) with which to start the VM.
+            Should contain a file named <literal>bios.bin</literal>.
+            If <literal>null</literal>, QEMU's builtin SeaBIOS will be used.
+          '';
+      };
+
+  };
+
+  config = {
+
+    boot.loader.grub.device = mkVMOverride cfg.bootDevice;
+
+    boot.initrd.extraUtilsCommands =
+      ''
+        # We need mke2fs in the initrd.
+        copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs
+      '';
+
+    boot.initrd.postDeviceCommands =
+      ''
+        # If the disk image appears to be empty, run mke2fs to
+        # initialise.
+        FSTYPE=$(blkid -o value -s TYPE ${cfg.bootDevice} || true)
+        if test -z "$FSTYPE"; then
+            mke2fs -t ext4 ${cfg.bootDevice}
+        fi
+      '';
+
+    boot.initrd.postMountCommands =
+      ''
+        # Mark this as a NixOS machine.
+        mkdir -p $targetRoot/etc
+        echo -n > $targetRoot/etc/NIXOS
+
+        # Fix the permissions on /tmp.
+        chmod 1777 $targetRoot/tmp
+
+        mkdir -p $targetRoot/boot
+
+        ${optionalString cfg.writableStore ''
+          echo "mounting overlay filesystem on /nix/store..."
+          mkdir -m 0755 -p $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
+          mount -t overlay overlay $targetRoot/nix/store \
+            -o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail
+        ''}
+      '';
+
+    # After booting, register the closure of the paths in
+    # `virtualisation.pathsInNixDB' in the Nix database in the VM.  This
+    # allows Nix operations to work in the VM.  The path to the
+    # registration file is passed through the kernel command line to
+    # allow `system.build.toplevel' to be included.  (If we had a direct
+    # reference to ${regInfo} here, then we would get a cyclic
+    # dependency.)
+    boot.postBootCommands =
+      ''
+        if [[ "$(cat /proc/cmdline)" =~ regInfo=([^ ]*) ]]; then
+          ${config.nix.package.out}/bin/nix-store --load-db < ''${BASH_REMATCH[1]}
+        fi
+      '';
+
+    boot.initrd.availableKernelModules =
+      optional cfg.writableStore "overlay"
+      ++ optional (cfg.qemu.diskInterface == "scsi") "sym53c8xx";
+
+    virtualisation.bootDevice =
+      mkDefault (if cfg.qemu.diskInterface == "scsi" then "/dev/sda" else "/dev/vda");
+
+    virtualisation.pathsInNixDB = [ config.system.build.toplevel ];
+
+    # FIXME: Consolidate this one day.
+    virtualisation.qemu.options = mkMerge [
+      (mkIf (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) [
+        "-usb" "-device usb-tablet,bus=usb-bus.0"
+      ])
+      (mkIf (pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64) [
+        "-device virtio-gpu-pci" "-device usb-ehci,id=usb0" "-device usb-kbd" "-device usb-tablet"
+      ])
+      (mkIf (!cfg.useBootLoader) [
+        "-kernel ${config.system.build.toplevel}/kernel"
+        "-initrd ${config.system.build.toplevel}/initrd"
+        ''-append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo}/registration ${consoles} $QEMU_KERNEL_PARAMS"''
+      ])
+      (mkIf cfg.useEFIBoot [
+        "-pflash $TMPDIR/bios.bin"
+      ])
+      (mkIf (cfg.bios != null) [
+        "-bios ${cfg.bios}/bios.bin"
+      ])
+      (mkIf (!cfg.graphics) [
+        "-nographic"
+      ])
+    ];
+
+    virtualisation.qemu.drives = mkMerge [
+      (mkIf cfg.useBootLoader [
+        {
+          file = "$NIX_DISK_IMAGE";
+          driveExtraOpts.cache = "writeback";
+          driveExtraOpts.werror = "report";
+        }
+        {
+          file = "$TMPDIR/disk.img";
+          driveExtraOpts.media = "disk";
+          deviceExtraOpts.bootindex = "1";
+        }
+      ])
+      (mkIf (!cfg.useBootLoader) [
+        {
+          file = "$NIX_DISK_IMAGE";
+          driveExtraOpts.cache = "writeback";
+          driveExtraOpts.werror = "report";
+        }
+      ])
+      (imap0 (idx: _: {
+        file = "$(pwd)/empty${toString idx}.qcow2";
+        driveExtraOpts.werror = "report";
+      }) cfg.emptyDiskImages)
+    ];
+
+    # Mount the host filesystem via 9P, and bind-mount the Nix store
+    # of the host into our own filesystem.  We use mkVMOverride to
+    # allow this module to be applied to "normal" NixOS system
+    # configuration, where the regular value for the `fileSystems'
+    # attribute should be disregarded for the purpose of building a VM
+    # test image (since those filesystems don't exist in the VM).
+    fileSystems = mkVMOverride (
+      { "/".device = cfg.bootDevice;
+        ${if cfg.writableStore then "/nix/.ro-store" else "/nix/store"} =
+          { device = "store";
+            fsType = "9p";
+            options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
+            neededForBoot = true;
+          };
+        "/tmp" = mkIf config.boot.tmpOnTmpfs
+          { device = "tmpfs";
+            fsType = "tmpfs";
+            neededForBoot = true;
+            # Sync with systemd's tmp.mount;
+            options = [ "mode=1777" "strictatime" "nosuid" "nodev" ];
+          };
+        "/tmp/xchg" =
+          { device = "xchg";
+            fsType = "9p";
+            options = [ "trans=virtio" "version=9p2000.L" ];
+            neededForBoot = true;
+          };
+        "/tmp/shared" =
+          { device = "shared";
+            fsType = "9p";
+            options = [ "trans=virtio" "version=9p2000.L" ];
+            neededForBoot = true;
+          };
+      } // optionalAttrs (cfg.writableStore && cfg.writableStoreUseTmpfs)
+      { "/nix/.rw-store" =
+          { fsType = "tmpfs";
+            options = [ "mode=0755" ];
+            neededForBoot = true;
+          };
+      } // optionalAttrs cfg.useBootLoader
+      { "/boot" =
+          { device = "/dev/vdb2";
+            fsType = "vfat";
+            options = [ "ro" ];
+            noCheck = true; # fsck fails on a r/o filesystem
+          };
+      });
+
+    swapDevices = mkVMOverride [ ];
+    boot.initrd.luks.devices = mkVMOverride {};
+
+    # Don't run ntpd in the guest.  It should get the correct time from KVM.
+    services.timesyncd.enable = false;
+
+    services.qemuGuest.enable = cfg.qemu.guestAgent.enable;
+
+    system.build.vm = pkgs.runCommand "nixos-vm" { preferLocalBuild = true; }
+      ''
+        mkdir -p $out/bin
+        ln -s ${config.system.build.toplevel} $out/system
+        ln -s ${pkgs.writeScript "run-nixos-vm" startVM} $out/bin/run-${vmName}-vm
+      '';
+
+    # When building a regular system configuration, override whatever
+    # video driver the host uses.
+    services.xserver.videoDrivers = mkVMOverride [ "modesetting" ];
+    services.xserver.defaultDepth = mkVMOverride 0;
+    services.xserver.resolutions = mkVMOverride [ { x = 1024; y = 768; } ];
+    services.xserver.monitorSection =
+      ''
+        # Set a higher refresh rate so that resolutions > 800x600 work.
+        HorizSync 30-140
+        VertRefresh 50-160
+      '';
+
+    # Wireless won't work in the VM.
+    networking.wireless.enable = mkVMOverride false;
+    services.connman.enable = mkVMOverride false;
+
+    # Speed up booting by not waiting for ARP.
+    networking.dhcpcd.extraConfig = "noarp";
+
+    networking.usePredictableInterfaceNames = false;
+
+    system.requiredKernelConfig = with config.lib.kernelConfig;
+      [ (isEnabled "VIRTIO_BLK")
+        (isEnabled "VIRTIO_PCI")
+        (isEnabled "VIRTIO_NET")
+        (isEnabled "EXT4_FS")
+        (isYes "BLK_DEV")
+        (isYes "PCI")
+        (isYes "EXPERIMENTAL")
+        (isYes "NETDEVICES")
+        (isYes "NET_CORE")
+        (isYes "INET")
+        (isYes "NETWORK_FILESYSTEMS")
+      ] ++ optional (!cfg.graphics) [
+        (isYes "SERIAL_8250_CONSOLE")
+        (isYes "SERIAL_8250")
+      ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/railcar.nix b/nixpkgs/nixos/modules/virtualisation/railcar.nix
new file mode 100644
index 000000000000..12da1c75fc38
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/railcar.nix
@@ -0,0 +1,125 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.railcar;
+  generateUnit = name: containerConfig:
+    let
+      container = pkgs.ociTools.buildContainer {
+        args = [
+          (pkgs.writeShellScript "run.sh" containerConfig.cmd).outPath
+        ];
+      };
+    in
+      nameValuePair "railcar-${name}" {
+        enable = true;
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+            ExecStart = ''
+              ${cfg.package}/bin/railcar -r ${cfg.stateDir} run ${name} -b ${container}
+            '';
+            Type = containerConfig.runType;
+          };
+      };
+  mount = with types; (submodule {
+    options = {
+      type = mkOption {
+        type = str;
+        default = "none";
+        description = ''
+          The type of the filesystem to be mounted.
+          Linux: filesystem types supported by the kernel as listed in 
+          `/proc/filesystems` (e.g., "minix", "ext2", "ext3", "jfs", "xfs", 
+          "reiserfs", "msdos", "proc", "nfs", "iso9660"). For bind mounts 
+          (when options include either bind or rbind), the type is a dummy,
+          often "none" (not listed in /proc/filesystems).
+        '';
+      };
+      source = mkOption {
+        type = str;
+        description = "Source for the in-container mount";
+      };
+      options = mkOption {
+        type = loaOf (str);
+        default = [ "bind" ];
+        description = ''
+          Mount options of the filesystem to be used.
+
+          Supported options are listed in the mount(8) man page. Note that
+          both filesystem-independent and filesystem-specific options
+          are listed.
+        '';
+      };
+    };
+  });
+in
+{
+  options.services.railcar = {
+    enable = mkEnableOption "railcar";
+
+    containers = mkOption {
+      default = {};
+      description = "Declarative container configuration";
+      type = with types; loaOf (submodule ({ name, config, ... }: {
+        options = {
+          cmd = mkOption {
+            type = types.lines;
+            description = "Command or script to run inside the container";
+          };
+
+          mounts = mkOption {
+            type = with types; attrsOf mount;
+            default = {};
+            description = ''
+              A set of mounts inside the container.
+
+              The defaults have been chosen for simple bindmounts, meaning
+              that you only need to provide the "source" parameter.
+            '';
+            example = ''
+              { "/data" = { source = "/var/lib/data"; }; }
+            '';
+          };
+
+          runType = mkOption {
+            type = types.str;
+            default = "oneshot";
+            description = "The systemd service run type";
+          };
+
+          os = mkOption {
+            type = types.str;
+            default = "linux";
+            description = "OS type of the container";
+          };
+
+          arch = mkOption {
+            type = types.str;
+            default = "x86_64";
+            description = "Computer architecture type of the container";
+          };
+        };
+      }));
+    };
+
+    stateDir = mkOption {
+      type = types.path;
+      default = ''/var/railcar'';
+      description = "Railcar persistent state directory";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.railcar;
+      description = "Railcar package to use";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services = flip mapAttrs' cfg.containers (name: containerConfig:
+      generateUnit name containerConfig
+    );
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
new file mode 100644
index 000000000000..834b994e92d2
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
@@ -0,0 +1,93 @@
+# Module for VirtualBox guests.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.virtualbox.guest;
+  kernel = config.boot.kernelPackages;
+
+in
+
+{
+
+  ###### interface
+
+  options.virtualisation.virtualbox.guest = {
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = "Whether to enable the VirtualBox service and other guest additions.";
+    };
+
+    x11 = mkOption {
+      default = true;
+      type = types.bool;
+      description = "Whether to enable x11 graphics";
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [{
+    assertions = [{
+      assertion = pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64;
+      message = "Virtualbox not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    }];
+
+    environment.systemPackages = [ kernel.virtualboxGuestAdditions ];
+
+    boot.extraModulePackages = [ kernel.virtualboxGuestAdditions ];
+
+    boot.supportedFilesystems = [ "vboxsf" ];
+    boot.initrd.supportedFilesystems = [ "vboxsf" ];
+
+    users.groups.vboxsf.gid = config.ids.gids.vboxsf;
+
+    systemd.services.virtualbox =
+      { description = "VirtualBox Guest Services";
+
+        wantedBy = [ "multi-user.target" ];
+        requires = [ "dev-vboxguest.device" ];
+        after = [ "dev-vboxguest.device" ];
+
+        unitConfig.ConditionVirtualization = "oracle";
+
+        serviceConfig.ExecStart = "@${kernel.virtualboxGuestAdditions}/bin/VBoxService VBoxService --foreground";
+      };
+
+    services.udev.extraRules =
+      ''
+        # /dev/vboxuser is necessary for VBoxClient to work.  Maybe we
+        # should restrict this to logged-in users.
+        KERNEL=="vboxuser",  OWNER="root", GROUP="root", MODE="0666"
+
+        # Allow systemd dependencies on vboxguest.
+        SUBSYSTEM=="misc", KERNEL=="vboxguest", TAG+="systemd"
+      '';
+  } (mkIf cfg.x11 {
+    services.xserver.videoDrivers = mkOverride 50 [ "virtualbox" "modesetting" ];
+
+    services.xserver.config =
+      ''
+        Section "InputDevice"
+          Identifier "VBoxMouse"
+          Driver "vboxmouse"
+        EndSection
+      '';
+
+    services.xserver.serverLayoutSection =
+      ''
+        InputDevice "VBoxMouse"
+      '';
+
+    services.xserver.displayManager.sessionCommands =
+      ''
+        PATH=${makeBinPath [ pkgs.gnugrep pkgs.which pkgs.xorg.xorgserver.out ]}:$PATH \
+          ${kernel.virtualboxGuestAdditions}/bin/VBoxClient-all
+      '';
+  })]);
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix
new file mode 100644
index 000000000000..ddb0a7bda4f3
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.virtualbox.host;
+
+  virtualbox = cfg.package.override {
+    inherit (cfg) enableHardening headless;
+    extensionPack = if cfg.enableExtensionPack then pkgs.virtualboxExtpack else null;
+  };
+
+  kernelModules = config.boot.kernelPackages.virtualbox.override {
+    inherit virtualbox;
+  };
+
+in
+
+{
+  options.virtualisation.virtualbox.host = {
+    enable = mkEnableOption "VirtualBox" // {
+      description = ''
+        Whether to enable VirtualBox.
+
+        <note><para>
+          In order to pass USB devices from the host to the guests, the user
+          needs to be in the <literal>vboxusers</literal> group.
+        </para></note>
+      '';
+    };
+
+    enableExtensionPack = mkEnableOption "VirtualBox extension pack" // {
+      description = ''
+        Whether to install the Oracle Extension Pack for VirtualBox.
+
+        <important><para>
+          You must set <literal>nixpkgs.config.allowUnfree = true</literal> in
+          order to use this.  This requires you to accept the VirtualBox PUEL.
+        </para></important>
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.virtualbox;
+      defaultText = "pkgs.virtualbox";
+      description = ''
+        Which VirtualBox package to use.
+      '';
+    };
+
+    addNetworkInterface = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Automatically set up a vboxnet0 host-only network interface.
+      '';
+    };
+
+    enableHardening = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Enable hardened VirtualBox, which ensures that only the binaries in the
+        system path get access to the devices exposed by the kernel modules
+        instead of all users in the vboxusers group.
+
+        <important><para>
+          Disabling this can put your system's security at risk, as local users
+          in the vboxusers group can tamper with the VirtualBox device files.
+        </para></important>
+      '';
+    };
+
+    headless = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Use a VirtualBox installation without the GUI and Qt dependency. Useful on servers,
+        or whenever virtual machines are controlled only via SSH.
+      '';
+    };
+  };
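+
+  # Illustrative (hypothetical) usage from a system configuration; the user
+  # name is a placeholder:
+  #   virtualisation.virtualbox.host.enable = true;
+  #   users.extraGroups.vboxusers.members = [ "alice" ];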
+
+  config = mkIf cfg.enable (mkMerge [{
+    warnings = mkIf (config.nixpkgs.config.virtualbox.enableExtensionPack or false)
+      ["'nixpkgs.virtualbox.enableExtensionPack' has no effect, please use 'virtualisation.virtualbox.host.enableExtensionPack'"];
+    boot.kernelModules = [ "vboxdrv" "vboxnetadp" "vboxnetflt" ];
+    boot.extraModulePackages = [ kernelModules ];
+    environment.systemPackages = [ virtualbox ];
+
+    security.wrappers = let
+      mkSuid = program: {
+        source = "${virtualbox}/libexec/virtualbox/${program}";
+        owner = "root";
+        group = "vboxusers";
+        setuid = true;
+      };
+    in mkIf cfg.enableHardening
+      (builtins.listToAttrs (map (x: { name = x; value = mkSuid x; }) [
+      "VBoxHeadless"
+      "VBoxNetAdpCtl"
+      "VBoxNetDHCP"
+      "VBoxNetNAT"
+      "VBoxSDL"
+      "VBoxVolInfo"
+      "VirtualBoxVM"
+    ]));
+
+    users.groups.vboxusers.gid = config.ids.gids.vboxusers;
+
+    services.udev.extraRules =
+      ''
+        KERNEL=="vboxdrv",    OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+        KERNEL=="vboxdrvu",   OWNER="root", GROUP="root",      MODE="0666", TAG+="systemd"
+        KERNEL=="vboxnetctl", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+        SUBSYSTEM=="usb_device", ACTION=="add", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
+        SUBSYSTEM=="usb", ACTION=="add", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
+        SUBSYSTEM=="usb_device", ACTION=="remove", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
+        SUBSYSTEM=="usb", ACTION=="remove", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
+      '';
+
+    # Since we lack the right setuid/setcap binaries, set up a host-only network by default.
+  } (mkIf cfg.addNetworkInterface {
+    systemd.services.vboxnet0 =
+      { description = "VirtualBox vboxnet0 Interface";
+        requires = [ "dev-vboxnetctl.device" ];
+        after = [ "dev-vboxnetctl.device" ];
+        wantedBy = [ "network.target" "sys-subsystem-net-devices-vboxnet0.device" ];
+        path = [ virtualbox ];
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.Type = "oneshot";
+        serviceConfig.PrivateTmp = true;
+        environment.VBOX_USER_HOME = "/tmp";
+        script =
+          ''
+            if ! [ -e /sys/class/net/vboxnet0 ]; then
+              VBoxManage hostonlyif create
+              cat /tmp/VBoxSVC.log >&2
+            fi
+          '';
+        postStop =
+          ''
+            VBoxManage hostonlyif remove vboxnet0
+          '';
+      };
+
+    networking.interfaces.vboxnet0.ipv4.addresses = [{ address = "192.168.56.1"; prefixLength = 24; }];
+    # Make sure NetworkManager won't assume this interface being up
+    # means we have internet access.
+    networking.networkmanager.unmanaged = ["vboxnet0"];
+  }) (mkIf config.networking.useNetworkd {
+    systemd.network.networks."40-vboxnet0".extraConfig = ''
+      [Link]
+      RequiredForOnline=no
+    '';
+  })
+
+]);
+}
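
A minimal sketch of enabling the host module defined above; the user name is hypothetical and group membership is only needed for USB pass-through, as the option description notes:

    { ... }:
    {
      virtualisation.virtualbox.host.enable = true;
      # Only required to pass USB devices from the host to guests (hypothetical user):
      users.users.alice.extraGroups = [ "vboxusers" ];
    }
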
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix
new file mode 100644
index 000000000000..788b4d9d9761
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix
@@ -0,0 +1,138 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualbox;
+
+in {
+
+  options = {
+    virtualbox = {
+      baseImageSize = mkOption {
+        type = types.int;
+        default = 50 * 1024;
+        description = ''
+          The size of the VirtualBox base image in MiB.
+        '';
+      };
+      memorySize = mkOption {
+        type = types.int;
+        default = 1536;
+        description = ''
+          The amount of RAM the VirtualBox appliance can use in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-ova-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = ''
+          The name of the derivation for the VirtualBox appliance.
+        '';
+      };
+      vmName = mkOption {
+        type = types.str;
+        default = "NixOS ${config.system.nixos.label} (${pkgs.stdenv.hostPlatform.system})";
+        description = ''
+          The name of the VirtualBox appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.ova";
+        description = ''
+          The file name of the VirtualBox appliance.
+        '';
+      };
+      params = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
+        example = {
+          audio = "alsa";
+          rtcuseutc = "on";
+          usb = "off";
+        };
+        description = ''
+          Parameters passed to the VirtualBox appliance.
+
+          Run <literal>VBoxManage modifyvm --help</literal> to see more options.
+        '';
+      };
+    };
+  };
+
+  config = {
+
+    virtualbox.params = mkMerge [
+      (mapAttrs (name: mkDefault) {
+        acpi = "on";
+        vram = 32;
+        nictype1 = "virtio";
+        nic1 = "nat";
+        audiocontroller = "ac97";
+        audio = "alsa";
+        audioout = "on";
+        rtcuseutc = "on";
+        usb = "on";
+        usbehci = "on";
+        mouse = "usbtablet";
+      })
+      (mkIf (pkgs.stdenv.hostPlatform.system == "i686-linux") { pae = "on"; })
+    ];
+
+    system.build.virtualBoxOVA = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+
+      inherit pkgs lib config;
+      partitionTableType = "legacy";
+      diskSize = cfg.baseImageSize;
+
+      postVM =
+        ''
+          export HOME=$PWD
+          export PATH=${pkgs.virtualbox}/bin:$PATH
+
+          echo "creating VirtualBox pass-through disk wrapper (no copying involved)..."
+          VBoxManage internalcommands createrawvmdk -filename disk.vmdk -rawdisk $diskImage
+
+          echo "creating VirtualBox VM..."
+          vmName="${cfg.vmName}";
+          VBoxManage createvm --name "$vmName" --register \
+            --ostype ${if pkgs.stdenv.hostPlatform.system == "x86_64-linux" then "Linux26_64" else "Linux26"}
+          VBoxManage modifyvm "$vmName" \
+            --memory ${toString cfg.memorySize} \
+            ${lib.cli.toGNUCommandLineShell { } cfg.params}
+          VBoxManage storagectl "$vmName" --name SATA --add sata --portcount 4 --bootable on --hostiocache on
+          VBoxManage storageattach "$vmName" --storagectl SATA --port 0 --device 0 --type hdd \
+            --medium disk.vmdk
+
+          echo "exporting VirtualBox VM..."
+          mkdir -p $out
+          fn="$out/${cfg.vmFileName}"
+          VBoxManage export "$vmName" --output "$fn" --options manifest
+
+          rm -v $diskImage
+
+          mkdir -p $out/nix-support
+          echo "file ova $fn" >> $out/nix-support/hydra-build-products
+        '';
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    boot.growPartition = true;
+    boot.loader.grub.device = "/dev/sda";
+
+    swapDevices = [{
+      device = "/var/swap";
+      size = 2048;
+    }];
+
+    virtualisation.virtualbox.guest.enable = true;
+
+  };
+}
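
A sketch of a configuration that imports virtualbox-image.nix explicitly and tunes the appliance through the options defined above; the values are illustrative. The resulting .ova ends up in config.system.build.virtualBoxOVA.

    { ... }:
    {
      imports = [ <nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix> ];
      virtualbox.memorySize = 2048;       # MiB of RAM for the appliance
      virtualbox.vmName = "NixOS demo";   # illustrative appliance name
      virtualbox.params.usb = "off";      # forwarded to VBoxManage modifyvm
    }
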
diff --git a/nixpkgs/nixos/modules/virtualisation/vmware-guest.nix b/nixpkgs/nixos/modules/virtualisation/vmware-guest.nix
new file mode 100644
index 000000000000..962a9059ea47
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/vmware-guest.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.vmware.guest;
+  open-vm-tools = if cfg.headless then pkgs.open-vm-tools-headless else pkgs.open-vm-tools;
+  xf86inputvmmouse = pkgs.xorg.xf86inputvmmouse;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "vmwareGuest" ] [ "virtualisation" "vmware" "guest" ])
+  ];
+
+  options.virtualisation.vmware.guest = {
+    enable = mkEnableOption "VMWare Guest Support";
+    headless = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Whether to disable X11-related features.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64;
+      message = "VMWare guest is not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    } ];
+
+    boot.initrd.kernelModules = [ "vmw_pvscsi" ];
+
+    environment.systemPackages = [ open-vm-tools ];
+
+    systemd.services.vmware =
+      { description = "VMWare Guest Service";
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig.ExecStart = "${open-vm-tools}/bin/vmtoolsd";
+      };
+
+    environment.etc.vmware-tools.source = "${open-vm-tools}/etc/vmware-tools/*";
+
+    services.xserver = mkIf (!cfg.headless) {
+      videoDrivers = mkOverride 50 [ "vmware" ];
+      modules = [ xf86inputvmmouse ];
+
+      config = ''
+          Section "InputClass"
+            Identifier "VMMouse"
+            MatchDevicePath "/dev/input/event*"
+            MatchProduct "ImPS/2 Generic Wheel Mouse"
+            Driver "vmmouse"
+          EndSection
+        '';
+
+      displayManager.sessionCommands = ''
+          ${open-vm-tools}/bin/vmware-user-suid-wrapper
+        '';
+    };
+  };
+}
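
Enabling the guest tools from a VMware guest's configuration is a one-liner; the headless variant is useful on servers without X11. A sketch using the options defined above:

    { ... }:
    {
      virtualisation.vmware.guest.enable = true;
      virtualisation.vmware.guest.headless = true;  # skip the Xorg-related parts
    }
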
diff --git a/nixpkgs/nixos/modules/virtualisation/vmware-image.nix b/nixpkgs/nixos/modules/virtualisation/vmware-image.nix
new file mode 100644
index 000000000000..9da9e145f7a9
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/vmware-image.nix
@@ -0,0 +1,90 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  boolToStr = value: if value then "on" else "off";
+  cfg = config.vmware;
+
+  subformats = [
+    "monolithicSparse"
+    "monolithicFlat"
+    "twoGbMaxExtentSparse"
+    "twoGbMaxExtentFlat"
+    "streamOptimized"
+  ];
+
+in {
+  options = {
+    vmware = {
+      baseImageSize = mkOption {
+        type = types.int;
+        default = 2048;
+        description = ''
+          The size of the VMWare base image in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-vmware-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = ''
+          The name of the derivation for the VMWare appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.vmdk";
+        description = ''
+          The file name of the VMWare appliance.
+        '';
+      };
+      vmSubformat = mkOption {
+        type = types.enum subformats;
+        default = "monolithicSparse";
+        description = "Specifies which VMDK subformat to use.";
+      };
+      vmCompat6 = mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description = "Create a VMDK version 6 image (instead of version 4).";
+      };
+    };
+  };
+
+  config = {
+    system.build.vmwareImage = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+      postVM = ''
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o compat6=${boolToStr cfg.vmCompat6},subformat=${cfg.vmSubformat} -O vmdk $diskImage $out/${cfg.vmFileName}
+        rm $diskImage
+      '';
+      format = "raw";
+      diskSize = cfg.baseImageSize;
+      partitionTableType = "efi";
+      inherit config lib pkgs;
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    fileSystems."/boot" = {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    boot.growPartition = true;
+
+    boot.loader.grub = {
+      version = 2;
+      device = "nodev";
+      efiSupport = true;
+      efiInstallAsRemovable = true;
+    };
+
+    virtualisation.vmware.guest.enable = true;
+  };
+}
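
A sketch of a configuration importing vmware-image.nix and choosing a different VMDK subformat; the image derivation is exposed as config.system.build.vmwareImage.

    { ... }:
    {
      imports = [ <nixpkgs/nixos/modules/virtualisation/vmware-image.nix> ];
      vmware.vmSubformat = "streamOptimized";  # one of the subformats listed above
      vmware.vmCompat6 = true;                 # emit a VMDK version 6 image
      vmware.baseImageSize = 4096;             # MiB
    }
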
diff --git a/nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix b/nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix
new file mode 100644
index 000000000000..675cf9297371
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.xe-guest-utilities;
+in {
+  options = {
+    services.xe-guest-utilities = {
+      enable = mkEnableOption "the Xen guest utilities daemon";
+    };
+  };
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.xe-guest-utilities ];
+    systemd.tmpfiles.rules = [ "d /run/xenstored 0755 - - -" ];
+
+    systemd.services.xe-daemon = {
+      description = "xen daemon file";
+      wantedBy    = [ "multi-user.target" ];
+      after = [ "xe-linux-distribution.service" ];
+      requires = [ "proc-xen.mount" ];
+      path = [ pkgs.coreutils pkgs.iproute ];
+      serviceConfig = {
+        PIDFile = "/run/xe-daemon.pid";
+        ExecStart = "${pkgs.xe-guest-utilities}/bin/xe-daemon -p /run/xe-daemon.pid";
+        ExecStop = "${pkgs.procps}/bin/pkill -TERM -F /run/xe-daemon.pid";
+      };
+    };
+
+    systemd.services.xe-linux-distribution = {
+      description = "xen linux distribution service";
+      wantedBy    = [ "multi-user.target" ];
+      before = [ "xend.service" ];
+      path = [ pkgs.xe-guest-utilities pkgs.coreutils pkgs.gawk pkgs.gnused ];
+      serviceConfig = {
+        Type = "simple";
+        RemainAfterExit = "yes";
+        ExecStart = "${pkgs.xe-guest-utilities}/bin/xe-linux-distribution /var/cache/xe-linux-distribution";
+      };
+    };
+
+    systemd.mounts = [
+      { description = "Mount /proc/xen files";
+        what = "xenfs";
+        where = "/proc/xen";
+        type = "xenfs";
+        unitConfig = {
+          ConditionPathExists = "/proc/xen";
+          RefuseManualStop = "true";
+        };
+      }
+    ];
+  };
+}
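
Enabling the Xen guest utilities from a guest configuration (sketch):

    { ... }:
    {
      services.xe-guest-utilities.enable = true;
    }
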
diff --git a/nixpkgs/nixos/modules/virtualisation/xen-dom0.nix b/nixpkgs/nixos/modules/virtualisation/xen-dom0.nix
new file mode 100644
index 000000000000..7b2a66c43489
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/xen-dom0.nix
@@ -0,0 +1,454 @@
+# Xen hypervisor (Dom0) support.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.xen;
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "virtualisation" "xen" "qemu" ] "You don't need this option anymore; it will work without it.")
+    (mkRenamedOptionModule [ "virtualisation" "xen" "qemu-package" ] [ "virtualisation" "xen" "package-qemu" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    virtualisation.xen.enable =
+      mkOption {
+        default = false;
+        type = types.bool;
+        description =
+          ''
+            Setting this option enables the Xen hypervisor, a
+            virtualisation technology that allows multiple virtual
+            machines, known as <emphasis>domains</emphasis>, to run
+            concurrently on the physical machine.  NixOS runs as the
+            privileged <emphasis>Domain 0</emphasis>.  This option
+            requires a reboot to take effect.
+          '';
+      };
+
+    virtualisation.xen.package = mkOption {
+      type = types.package;
+      defaultText = "pkgs.xen";
+      example = literalExample "pkgs.xen-light";
+      description = ''
+        The package providing the Xen binaries.
+      '';
+      relatedPackages = [ "xen" "xen-light" ];
+    };
+
+    virtualisation.xen.package-qemu = mkOption {
+      type = types.package;
+      defaultText = "pkgs.xen";
+      example = literalExample "pkgs.qemu_xen-light";
+      description = ''
+        The package providing the QEMU binaries used for the Dom0 QEMU and xendomains.
+      '';
+      relatedPackages = [ "xen"
+                          { name = "qemu_xen-light"; comment = "For use with pkgs.xen-light."; }
+                        ];
+    };
+
+    virtualisation.xen.bootParams =
+      mkOption {
+        default = "";
+        description =
+          ''
+            Parameters passed to the Xen hypervisor at boot time.
+          '';
+      };
+
+    virtualisation.xen.domain0MemorySize =
+      mkOption {
+        default = 0;
+        example = 512;
+        description =
+          ''
+            Amount of memory (in MiB) allocated to Domain 0 on boot.
+            If set to 0, all memory is assigned to Domain 0.
+          '';
+      };
+
+    virtualisation.xen.bridge = {
+        name = mkOption {
+          default = "xenbr0";
+          description = ''
+              Name of the bridge the Xen domUs connect to.
+            '';
+        };
+
+        address = mkOption {
+          type = types.str;
+          default = "172.16.0.1";
+          description = ''
+            IPv4 address of the bridge.
+          '';
+        };
+
+        prefixLength = mkOption {
+          type = types.addCheck types.int (n: n >= 0 && n <= 32);
+          default = 16;
+          description = ''
+            Subnet mask of the bridge interface, specified as the number of
+            bits in the prefix (e.g. <literal>24</literal>).
+            A DHCP server will provide IP addresses for the remainder of the
+            subnet.
+          '';
+        };
+
+        forwardDns = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            If set to <literal>true</literal>, DNS queries from the hosts
+            connected to the bridge will be forwarded to the DNS servers
+            specified in /etc/resolv.conf.
+          '';
+        };
+
+      };
+
+    virtualisation.xen.stored =
+      mkOption {
+        type = types.path;
+        description =
+          ''
+            Xen Store daemon to use. Defaults to oxenstored of the xen package.
+          '';
+      };
+
+    virtualisation.xen.domains = {
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          description =
+            ''
+              Options defined here will override the defaults for xendomains.
+              The default options can be seen in the file included from
+              /etc/default/xendomains.
+            '';
+          };
+      };
+
+    virtualisation.xen.trace = mkEnableOption "Xen tracing";
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.isx86_64;
+      message = "Xen currently not supported on ${pkgs.stdenv.hostPlatform.system}";
+    } {
+      assertion = config.boot.loader.grub.enable && (config.boot.loader.grub.efiSupport == false);
+      message = "Xen currently does not support EFI boot";
+    } ];
+
+    virtualisation.xen.package = mkDefault pkgs.xen;
+    virtualisation.xen.package-qemu = mkDefault pkgs.xen;
+    virtualisation.xen.stored = mkDefault "${cfg.package}/bin/oxenstored";
+
+    environment.systemPackages = [ cfg.package ];
+
+    # Make sure Domain 0 gets the required configuration
+    #boot.kernelPackages = pkgs.boot.kernelPackages.override { features={xen_dom0=true;}; };
+
+    boot.kernelModules =
+      [ "xen-evtchn" "xen-gntdev" "xen-gntalloc" "xen-blkback" "xen-netback"
+        "xen-pciback" "evtchn" "gntdev" "netbk" "blkbk" "xen-scsibk"
+        "usbbk" "pciback" "xen-acpi-processor" "blktap2" "tun" "netxen_nic"
+        "xen_wdt" "xen-acpi-processor" "xen-privcmd" "xen-scsiback"
+        "xenfs"
+      ];
+
+    # The xenfs module is needed in system.activationScripts.xen, but
+    # the modprobe command there fails silently. Include xenfs in the
+    # initrd as a workaround.
+    boot.initrd.kernelModules = [ "xenfs" ];
+
+    # The radeonfb kernel module causes the screen to go black as soon
+    # as it's loaded, so don't load it.
+    boot.blacklistedKernelModules = [ "radeonfb" ];
+
+    # Increase the number of loopback devices from the default (8),
+    # which is way too small because every VM virtual disk requires a
+    # loopback device.
+    boot.extraModprobeConfig =
+      ''
+        options loop max_loop=64
+      '';
+
+    virtualisation.xen.bootParams = [] ++
+      optionals cfg.trace [ "loglvl=all" "guest_loglvl=all" ] ++
+      optional (cfg.domain0MemorySize != 0) "dom0_mem=${toString cfg.domain0MemorySize}M";
+
+    system.extraSystemBuilderCmds =
+      ''
+        ln -s ${cfg.package}/boot/xen.gz $out/xen.gz
+        echo "${toString cfg.bootParams}" > $out/xen-params
+      '';
+
+    # Mount the /proc/xen pseudo-filesystem.
+    system.activationScripts.xen =
+      ''
+        if [ -d /proc/xen ]; then
+            ${pkgs.kmod}/bin/modprobe xenfs 2> /dev/null
+            ${pkgs.utillinux}/bin/mountpoint -q /proc/xen || \
+                ${pkgs.utillinux}/bin/mount -t xenfs none /proc/xen
+        fi
+      '';
+
+    # Domain 0 requires a pvops-enabled kernel.
+    system.requiredKernelConfig = with config.lib.kernelConfig;
+      [ (isYes "XEN")
+        (isYes "X86_IO_APIC")
+        (isYes "ACPI")
+        (isYes "XEN_DOM0")
+        (isYes "PCI_XEN")
+        (isYes "XEN_DEV_EVTCHN")
+        (isYes "XENFS")
+        (isYes "XEN_COMPAT_XENFS")
+        (isYes "XEN_SYS_HYPERVISOR")
+        (isYes "XEN_GNTDEV")
+        (isYes "XEN_BACKEND")
+        (isModule "XEN_NETDEV_BACKEND")
+        (isModule "XEN_BLKDEV_BACKEND")
+        (isModule "XEN_PCIDEV_BACKEND")
+        (isYes "XEN_BALLOON")
+        (isYes "XEN_SCRUB_PAGES")
+      ];
+
+
+    environment.etc =
+      {
+        "xen/xl.conf".source = "${cfg.package}/etc/xen/xl.conf";
+        "xen/scripts".source = "${cfg.package}/etc/xen/scripts";
+        "default/xendomains".text = ''
+          source ${cfg.package}/etc/default/xendomains
+
+          ${cfg.domains.extraConfig}
+        '';
+      }
+      // optionalAttrs (builtins.compareVersions cfg.package.version "4.10" >= 0) {
+        # in V 4.10 oxenstored requires /etc/xen/oxenstored.conf to start
+        "xen/oxenstored.conf".source = "${cfg.package}/etc/xen/oxenstored.conf";
+      };
+
+    # Xen provides udev rules.
+    services.udev.packages = [ cfg.package ];
+
+    services.udev.path = [ pkgs.bridge-utils pkgs.iproute ];
+
+    systemd.services.xen-store = {
+      description = "Xen Store Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "xen-store.socket" ];
+      requires = [ "xen-store.socket" ];
+      preStart = ''
+        export XENSTORED_ROOTDIR="/var/lib/xenstored"
+        rm -f "$XENSTORED_ROOTDIR"/tdb* &>/dev/null
+
+        mkdir -p /var/run
+        mkdir -p /var/log/xen # Running xl requires /var/log/xen and /var/lib/xen,
+        mkdir -p /var/lib/xen # so we create them here unconditionally.
+        grep -q control_d /proc/xen/capabilities
+        '';
+      serviceConfig = if (builtins.compareVersions cfg.package.version "4.8" < 0) then
+        { ExecStart = ''
+            ${cfg.stored}${optionalString cfg.trace " -T /var/log/xen/xenstored-trace.log"} --no-fork
+            '';
+        } else {
+          ExecStart = ''
+            ${cfg.package}/etc/xen/scripts/launch-xenstore
+            '';
+          Type            = "notify";
+          RemainAfterExit = true;
+          NotifyAccess    = "all";
+        };
+      postStart = ''
+        ${optionalString (builtins.compareVersions cfg.package.version "4.8" < 0) ''
+          time=0
+          timeout=30
+          # Wait for xenstored to actually come up, timing out after 30 seconds
+          while [ $time -lt $timeout ] && ! `${cfg.package}/bin/xenstore-read -s / >/dev/null 2>&1` ; do
+              time=$(($time+1))
+              sleep 1
+          done
+
+          # Exit if we timed out
+          if ! [ $time -lt $timeout ] ; then
+              echo "Could not start Xenstore Daemon"
+              exit 1
+          fi
+        ''}
+        echo "executing xen-init-dom0"
+        ${cfg.package}/lib/xen/bin/xen-init-dom0
+        '';
+    };
+
+    systemd.sockets.xen-store = {
+      description = "XenStore Socket for userspace API";
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenStream = [ "/var/run/xenstored/socket" "/var/run/xenstored/socket_ro" ];
+        SocketMode = "0660";
+        SocketUser = "root";
+        SocketGroup = "root";
+      };
+    };
+
+
+    systemd.services.xen-console = {
+      description = "Xen Console Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-store.service" ];
+      requires = [ "xen-store.service" ];
+      preStart = ''
+        mkdir -p /var/run/xen
+        ${optionalString cfg.trace "mkdir -p /var/log/xen"}
+        grep -q control_d /proc/xen/capabilities
+        '';
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/xenconsoled\
+            ${optionalString ((builtins.compareVersions cfg.package.version "4.8" >= 0)) " -i"}\
+            ${optionalString cfg.trace " --log=all --log-dir=/var/log/xen"}
+          '';
+      };
+    };
+
+
+    systemd.services.xen-qemu = {
+      description = "Xen Qemu Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-console.service" ];
+      requires = [ "xen-store.service" ];
+      serviceConfig.ExecStart = ''
+        ${cfg.package-qemu}/${cfg.package-qemu.qemu-system-i386} \
+           -xen-attach -xen-domid 0 -name dom0 -M xenpv \
+           -nographic -monitor /dev/null -serial /dev/null -parallel /dev/null
+        '';
+    };
+
+
+    systemd.services.xen-watchdog = {
+      description = "Xen Watchdog Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-qemu.service" "xen-domains.service" ];
+      serviceConfig.ExecStart = "${cfg.package}/bin/xenwatchdogd 30 15";
+      serviceConfig.Type = "forking";
+      serviceConfig.RestartSec = "1";
+      serviceConfig.Restart = "on-failure";
+    };
+
+
+    systemd.services.xen-bridge = {
+      description = "Xen bridge";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "xen-domains.service" ];
+      preStart = ''
+        mkdir -p /var/run/xen
+        touch /var/run/xen/dnsmasq.pid
+        touch /var/run/xen/dnsmasq.etherfile
+        touch /var/run/xen/dnsmasq.leasefile
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Usable\ range`
+        export XEN_BRIDGE_IP_RANGE_START="${"\${data[1]//[[:blank:]]/}"}"
+        export XEN_BRIDGE_IP_RANGE_END="${"\${data[2]//[[:blank:]]/}"}"
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ address`
+        export XEN_BRIDGE_NETWORK_ADDRESS="${"\${data[1]//[[:blank:]]/}"}"
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ mask`
+        export XEN_BRIDGE_NETMASK="${"\${data[1]//[[:blank:]]/}"}"
+
+        echo "${cfg.bridge.address} host gw dns" > /var/run/xen/dnsmasq.hostsfile
+
+        cat <<EOF > /var/run/xen/dnsmasq.conf
+        no-daemon
+        pid-file=/var/run/xen/dnsmasq.pid
+        interface=${cfg.bridge.name}
+        except-interface=lo
+        bind-interfaces
+        auth-zone=xen.local,$XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength}
+        domain=xen.local
+        addn-hosts=/var/run/xen/dnsmasq.hostsfile
+        expand-hosts
+        strict-order
+        no-hosts
+        bogus-priv
+        ${optionalString (!cfg.bridge.forwardDns) ''
+          no-resolv
+          no-poll
+          auth-server=dns.xen.local,${cfg.bridge.name}
+        ''}
+        filterwin2k
+        clear-on-reload
+        domain-needed
+        dhcp-hostsfile=/var/run/xen/dnsmasq.etherfile
+        dhcp-authoritative
+        dhcp-range=$XEN_BRIDGE_IP_RANGE_START,$XEN_BRIDGE_IP_RANGE_END
+        dhcp-no-override
+        no-ping
+        dhcp-leasefile=/var/run/xen/dnsmasq.leasefile
+        EOF
+
+        # DHCP
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p tcp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p udp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        # DNS
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p tcp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p udp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+
+        ${pkgs.bridge-utils}/bin/brctl addbr ${cfg.bridge.name}
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} ${cfg.bridge.address}
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} netmask $XEN_BRIDGE_NETMASK
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} up
+      '';
+      serviceConfig.ExecStart = "${pkgs.dnsmasq}/bin/dnsmasq --conf-file=/var/run/xen/dnsmasq.conf";
+      postStop = ''
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ address`
+        export XEN_BRIDGE_NETWORK_ADDRESS="${"\${data[1]//[[:blank:]]/}"}"
+
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} down
+        ${pkgs.bridge-utils}/bin/brctl delbr ${cfg.bridge.name}
+
+        # DNS
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p udp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p tcp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        # DHCP
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p udp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p tcp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+      '';
+    };
+
+
+    systemd.services.xen-domains = {
+      description = "Xen domains - automatically starts, saves and restores Xen domains";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-bridge.service" "xen-qemu.service" ];
+      requires = [ "xen-bridge.service" "xen-qemu.service" ];
+      ## To prevent a race between dhcpcd and xend's bridge setup script
+      ## (which renames eth* to peth* and recreates eth* as a virtual
+      ## device), start dhcpcd after xend.
+      before = [ "dhcpd.service" ];
+      restartIfChanged = false;
+      serviceConfig.RemainAfterExit = "yes";
+      path = [ cfg.package cfg.package-qemu ];
+      environment.XENDOM_CONFIG = "${cfg.package}/etc/sysconfig/xendomains";
+      preStart = "mkdir -p /var/lock/subsys -m 755";
+      serviceConfig.ExecStart = "${cfg.package}/etc/init.d/xendomains start";
+      serviceConfig.ExecStop = "${cfg.package}/etc/init.d/xendomains stop";
+    };
+
+  };
+
+}
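
A Dom0 configuration sketch using the options defined above; the memory size and bridge addressing are illustrative values:

    { ... }:
    {
      virtualisation.xen.enable = true;
      virtualisation.xen.domain0MemorySize = 2048;  # MiB reserved for Dom0
      virtualisation.xen.bridge = {
        name = "xenbr0";
        address = "172.16.0.1";
        prefixLength = 16;
      };
    }
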
diff --git a/nixpkgs/nixos/modules/virtualisation/xen-domU.nix b/nixpkgs/nixos/modules/virtualisation/xen-domU.nix
new file mode 100644
index 000000000000..c00b984c2ce0
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/xen-domU.nix
@@ -0,0 +1,19 @@
+# Common configuration for Xen DomU NixOS virtual machines.
+
+{ ... }:
+
+{
+  boot.loader.grub.version = 2;
+  boot.loader.grub.device = "nodev";
+
+  boot.initrd.kernelModules =
+    [ "xen-blkfront" "xen-tpmfront" "xen-kbdfront" "xen-fbfront"
+      "xen-netfront" "xen-pcifront" "xen-scsifront"
+    ];
+
+  # Send syslog messages to the Xen console.
+  services.syslogd.tty = "hvc0";
+
+  # Don't run systemd-timesyncd, since we should get the correct time from Dom0.
+  services.timesyncd.enable = false;
+}
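
A DomU configuration would typically just import this profile and add its own settings; a minimal sketch:

    { ... }:
    {
      imports = [ <nixpkgs/nixos/modules/virtualisation/xen-domU.nix> ];
      # Guest-specific configuration goes here.
    }
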