Diffstat (limited to 'nixos/modules/virtualisation')
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix            |   7
-rw-r--r--  nixos/modules/virtualisation/amazon-init.nix             |   2
-rw-r--r--  nixos/modules/virtualisation/azure-agent.nix             |  10
-rw-r--r--  nixos/modules/virtualisation/brightbox-image.nix         |   2
-rw-r--r--  nixos/modules/virtualisation/container-config.nix        |   2
-rw-r--r--  nixos/modules/virtualisation/containers.nix              |  27
-rw-r--r--  nixos/modules/virtualisation/ec2-amis.nix                |  50
-rw-r--r--  nixos/modules/virtualisation/google-compute-image.nix    |  18
-rw-r--r--  nixos/modules/virtualisation/grow-partition.nix          |  51
-rw-r--r--  nixos/modules/virtualisation/hyperv-guest.nix            |  37
-rw-r--r--  nixos/modules/virtualisation/libvirtd.nix                |  31
-rw-r--r--  nixos/modules/virtualisation/lxc.nix                     |   5
-rw-r--r--  nixos/modules/virtualisation/lxcfs.nix                   |   4
-rw-r--r--  nixos/modules/virtualisation/lxd.nix                     |  67
-rw-r--r--  nixos/modules/virtualisation/nova-config.nix             |   4
-rw-r--r--  nixos/modules/virtualisation/nova.nix                    | 174
-rw-r--r--  nixos/modules/virtualisation/openstack/common.nix        |  84
-rw-r--r--  nixos/modules/virtualisation/openstack/glance.nix        | 245
-rw-r--r--  nixos/modules/virtualisation/openstack/keystone.nix      | 220
-rw-r--r--  nixos/modules/virtualisation/openvswitch.nix             |   2
-rw-r--r--  nixos/modules/virtualisation/parallels-guest.nix         |  74
-rw-r--r--  nixos/modules/virtualisation/qemu-vm.nix                 |  20
-rw-r--r--  nixos/modules/virtualisation/virtualbox-host.nix         |   2
-rw-r--r--  nixos/modules/virtualisation/virtualbox-image.nix        |  12
-rw-r--r--  nixos/modules/virtualisation/xen-dom0.nix                |  37
25 files changed, 292 insertions, 895 deletions
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index 1eb3ca707afd..f74c42a777f5 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -11,7 +11,7 @@ with lib;
 let cfg = config.ec2; in
 
 {
-  imports = [ ../profiles/headless.nix ./ec2-data.nix ./grow-partition.nix ./amazon-init.nix ];
+  imports = [ ../profiles/headless.nix ./ec2-data.nix ./amazon-init.nix ];
 
   config = {
 
@@ -21,7 +21,7 @@ let cfg = config.ec2; in
       }
     ];
 
-    virtualisation.growPartition = cfg.hvm;
+    boot.growPartition = cfg.hvm;
 
     fileSystems."/" = {
       device = "/dev/disk/by-label/nixos";
@@ -152,5 +152,8 @@ let cfg = config.ec2; in
     environment.systemPackages = [ pkgs.cryptsetup ];
 
     boot.initrd.supportedFilesystems = [ "unionfs-fuse" ];
+    
+    # EC2 has its own NTP server provided by the hypervisor
+    networking.timeServers = [ "169.254.169.123" ];
   };
 }
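
Note: the functional changes above are the move from virtualisation.growPartition to boot.growPartition and the new default time server (the link-local Amazon Time Sync Service address). A minimal sketch of an EC2 instance configuration that sets both explicitly, assuming the standard amazon-image profile import:

  { ... }:

  {
    imports = [ <nixpkgs/nixos/modules/virtualisation/amazon-image.nix> ];

    # Grow the root partition on first boot (formerly virtualisation.growPartition).
    boot.growPartition = true;

    # The hypervisor-provided NTP endpoint set by this module; any other
    # NTP servers can be substituted here.
    networking.timeServers = [ "169.254.169.123" ];
  }
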
diff --git a/nixos/modules/virtualisation/amazon-init.nix b/nixos/modules/virtualisation/amazon-init.nix
index a7362423eb46..8032b2c6d7ca 100644
--- a/nixos/modules/virtualisation/amazon-init.nix
+++ b/nixos/modules/virtualisation/amazon-init.nix
@@ -2,7 +2,7 @@
 
 let
   script = ''
-    #!${pkgs.stdenv.shell} -eu
+    #!${pkgs.runtimeShell} -eu
 
     echo "attempting to fetch configuration from EC2 user data..."
 
diff --git a/nixos/modules/virtualisation/azure-agent.nix b/nixos/modules/virtualisation/azure-agent.nix
index 6817eb837a01..b7ab54aab7ec 100644
--- a/nixos/modules/virtualisation/azure-agent.nix
+++ b/nixos/modules/virtualisation/azure-agent.nix
@@ -47,7 +47,7 @@ let
   };
 
   provisionedHook = pkgs.writeScript "provisioned-hook" ''
-    #!${pkgs.stdenv.shell}
+    #!${pkgs.runtimeShell}
     ${config.systemd.package}/bin/systemctl start provisioned.target
   '';
 
@@ -66,6 +66,10 @@ in
       default = false;
       description = "Whether to enable verbose logging.";
     };
+    mountResourceDisk = mkOption {
+      default = true;
+      description = "Whether the agent should format (ext4) and mount the resource disk to /mnt/resource.";
+    };
   };
 
   ###### implementation
@@ -112,7 +116,7 @@ in
         Provisioning.ExecuteCustomData=n
 
         # Format if unformatted. If 'n', resource disk will not be mounted.
-        ResourceDisk.Format=y
+        ResourceDisk.Format=${if cfg.mountResourceDisk then "y" else "n"}
 
         # File system on the resource disk
         # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here.
@@ -181,7 +185,7 @@ in
       after = [ "network-online.target" "sshd.service" ];
       wants = [ "network-online.target" ];
 
-      path = [ pkgs.e2fsprogs ];
+      path = [ pkgs.e2fsprogs pkgs.bash ];
       description = "Windows Azure Agent Service";
       unitConfig.ConditionPathExists = "/etc/waagent.conf";
       serviceConfig = {
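
The new mountResourceDisk option drives ResourceDisk.Format in waagent.conf. A sketch that leaves the ephemeral resource disk unformatted and unmounted, assuming the module's usual virtualisation.azure.agent option namespace:

  { ... }:

  {
    virtualisation.azure.agent.enable = true;

    # With the default (true) the agent formats the resource disk as ext4
    # and mounts it at /mnt/resource; set false to manage it manually.
    virtualisation.azure.agent.mountResourceDisk = false;
  }
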
diff --git a/nixos/modules/virtualisation/brightbox-image.nix b/nixos/modules/virtualisation/brightbox-image.nix
index 08bbcfd9d7c2..39a655b4c104 100644
--- a/nixos/modules/virtualisation/brightbox-image.nix
+++ b/nixos/modules/virtualisation/brightbox-image.nix
@@ -26,7 +26,7 @@ in
               rm $diskImageBase
               popd
             '';
-          diskImageBase = "nixos-image-${config.system.nixosLabel}-${pkgs.stdenv.system}.raw";
+          diskImageBase = "nixos-image-${config.system.nixos.label}-${pkgs.stdenv.system}.raw";
           buildInputs = [ pkgs.utillinux pkgs.perl ];
           exportReferencesGraph =
             [ "closure" config.system.build.toplevel ];
diff --git a/nixos/modules/virtualisation/container-config.nix b/nixos/modules/virtualisation/container-config.nix
index b4f9d8b6fc17..5e368acd6d8b 100644
--- a/nixos/modules/virtualisation/container-config.nix
+++ b/nixos/modules/virtualisation/container-config.nix
@@ -11,7 +11,7 @@ with lib;
     services.udisks2.enable = mkDefault false;
     powerManagement.enable = mkDefault false;
 
-    networking.useHostResolvConf = true;
+    networking.useHostResolvConf = mkDefault true;
 
     # Containers should be light-weight, so start sshd on demand.
     services.openssh.startWhenNeeded = mkDefault true;
diff --git a/nixos/modules/virtualisation/containers.nix b/nixos/modules/virtualisation/containers.nix
index e68bfd860601..248c2fc1fb23 100644
--- a/nixos/modules/virtualisation/containers.nix
+++ b/nixos/modules/virtualisation/containers.nix
@@ -33,7 +33,7 @@ let
     in
       pkgs.writeScript "container-init"
       ''
-        #! ${pkgs.stdenv.shell} -e
+        #! ${pkgs.runtimeShell} -e
 
         # Initialise the container side of the veth pair.
         if [ "$PRIVATE_NETWORK" = 1 ]; then
@@ -112,7 +112,7 @@ let
 
       # If the host is 64-bit and the container is 32-bit, add a
       # --personality flag.
-      ${optionalString (config.nixpkgs.system == "x86_64-linux") ''
+      ${optionalString (config.nixpkgs.localSystem.system == "x86_64-linux") ''
         if [ "$(< ''${SYSTEM_PATH:-/nix/var/nix/profiles/per-container/$INSTANCE/system}/system)" = i686-linux ]; then
           extraFlags+=" --personality=x86"
         fi
@@ -223,7 +223,7 @@ let
   serviceDirectives = cfg: {
     ExecReload = pkgs.writeScript "reload-container"
       ''
-        #! ${pkgs.stdenv.shell} -e
+        #! ${pkgs.runtimeShell} -e
         ${pkgs.nixos-container}/bin/nixos-container run "$INSTANCE" -- \
           bash --login -c "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/bin/switch-to-configuration test"
       '';
@@ -255,7 +255,7 @@ let
   };
 
 
-  system = config.nixpkgs.system;
+  system = config.nixpkgs.localSystem.system;
 
   bindMountOpts = { name, config, ... }: {
 
@@ -575,6 +575,16 @@ in
               '';
             };
 
+            extraFlags = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "--drop-capability=CAP_SYS_CHROOT" ];
+              description = ''
+                Extra flags passed to the systemd-nspawn command.
+                See systemd-nspawn(1) for details.
+              '';
+            };
+
           } // networkOptions;
 
           config = mkMerge
@@ -714,7 +724,9 @@ in
             ${optionalString cfg.autoStart ''
               AUTO_START=1
             ''}
-            EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts}"
+            EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts +
+              optionalString (cfg.extraFlags != [])
+                (" " + concatStringsSep " " cfg.extraFlags)}"
           '';
       }) config.containers;
 
@@ -726,6 +738,11 @@ in
 
     networking.dhcpcd.denyInterfaces = [ "ve-*" "vb-*" ];
 
+    services.udev.extraRules = optionalString config.networking.networkmanager.enable ''
+      # Don't manage interfaces created by nixos-container.
+      ENV{INTERFACE}=="v[eb]-*", ENV{NM_UNMANAGED}="1"
+    '';
+
     environment.systemPackages = [ pkgs.nixos-container ];
   });
 }
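
Declarative containers gain an extraFlags option that is appended to EXTRA_NSPAWN_FLAGS, and interfaces created by nixos-container are now marked unmanaged for NetworkManager. A sketch of a declarative container using the new option (the container name and payload are illustrative):

  { ... }:

  {
    containers.webserver = {
      autoStart = true;
      config = { pkgs, ... }: {
        services.nginx.enable = true;
      };
      # Passed straight through to systemd-nspawn; see systemd-nspawn(1).
      extraFlags = [ "--drop-capability=CAP_SYS_CHROOT" ];
    };
  }
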
diff --git a/nixos/modules/virtualisation/ec2-amis.nix b/nixos/modules/virtualisation/ec2-amis.nix
index 14826b6272f7..baffad79b001 100644
--- a/nixos/modules/virtualisation/ec2-amis.nix
+++ b/nixos/modules/virtualisation/ec2-amis.nix
@@ -223,21 +223,39 @@ let self = {
   "17.03".us-west-2.hvm-ebs = "ami-a93daac9";
   "17.03".us-west-2.hvm-s3 = "ami-5139ae31";
 
-  # 17.09.1483.d0f0657ca0
-  "17.09".eu-west-1.hvm-ebs = "ami-cf33e7b6";
-  "17.09".eu-west-2.hvm-ebs = "ami-7d061419";
-  "17.09".eu-central-1.hvm-ebs = "ami-7548fa1a";
-  "17.09".us-east-1.hvm-ebs = "ami-6f669d15";
-  "17.09".us-east-2.hvm-ebs = "ami-cbe1ccae";
-  "17.09".us-west-1.hvm-ebs = "ami-9d95a5fd";
-  "17.09".us-west-2.hvm-ebs = "ami-d3956fab";
-  "17.09".ca-central-1.hvm-ebs = "ami-ee4ef78a";
-  "17.09".ap-southeast-1.hvm-ebs = "ami-1dfc807e";
-  "17.09".ap-southeast-2.hvm-ebs = "ami-dcb350be";
-  "17.09".ap-northeast-1.hvm-ebs = "ami-00ec3d66";
-  "17.09".ap-northeast-2.hvm-ebs = "ami-1107dd7f";
-  "17.09".sa-east-1.hvm-ebs = "ami-0377086f";
-  "17.09".ap-south-1.hvm-ebs = "ami-4a064625";
+  # 17.09.2681.59661f21be6
+  "17.09".eu-west-1.hvm-ebs = "ami-a30192da";
+  "17.09".eu-west-2.hvm-ebs = "ami-295a414d";
+  "17.09".eu-west-3.hvm-ebs = "ami-8c0eb9f1";
+  "17.09".eu-central-1.hvm-ebs = "ami-266cfe49";
+  "17.09".us-east-1.hvm-ebs = "ami-40bee63a";
+  "17.09".us-east-2.hvm-ebs = "ami-9d84aff8";
+  "17.09".us-west-1.hvm-ebs = "ami-d14142b1";
+  "17.09".us-west-2.hvm-ebs = "ami-3eb40346";
+  "17.09".ca-central-1.hvm-ebs = "ami-ca8207ae";
+  "17.09".ap-southeast-1.hvm-ebs = "ami-84bccff8";
+  "17.09".ap-southeast-2.hvm-ebs = "ami-0dc5386f";
+  "17.09".ap-northeast-1.hvm-ebs = "ami-89b921ef";
+  "17.09".ap-northeast-2.hvm-ebs = "ami-179b3b79";
+  "17.09".sa-east-1.hvm-ebs = "ami-4762202b";
+  "17.09".ap-south-1.hvm-ebs = "ami-4e376021";
 
-  latest = self."17.09";
+  # 18.03.131792.becbe4dbe16
+  "18.03".eu-west-1.hvm-ebs = "ami-cda4fab4";
+  "18.03".eu-west-2.hvm-ebs = "ami-d96786be";
+  "18.03".eu-west-3.hvm-ebs = "ami-6b0cba16";
+  "18.03".eu-central-1.hvm-ebs = "ami-5e2b75b5";
+  "18.03".us-east-1.hvm-ebs = "ami-d464cba9";
+  "18.03".us-east-2.hvm-ebs = "ami-fd221298";
+  "18.03".us-west-1.hvm-ebs = "ami-ff0d1d9f";
+  "18.03".us-west-2.hvm-ebs = "ami-c05c3bb8";
+  "18.03".ca-central-1.hvm-ebs = "ami-cc72f4a8";
+  "18.03".ap-southeast-1.hvm-ebs = "ami-b61633ca";
+  "18.03".ap-southeast-2.hvm-ebs = "ami-530fc131";
+  "18.03".ap-northeast-1.hvm-ebs = "ami-90d6c0ec";
+  "18.03".ap-northeast-2.hvm-ebs = "ami-a1248bcf";
+  "18.03".sa-east-1.hvm-ebs = "ami-b090c6dc";
+  "18.03".ap-south-1.hvm-ebs = "ami-32c9ec5d";
+
+  latest = self."18.03";
 }; in self
diff --git a/nixos/modules/virtualisation/google-compute-image.nix b/nixos/modules/virtualisation/google-compute-image.nix
index e3b3e6a5f4ab..0b6bec786da4 100644
--- a/nixos/modules/virtualisation/google-compute-image.nix
+++ b/nixos/modules/virtualisation/google-compute-image.nix
@@ -2,11 +2,11 @@
 
 with lib;
 let
-  diskSize = 1024; # MB
+  diskSize = 1536; # MB
   gce = pkgs.google-compute-engine;
 in
 {
-  imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ./grow-partition.nix ];
+  imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ];
 
   system.build.googleComputeImage = import ../../lib/make-disk-image.nix {
     name = "google-compute-image";
@@ -14,7 +14,7 @@ in
       PATH=$PATH:${pkgs.stdenv.lib.makeBinPath [ pkgs.gnutar pkgs.gzip ]}
       pushd $out
       mv $diskImage disk.raw
-      tar -Szcf nixos-image-${config.system.nixosLabel}-${pkgs.stdenv.system}.raw.tar.gz disk.raw
+      tar -Szcf nixos-image-${config.system.nixos.label}-${pkgs.stdenv.system}.raw.tar.gz disk.raw
       rm $out/disk.raw
       popd
     '';
@@ -29,6 +29,7 @@ in
     autoResize = true;
   };
 
+  boot.growPartition = true;
   boot.kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
   boot.initrd.kernelModules = [ "virtio_scsi" ];
   boot.kernelModules = [ "virtio_pci" "virtio_net" ];
@@ -56,6 +57,12 @@ in
   # Always include cryptsetup so that NixOps can use it.
   environment.systemPackages = [ pkgs.cryptsetup ];
 
+  # Make sure GCE image does not replace host key that NixOps sets
+  environment.etc."default/instance_configs.cfg".text = lib.mkDefault ''
+    [InstanceSetup]
+    set_host_keys = false
+  '';
+
   # Rely on GCP's firewall instead
   networking.firewall.enable = mkDefault false;
 
@@ -68,6 +75,9 @@ in
 
   networking.usePredictableInterfaceNames = false;
 
+  # GC has 1460 MTU
+  networking.interfaces.eth0.mtu = 1460;
+
   # allow the google-accounts-daemon to manage users
   users.mutableUsers = true;
   # and allow users to sudo without password
@@ -211,7 +221,7 @@ in
           echo "Obtaining SSH keys..."
           mkdir -m 0700 -p /root/.ssh
           AUTH_KEYS=$(${mktemp})
-          ${wget} -O $AUTH_KEYS http://metadata.google.internal/computeMetadata/v1/project/attributes/sshKeys
+          ${wget} -O $AUTH_KEYS --header="Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/sshKeys
           if [ -s $AUTH_KEYS ]; then
 
             # Read in key one by one, split in case Google decided
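
The image now ships /etc/default/instance_configs.cfg (via mkDefault) so the guest environment does not replace host keys set by NixOps, and pins eth0 to the 1460-byte MTU used on GCE. A deployment that instead wants the guest environment to manage host keys can override the default, for example:

  { lib, ... }:

  {
    # Override the image's mkDefault and let the GCE guest environment
    # manage SSH host keys again.
    environment.etc."default/instance_configs.cfg".text = lib.mkForce ''
      [InstanceSetup]
      set_host_keys = true
    '';
  }
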
diff --git a/nixos/modules/virtualisation/grow-partition.nix b/nixos/modules/virtualisation/grow-partition.nix
index 2cb932d208f0..444c0bc1630e 100644
--- a/nixos/modules/virtualisation/grow-partition.nix
+++ b/nixos/modules/virtualisation/grow-partition.nix
@@ -1,48 +1,3 @@
-# This module automatically grows the root partition on virtual machines.
-# This allows an instance to be created with a bigger root filesystem
-# than provided by the machine image.
-
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-{
-
-  options = {
-
-    virtualisation.growPartition = mkOption {
-      type = types.bool;
-      default = true;
-    };
-
-  };
-
-  config = mkIf config.virtualisation.growPartition {
-
-    boot.initrd.extraUtilsCommands = ''
-      copy_bin_and_libs ${pkgs.gawk}/bin/gawk
-      copy_bin_and_libs ${pkgs.gnused}/bin/sed
-      copy_bin_and_libs ${pkgs.utillinux}/sbin/sfdisk
-      copy_bin_and_libs ${pkgs.utillinux}/sbin/lsblk
-
-      substitute "${pkgs.cloud-utils}/bin/.growpart-wrapped" "$out/bin/growpart" \
-        --replace "${pkgs.bash}/bin/sh" "/bin/sh" \
-        --replace "awk" "gawk" \
-        --replace "sed" "gnused"
-
-      ln -s sed $out/bin/gnused
-    '';
-
-    boot.initrd.postDeviceCommands = ''
-      rootDevice="${config.fileSystems."/".device}"
-      if [ -e "$rootDevice" ]; then
-        rootDevice="$(readlink -f "$rootDevice")"
-        parentDevice="$(lsblk -npo PKNAME "$rootDevice")"
-        TMPDIR=/run sh $(type -P growpart) "$parentDevice" "''${rootDevice#$parentDevice}"
-        udevadm settle
-      fi
-    '';
-
-  };
-
-}
+# This profile is deprecated, use boot.growPartition directly.
+builtins.trace "the profile <nixos/modules/virtualisation/grow-partition.nix> is deprecated, use boot.growPartition instead"
+{ }
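
grow-partition.nix is now only a deprecation shim; the implementation lives behind boot.growPartition. A minimal sketch of an image configuration that grows its root partition and filesystem on first boot (the device label follows the convention used elsewhere in this diff):

  { ... }:

  {
    boot.growPartition = true;

    fileSystems."/" = {
      device = "/dev/disk/by-label/nixos";
      fsType = "ext4";
      autoResize = true;  # grow the filesystem after the partition is grown
    };
  }
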
diff --git a/nixos/modules/virtualisation/hyperv-guest.nix b/nixos/modules/virtualisation/hyperv-guest.nix
new file mode 100644
index 000000000000..ecd2a8117710
--- /dev/null
+++ b/nixos/modules/virtualisation/hyperv-guest.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.hypervGuest;
+
+in {
+  options = {
+    virtualisation.hypervGuest = {
+      enable = mkEnableOption "Hyper-V Guest Support";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ config.boot.kernelPackages.hyperv-daemons.bin ];
+
+    security.rngd.enable = false;
+
+    # enable hotadding memory
+    services.udev.packages = lib.singleton (pkgs.writeTextFile {
+      name = "hyperv-memory-hotadd-udev-rules";
+      destination = "/etc/udev/rules.d/99-hyperv-memory-hotadd.rules";
+      text = ''
+        ACTION="add", SUBSYSTEM=="memory", ATTR{state}="online"
+      '';
+    });
+
+    systemd = {
+      packages = [ config.boot.kernelPackages.hyperv-daemons.lib ];
+
+      targets.hyperv-daemons = {
+        wantedBy = [ "multi-user.target" ];
+      };
+    };
+  };
+}
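
hyperv-guest.nix is a new module; enabling it installs the Hyper-V daemons, disables rngd, and adds the memory hot-add udev rule. Minimal usage:

  { ... }:

  {
    # Pull in Hyper-V integration services for a NixOS guest.
    virtualisation.hypervGuest.enable = true;
  }
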
diff --git a/nixos/modules/virtualisation/libvirtd.nix b/nixos/modules/virtualisation/libvirtd.nix
index 8aa7ad8e3911..024db7f87c2e 100644
--- a/nixos/modules/virtualisation/libvirtd.nix
+++ b/nixos/modules/virtualisation/libvirtd.nix
@@ -37,11 +37,13 @@ in {
       '';
     };
 
-    virtualisation.libvirtd.enableKVM = mkOption {
-      type = types.bool;
-      default = true;
+    virtualisation.libvirtd.qemuPackage = mkOption {
+      type = types.package;
+      default = pkgs.qemu;
       description = ''
-        This option enables support for QEMU/KVM in libvirtd.
+        Qemu package to use with libvirt.
+        `pkgs.qemu` can emulate alien architectures (e.g. aarch64 on x86)
+        `pkgs.qemu_kvm` saves disk space allowing to emulate only host architectures.
       '';
     };
 
@@ -102,7 +104,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    environment.systemPackages = with pkgs; [ libvirt netcat-openbsd qemu_kvm ];
+    environment.systemPackages = with pkgs; [ libvirt netcat-openbsd cfg.qemuPackage ];
 
     boot.kernelModules = [ "tun" ];
 
@@ -117,17 +119,10 @@ in {
       after = [ "systemd-udev-settle.service" ]
               ++ optional vswitch.enable "vswitchd.service";
 
-      environment = {
-        LIBVIRTD_ARGS = ''--config "${configFile}" ${concatStringsSep " " cfg.extraOptions}'';
-      };
+      environment.LIBVIRTD_ARGS = ''--config "${configFile}" ${concatStringsSep " " cfg.extraOptions}'';
 
-      path = with pkgs; [
-          bridge-utils
-          dmidecode
-          dnsmasq
-          ebtables
-        ]
-        ++ optional vswitch.enable vswitch.package;
+      path = [ cfg.qemuPackage ] # libvirtd requires qemu-img to manage disk images
+             ++ optional vswitch.enable vswitch.package;
 
       preStart = ''
         mkdir -p /var/log/libvirt/qemu -m 755
@@ -154,9 +149,9 @@ in {
 
         # stable (not GC'able as in /nix/store) paths for using in <emulator> section of xml configs
         mkdir -p /run/libvirt/nix-emulators
-        ln -s --force ${pkgs.libvirt}/libexec/libvirt_lxc /run/libvirt/nix-emulators/
-        ${optionalString pkgs.stdenv.isAarch64 "ln -s --force ${pkgs.qemu}/bin/qemu-system-aarch64 /run/libvirt/nix-emulators/"}
-        ${optionalString cfg.enableKVM         "ln -s --force ${pkgs.qemu_kvm}/bin/qemu-kvm        /run/libvirt/nix-emulators/"}
+        for emulator in ${pkgs.libvirt}/libexec/libvirt_lxc ${cfg.qemuPackage}/bin/qemu-kvm ${cfg.qemuPackage}/bin/qemu-system-*; do
+          ln -s --force "$emulator" /run/libvirt/nix-emulators/
+        done
 
         ${optionalString cfg.qemuOvmf ''
             mkdir -p /run/libvirt/nix-ovmf
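
virtualisation.libvirtd.enableKVM is replaced by a qemuPackage option, and the daemon's PATH is reduced to the chosen QEMU (for qemu-img) plus Open vSwitch when enabled. A sketch selecting the KVM-only QEMU to save disk space:

  { pkgs, ... }:

  {
    virtualisation.libvirtd = {
      enable = true;
      # pkgs.qemu (the default) can also emulate foreign architectures;
      # pkgs.qemu_kvm only targets the host architecture but is smaller.
      qemuPackage = pkgs.qemu_kvm;
    };
  }
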
diff --git a/nixos/modules/virtualisation/lxc.nix b/nixos/modules/virtualisation/lxc.nix
index 2310fe984325..9b5adaf08249 100644
--- a/nixos/modules/virtualisation/lxc.nix
+++ b/nixos/modules/virtualisation/lxc.nix
@@ -74,6 +74,9 @@ in
     systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
 
     security.apparmor.packages = [ pkgs.lxc ];
-    security.apparmor.profiles = [ "${pkgs.lxc}/etc/apparmor.d/lxc-containers" ];
+    security.apparmor.profiles = [
+      "${pkgs.lxc}/etc/apparmor.d/lxc-containers"
+      "${pkgs.lxc}/etc/apparmor.d/usr.bin.lxc-start"
+    ];
   };
 }
diff --git a/nixos/modules/virtualisation/lxcfs.nix b/nixos/modules/virtualisation/lxcfs.nix
index 48462dc66da8..b2457403463a 100644
--- a/nixos/modules/virtualisation/lxcfs.nix
+++ b/nixos/modules/virtualisation/lxcfs.nix
@@ -28,13 +28,9 @@ in {
 
   ###### implementation
   config = mkIf cfg.enable {
-    services.cgmanager.enable = true;
-
     systemd.services.lxcfs = {
       description = "FUSE filesystem for LXC";
       wantedBy = [ "multi-user.target" ];
-      requires = [ "cgmanager.service" ];
-      after = [ "cgmanager.service" ];
       before = [ "lxc.service" ];
       restartIfChanged = false;
       serviceConfig = {
diff --git a/nixos/modules/virtualisation/lxd.nix b/nixos/modules/virtualisation/lxd.nix
index b1ff0337994e..3e76cdacfc4b 100644
--- a/nixos/modules/virtualisation/lxd.nix
+++ b/nixos/modules/virtualisation/lxd.nix
@@ -15,50 +15,69 @@ in
 
   options = {
 
-    virtualisation.lxd.enable =
-      mkOption {
+    virtualisation.lxd = {
+      enable = mkOption {
         type = types.bool;
         default = false;
-        description =
-          ''
-            This option enables lxd, a daemon that manages
-            containers. Users in the "lxd" group can interact with
-            the daemon (e.g. to start or stop containers) using the
-            <command>lxc</command> command line tool, among others.
-          '';
+        description = ''
+          This option enables lxd, a daemon that manages
+          containers. Users in the "lxd" group can interact with
+          the daemon (e.g. to start or stop containers) using the
+          <command>lxc</command> command line tool, among others.
+        '';
       };
-
+      zfsSupport = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          enables lxd to use zfs as a storage for containers.
+          This option is enabled by default if a zfs pool is configured
+          with nixos.
+        '';
+      };
+    };
   };
 
-
   ###### implementation
 
   config = mkIf cfg.enable {
 
-    environment.systemPackages =
-      [ pkgs.lxd ];
+    environment.systemPackages = [ pkgs.lxd ];
 
-    systemd.services.lxd =
-      { description = "LXD Container Management Daemon";
+    security.apparmor = {
+      enable = true;
+      profiles = [
+        "${pkgs.lxc}/etc/apparmor.d/usr.bin.lxc-start"
+        "${pkgs.lxc}/etc/apparmor.d/lxc-containers"
+      ];
+      packages = [ pkgs.lxc ];
+    };
 
-        wantedBy = [ "multi-user.target" ];
-        after = [ "systemd-udev-settle.service" ];
+    systemd.services.lxd = {
+      description = "LXD Container Management Daemon";
 
-        # TODO(wkennington): Add lvm2 and thin-provisioning-tools
-        path = with pkgs; [ acl rsync gnutar xz btrfs-progs gzip dnsmasq squashfsTools iproute iptables ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
 
-        serviceConfig.ExecStart = "@${pkgs.lxd.bin}/bin/lxd lxd --syslog --group lxd";
-        serviceConfig.Type = "simple";
-        serviceConfig.KillMode = "process"; # when stopping, leave the containers alone
+      path = lib.optional cfg.zfsSupport pkgs.zfs;
+
+      preStart = ''
+        mkdir -m 0755 -p /var/lib/lxc/rootfs
+      '';
+
+      serviceConfig = {
+        ExecStart = "@${pkgs.lxd.bin}/bin/lxd lxd --group lxd";
+        Type = "simple";
+        KillMode = "process"; # when stopping, leave the containers alone
       };
 
+    };
+
     users.extraGroups.lxd.gid = config.ids.gids.lxd;
 
     users.extraUsers.root = {
       subUidRanges = [ { startUid = 1000000; count = 65536; } ];
       subGidRanges = [ { startGid = 1000000; count = 65536; } ];
     };
-
   };
-
 }
diff --git a/nixos/modules/virtualisation/nova-config.nix b/nixos/modules/virtualisation/nova-config.nix
index c865cf451e40..c1d2a314daf2 100644
--- a/nixos/modules/virtualisation/nova-config.nix
+++ b/nixos/modules/virtualisation/nova-config.nix
@@ -6,7 +6,6 @@ with lib;
   imports = [
     ../profiles/qemu-guest.nix
     ../profiles/headless.nix
-    ./grow-partition.nix
   ];
 
   config = {
@@ -15,8 +14,7 @@ with lib;
       autoResize = true;
     };
 
-    virtualisation.growPartition = true;
-
+    boot.growPartition = true;
     boot.kernelParams = [ "console=ttyS0" ];
     boot.loader.grub.device = "/dev/vda";
     boot.loader.timeout = 0;
diff --git a/nixos/modules/virtualisation/nova.nix b/nixos/modules/virtualisation/nova.nix
deleted file mode 100644
index c2837d0e2e24..000000000000
--- a/nixos/modules/virtualisation/nova.nix
+++ /dev/null
@@ -1,174 +0,0 @@
-# Module for Nova, a.k.a. OpenStack Compute.
-
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-
-  cfg = config.virtualisation.nova;
-
-  nova = pkgs.nova;
-
-  novaConf = pkgs.writeText "nova.conf"
-    ''
-      --nodaemon
-      --verbose
-      ${cfg.extraConfig}
-    '';
-
-in
-
-{
-
-  ###### interface
-
-  options = {
-
-    virtualisation.nova.enableSingleNode =
-      mkOption {
-        default = false;
-        description =
-          ''
-            This option enables Nova, also known as OpenStack Compute,
-            a cloud computing system, as a single-machine
-            installation.  That is, all of Nova's components are
-            enabled on this machine, using SQLite as Nova's database.
-            This is useful for evaluating and experimenting with Nova.
-            However, for a real cloud computing environment, you'll
-            want to enable some of Nova's services on other machines,
-            and use a database such as MySQL.
-          '';
-      };
-
-    virtualisation.nova.extraConfig =
-      mkOption {
-        default = "";
-        description =
-          ''
-            Additional text appended to <filename>nova.conf</filename>,
-            the main Nova configuration file.
-          '';
-      };
-
-  };
-
-
-  ###### implementation
-
-  config = mkIf cfg.enableSingleNode {
-
-    environment.systemPackages = [ nova pkgs.euca2ools pkgs.novaclient ];
-
-    environment.etc =
-      [ { source = novaConf;
-          target = "nova/nova.conf";
-        }
-      ];
-
-    # Nova requires libvirtd and RabbitMQ.
-    virtualisation.libvirtd.enable = true;
-    services.rabbitmq.enable = true;
-
-    # `qemu-nbd' required the `nbd' kernel module.
-    boot.kernelModules = [ "nbd" ];
-
-    system.activationScripts.nova =
-      ''
-        mkdir -m 755 -p /var/lib/nova
-        mkdir -m 755 -p /var/lib/nova/networks
-        mkdir -m 700 -p /var/lib/nova/instances
-        mkdir -m 700 -p /var/lib/nova/keys
-
-        # Allow the CA certificate generation script (called by
-        # nova-api) to work.
-        mkdir -m 700 -p /var/lib/nova/CA /var/lib/nova/CA/private
-
-        # Initialise the SQLite database.
-        ${nova}/bin/nova-manage db sync
-      '';
-
-    # `nova-api' receives and executes external client requests from
-    # tools such as euca2ools.  It listens on port 8773 (XML) and 8774
-    # (JSON).
-    jobs.nova_api =
-      { name = "nova-api";
-
-        description = "Nova API service";
-
-        startOn = "ip-up";
-
-        # `openssl' is required to generate the CA.  `openssh' is
-        # required to generate key pairs.
-        path = [ pkgs.openssl config.programs.ssh.package pkgs.bash ];
-
-        respawn = false;
-
-        exec = "${nova}/bin/nova-api --flagfile=${novaConf} --api_paste_config=${nova}/etc/nova/api-paste.ini";
-      };
-
-    # `nova-objectstore' is a simple image server.  Useful if you're
-    # not running the OpenStack Imaging Service (Swift).  It serves
-    # images placed in /var/lib/nova/images/.
-    jobs.nova_objectstore =
-      { name = "nova-objectstore";
-
-        description = "Nova Simple Object Store Service";
-
-        startOn = "ip-up";
-
-        preStart =
-          ''
-            mkdir -m 700 -p /var/lib/nova/images
-          '';
-
-        exec = "${nova}/bin/nova-objectstore --flagfile=${novaConf}";
-      };
-
-    # `nova-scheduler' schedules VM execution requests.
-    jobs.nova_scheduler =
-      { name = "nova-scheduler";
-
-        description = "Nova Scheduler Service";
-
-        startOn = "ip-up";
-
-        exec = "${nova}/bin/nova-scheduler --flagfile=${novaConf}";
-      };
-
-    # `nova-compute' starts and manages virtual machines.
-    jobs.nova_compute =
-      { name = "nova-compute";
-
-        description = "Nova Compute Service";
-
-        startOn = "ip-up";
-
-        path =
-          [ pkgs.sudo pkgs.vlan pkgs.nettools pkgs.iptables pkgs.qemu_kvm
-            pkgs.e2fsprogs pkgs.utillinux pkgs.multipath-tools pkgs.iproute
-            pkgs.bridge-utils
-          ];
-
-        exec = "${nova}/bin/nova-compute --flagfile=${novaConf}";
-      };
-
-    # `nova-network' manages networks and allocates IP addresses.
-    jobs.nova_network =
-      { name = "nova-network";
-
-        description = "Nova Network Service";
-
-        startOn = "ip-up";
-
-        path =
-          [ pkgs.sudo pkgs.vlan pkgs.dnsmasq pkgs.nettools pkgs.iptables
-            pkgs.iproute pkgs.bridge-utils pkgs.radvd
-          ];
-
-        exec = "${nova}/bin/nova-network --flagfile=${novaConf}";
-      };
-
-  };
-
-}
diff --git a/nixos/modules/virtualisation/openstack/common.nix b/nixos/modules/virtualisation/openstack/common.nix
deleted file mode 100644
index 2feb0a873951..000000000000
--- a/nixos/modules/virtualisation/openstack/common.nix
+++ /dev/null
@@ -1,84 +0,0 @@
-{ lib }:
-
-with lib;
-
-rec {
-  # A shell script string helper to get the value of a secret at
-  # runtime.
-  getSecret = secretOption:
-    if secretOption.storage == "fromFile"
-    then ''$(cat ${secretOption.value})''
-    else ''${secretOption.value}'';
-
-
-  # A shell script string help to replace at runtime in a file the
-  # pattern of a secret by its value.
-  replaceSecret = secretOption: filename: ''
-    sed -i "s/${secretOption.pattern}/${getSecret secretOption}/g" ${filename}
-    '';
-
-  # This generates an option that can be used to declare secrets which
-  # can be stored in the nix store, or not. A pattern is written in
-  # the nix store to represent the secret. The pattern can
-  # then be overwritten with the value of the secret at runtime.
-  mkSecretOption = {name, description ? ""}:
-    mkOption {
-      description = description;
-      type = types.submodule ({
-        options = {
-          pattern = mkOption {
-            type = types.str;
-            default = "##${name}##";
-            description = "The pattern that represent the secret.";
-            };
-          storage = mkOption {
-            type = types.enum [ "fromNixStore" "fromFile" ];
-            description = ''
-            Choose the way the password is provisionned. If
-            fromNixStore is used, the value is the password and it is
-            written in the nix store. If fromFile is used, the value
-            is a path from where the password will be read at
-            runtime. This is generally used with <link
-            xlink:href="https://nixos.org/nixops/manual/#opt-deployment.keys">
-            deployment keys</link> of Nixops.
-           '';};
-            value = mkOption {
-              type = types.str;
-	      description = ''
-	      If the storage is fromNixStore, the value is the password itself,
-	      otherwise it is a path to the file that contains the password.
-	      '';
-	      };
-            };});
-  };
-  
-  databaseOption = name: {
-    host = mkOption {
-      type = types.str;
-      default = "localhost";
-      description = ''
-        Host of the database.
-      '';
-    };
-
-    name = mkOption {
-      type = types.str;
-      default = name;
-      description = ''
-        Name of the existing database.
-      '';
-    };
-
-    user = mkOption {
-      type = types.str;
-      default = name;
-      description = ''
-        The database user. The user must exist and has access to
-        the specified database.
-      '';
-    };
-    password = mkSecretOption {
-      name = name + "MysqlPassword";
-      description = "The database user's password";};
-  };
-}
diff --git a/nixos/modules/virtualisation/openstack/glance.nix b/nixos/modules/virtualisation/openstack/glance.nix
deleted file mode 100644
index 7862409a65ec..000000000000
--- a/nixos/modules/virtualisation/openstack/glance.nix
+++ /dev/null
@@ -1,245 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib; with import ./common.nix {inherit lib;};
-
-let
-  cfg = config.virtualisation.openstack.glance;
-  commonConf = ''
-    [database]
-    connection = "mysql://${cfg.database.user}:${cfg.database.password.pattern}@${cfg.database.host}/${cfg.database.name}"
-    notification_driver = noop
-
-    [keystone_authtoken]
-    auth_url = ${cfg.authUrl}
-    auth_plugin = password
-    project_name = service
-    project_domain_id = default
-    user_domain_id = default
-    username = ${cfg.serviceUsername}
-    password = ${cfg.servicePassword.pattern}
-
-    [glance_store]
-    default_store = file
-    filesystem_store_datadir = /var/lib/glance/images/
-  '';
-  glanceApiConfTpl = pkgs.writeText "glance-api.conf" ''
-    ${commonConf}
-
-    [paste_deploy]
-    flavor = keystone
-    config_file = ${cfg.package}/etc/glance-api-paste.ini
-  '';
-  glanceRegistryConfTpl = pkgs.writeText "glance-registry.conf" ''
-    ${commonConf}
-
-    [paste_deploy]
-    config_file = ${cfg.package}/etc/glance-registry-paste.ini
-  '';
-  glanceApiConf = "/var/lib/glance/glance-api.conf";
-  glanceRegistryConf = "/var/lib/glance/glance-registry.conf";
-
-in {
-  options.virtualisation.openstack.glance = {
-    package = mkOption {
-      type = types.package;
-      default = pkgs.glance;
-      defaultText = "pkgs.glance";
-      description = ''
-        Glance package to use.
-      '';
-    };
-
-    enable = mkOption {
-      default = false;
-      type = types.bool;
-      description = ''
-        This option enables Glance as a single-machine
-        installation. That is, all of Glance's components are
-        enabled on this machine. This is useful for evaluating and
-        experimenting with Glance. Note we are currently not
-        providing any configurations for a multi-node setup.
-      '';
-    };
-
-    authUrl = mkOption {
-      type = types.str;
-      default = http://localhost:5000;
-      description = ''
-        Complete public Identity (Keystone) API endpoint. Note this is
-        unversionned.
-      '';
-    };
-
-    serviceUsername = mkOption {
-      type = types.str;
-      default = "glance";
-      description = ''
-        The Glance service username. This user is created if bootstrap
-        is enable, otherwise it has to be manually created before
-        starting this service.
-      '';
-    };
-
-    servicePassword = mkSecretOption {
-      name = "glanceAdminPassword";
-      description = ''
-        The Glance service user's password.
-      '';
-    };
-
-    database = databaseOption "glance";
-
-    bootstrap = {
-      enable = mkOption {
-        default = false;
-        type = types.bool;
-        description = ''
-          Bootstrap the Glance service by creating the service tenant,
-          an admin account and a public endpoint. This option provides
-          a ready-to-use glance service. This is only done at the
-          first Glance execution by the systemd post start section.
-          The keystone admin account is used to create required
-          Keystone resource for the Glance service.
-
-          <note><para> This option is a helper for setting up
-          development or testing environments.</para></note>
-        '';
-      };
-
-      endpointPublic = mkOption {
-        type = types.str;
-        default = "http://localhost:9292";
-        description = ''
-          The public image endpoint. The link <link
-          xlink:href="http://docs.openstack.org/liberty/install-guide-rdo/keystone-services.html">
-          create endpoint</link> provides more informations
-          about that.
-        '';
-      };
-
-      keystoneAdminUsername = mkOption {
-        type = types.str;
-        default = "admin";
-        description = ''
-          The keystone admin user name used to create the Glance account.
-        '';
-      };
-
-      keystoneAdminPassword = mkSecretOption {
-        name = "keystoneAdminPassword";
-        description = ''
-          The keystone admin user's password.
-        '';
-      };
-
-      keystoneAdminTenant = mkOption {
-        type = types.str;
-        default = "admin";
-        description = ''
-          The keystone admin tenant used to create the Glance account.
-        '';
-      };
-      keystoneAuthUrl = mkOption {
-        type = types.str;
-        default = "http://localhost:5000/v2.0";
-        description = ''
-          The keystone auth url used to create the Glance account.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    users.extraUsers = [{
-      name = "glance";
-      group = "glance";
-      uid = config.ids.gids.glance;
-
-    }];
-    users.extraGroups = [{
-      name = "glance";
-      gid = config.ids.gids.glance;
-    }];
-
-    systemd.services.glance-registry = {
-      description = "OpenStack Glance Registry Daemon";
-      after = [ "network.target"];
-      path = [ pkgs.curl pkgs.pythonPackages.keystoneclient pkgs.gawk ];
-      wantedBy = [ "multi-user.target" ];
-      preStart = ''
-        mkdir -m 775 -p /var/lib/glance/{images,scrubber,image_cache}
-        chown glance:glance /var/lib/glance/{images,scrubber,image_cache}
-
-        # Secret file managment
-        cp ${glanceRegistryConfTpl} ${glanceRegistryConf};
-        chown glance:glance ${glanceRegistryConf};
-        chmod 640 ${glanceRegistryConf}
-        ${replaceSecret cfg.database.password glanceRegistryConf}
-        ${replaceSecret cfg.servicePassword glanceRegistryConf}
-
-        cp ${glanceApiConfTpl} ${glanceApiConf};
-        chown glance:glance ${glanceApiConf};
-        chmod 640 ${glanceApiConf}
-        ${replaceSecret cfg.database.password glanceApiConf}
-        ${replaceSecret cfg.servicePassword glanceApiConf}
-
-        # Initialise the database
-        ${cfg.package}/bin/glance-manage --config-file=${glanceApiConf} --config-file=${glanceRegistryConf} db_sync
-      '';
-      postStart = ''
-        set -eu
-        export OS_AUTH_URL=${cfg.bootstrap.keystoneAuthUrl}
-        export OS_USERNAME=${cfg.bootstrap.keystoneAdminUsername}
-        export OS_PASSWORD=${getSecret cfg.bootstrap.keystoneAdminPassword}
-        export OS_TENANT_NAME=${cfg.bootstrap.keystoneAdminTenant}
-
-        # Wait until the keystone is available for use
-        count=0
-        while ! keystone user-get ${cfg.bootstrap.keystoneAdminUsername} > /dev/null
-        do
-            if [ $count -eq 30 ]
-            then
-                echo "Tried 30 times, giving up..."
-                exit 1
-            fi
-
-            echo "Keystone not yet started. Waiting for 1 second..."
-            count=$((count++))
-            sleep 1
-        done
-
-        # If the service glance doesn't exist, we consider glance is
-        # not initialized
-        if ! keystone service-get glance
-        then
-            keystone service-create --type image --name glance
-            ID=$(keystone service-get glance | awk '/ id / { print $4 }')
-            keystone endpoint-create --region RegionOne --service $ID --internalurl http://localhost:9292 --adminurl http://localhost:9292 --publicurl ${cfg.bootstrap.endpointPublic}
-
-            keystone user-create --name ${cfg.serviceUsername} --tenant service --pass ${getSecret cfg.servicePassword}
-            keystone user-role-add --tenant service --user ${cfg.serviceUsername} --role admin
-        fi
-        '';
-      serviceConfig = {
-        PermissionsStartOnly = true; # preStart must be run as root
-        TimeoutStartSec = "600"; # 10min for initial db migrations
-        User = "glance";
-        Group = "glance";
-        ExecStart = "${cfg.package}/bin/glance-registry --config-file=${glanceRegistryConf}";
-      };
-    };
-    systemd.services.glance-api = {
-      description = "OpenStack Glance API Daemon";
-      after = [ "glance-registry.service" "network.target"];
-      requires = [ "glance-registry.service" "network.target"]; 
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        PermissionsStartOnly = true; # preStart must be run as root
-        User = "glance";
-        Group = "glance";
-        ExecStart = "${cfg.package}/bin/glance-api --config-file=${glanceApiConf}";
-      };
-    };
-  };
-
-}
diff --git a/nixos/modules/virtualisation/openstack/keystone.nix b/nixos/modules/virtualisation/openstack/keystone.nix
deleted file mode 100644
index e32c5a4cae1b..000000000000
--- a/nixos/modules/virtualisation/openstack/keystone.nix
+++ /dev/null
@@ -1,220 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib; with import ./common.nix {inherit lib;};
-
-let
-  cfg = config.virtualisation.openstack.keystone;
-  keystoneConfTpl = pkgs.writeText "keystone.conf" ''
-    [DEFAULT]
-    admin_token = ${cfg.adminToken.pattern}
-    policy_file=${cfg.package}/etc/policy.json
-
-    [database]
-
-    connection = "mysql://${cfg.database.user}:${cfg.database.password.pattern}@${cfg.database.host}/${cfg.database.name}"
-
-    [paste_deploy]
-    config_file = ${cfg.package}/etc/keystone-paste.ini
-
-    ${cfg.extraConfig}
-  '';
-  keystoneConf = "/var/lib/keystone/keystone.conf";
-
-in {
-  options.virtualisation.openstack.keystone = {
-    package = mkOption {
-      type = types.package;
-      example = literalExample "pkgs.keystone";
-      description = ''
-        Keystone package to use.
-      '';
-    };
-
-    enable = mkOption {
-      default = false;
-      type = types.bool;
-      description = ''
-        Enable Keystone, the OpenStack Identity Service
-      '';
-    };
-
-    extraConfig = mkOption {
-      default = "";
-      type = types.lines;
-      description = ''
-        Additional text appended to <filename>keystone.conf</filename>,
-        the main Keystone configuration file.
-      '';
-    };
-
-    adminToken = mkSecretOption {
-      name = "adminToken";
-      description = ''
-        This is the admin token used to boostrap keystone,
-        ie. to provision first resources.
-      '';
-    };
-
-    bootstrap = {
-      enable = mkOption {
-        default = false;
-        type = types.bool;
-        description = ''
-          Bootstrap the Keystone service by creating the service
-          tenant, an admin account and a public endpoint. This options
-          provides a ready-to-use admin account. This is only done at
-          the first Keystone execution by the systemd post start.
-
-          Note this option is a helper for setting up development or
-          testing environments.
-        '';
-      };
-
-      endpointPublic = mkOption {
-        type = types.str;
-        default = "http://localhost:5000/v2.0";
-        description = ''
-          The public identity endpoint. The link <link
-          xlink:href="http://docs.openstack.org/liberty/install-guide-rdo/keystone-services.html">
-          create keystone endpoint</link> provides more informations
-          about that.
-        '';
-      };
-
-      adminUsername = mkOption {
-        type = types.str;
-        default = "admin";
-        description = ''
-          A keystone admin username.
-        '';
-      };
-
-      adminPassword = mkSecretOption {
-        name = "keystoneAdminPassword";
-        description = ''
-          The keystone admin user's password.
-        '';
-      };
-
-      adminTenant = mkOption {
-        type = types.str;
-        default = "admin";
-        description = ''
-          A keystone admin tenant name.
-        '';
-      };
-    };
-
-    database = {
-      host = mkOption {
-        type = types.str;
-        default = "localhost";
-        description = ''
-          Host of the database.
-        '';
-      };
-
-      name = mkOption {
-        type = types.str;
-        default = "keystone";
-        description = ''
-          Name of the existing database.
-        '';
-      };
-
-      user = mkOption {
-        type = types.str;
-        default = "keystone";
-        description = ''
-          The database user. The user must exist and has access to
-          the specified database.
-        '';
-      };
-      password = mkSecretOption {
-        name = "mysqlPassword";
-        description = "The database user's password";};
-    };
-  };
-
-  config = mkIf cfg.enable {
-    # Note: when changing the default, make it conditional on
-    # ‘system.stateVersion’ to maintain compatibility with existing
-    # systems!
-    virtualisation.openstack.keystone.package = mkDefault pkgs.keystone;
-
-    users.extraUsers = [{
-      name = "keystone";
-      group = "keystone";
-      uid = config.ids.uids.keystone;
-    }];
-    users.extraGroups = [{
-      name = "keystone";
-      gid = config.ids.gids.keystone;
-    }];
-
-    systemd.services.keystone-all = {
-        description = "OpenStack Keystone Daemon";
-        after = [ "network.target"];
-        path = [ cfg.package pkgs.mysql pkgs.curl pkgs.pythonPackages.keystoneclient pkgs.gawk ];
-        wantedBy = [ "multi-user.target" ];
-        preStart = ''
-          mkdir -m 755 -p /var/lib/keystone
-
-          cp ${keystoneConfTpl} ${keystoneConf};
-          chown keystone:keystone ${keystoneConf};
-          chmod 640 ${keystoneConf}
-
-          ${replaceSecret cfg.database.password keystoneConf}
-          ${replaceSecret cfg.adminToken keystoneConf}
-
-          # Initialise the database
-          ${cfg.package}/bin/keystone-manage --config-file=${keystoneConf} db_sync
-          # Set up the keystone's PKI infrastructure
-          ${cfg.package}/bin/keystone-manage --config-file=${keystoneConf} pki_setup --keystone-user keystone --keystone-group keystone
-        '';
-        postStart = optionalString cfg.bootstrap.enable ''
-          set -eu
-          # Wait until the keystone is available for use
-          count=0
-          while ! curl --fail -s  http://localhost:35357/v2.0 > /dev/null 
-          do
-              if [ $count -eq 30 ]
-              then
-                  echo "Tried 30 times, giving up..."
-                  exit 1
-              fi
-
-              echo "Keystone not yet started. Waiting for 1 second..."
-              count=$((count++))
-              sleep 1
-          done
-
-          # We use the service token to create a first admin user
-          export OS_SERVICE_ENDPOINT=http://localhost:35357/v2.0
-          export OS_SERVICE_TOKEN=${getSecret cfg.adminToken}
-
-          # If the tenant service doesn't exist, we consider
-          # keystone is not initialized
-          if ! keystone tenant-get service
-          then
-              keystone tenant-create --name service
-              keystone tenant-create --name ${cfg.bootstrap.adminTenant}
-              keystone user-create --name ${cfg.bootstrap.adminUsername} --tenant ${cfg.bootstrap.adminTenant} --pass ${getSecret cfg.bootstrap.adminPassword}
-              keystone role-create --name admin
-              keystone role-create --name Member
-              keystone user-role-add --tenant ${cfg.bootstrap.adminTenant} --user ${cfg.bootstrap.adminUsername} --role admin
-              keystone service-create --type identity --name keystone
-              ID=$(keystone service-get keystone | awk '/ id / { print $4 }')
-              keystone endpoint-create --region RegionOne --service $ID --publicurl ${cfg.bootstrap.endpointPublic} --adminurl http://localhost:35357/v2.0 --internalurl http://localhost:5000/v2.0
-          fi
-        '';
-        serviceConfig = {
-          PermissionsStartOnly = true; # preStart must be run as root
-          TimeoutStartSec = "600"; # 10min for initial db migrations
-          User = "keystone";
-          Group = "keystone";
-          ExecStart = "${cfg.package}/bin/keystone-all --config-file=${keystoneConf}";
-        };
-      };
-  };
-}
diff --git a/nixos/modules/virtualisation/openvswitch.nix b/nixos/modules/virtualisation/openvswitch.nix
index 4218a3840fc1..38b138e06326 100644
--- a/nixos/modules/virtualisation/openvswitch.nix
+++ b/nixos/modules/virtualisation/openvswitch.nix
@@ -169,7 +169,7 @@ in {
         mkdir -p ${runDir}/ipsec/{etc/racoon,etc/init.d/,usr/sbin/}
         ln -fs ${pkgs.ipsecTools}/bin/setkey ${runDir}/ipsec/usr/sbin/setkey
         ln -fs ${pkgs.writeScript "racoon-restart" ''
-        #!${pkgs.stdenv.shell}
+        #!${pkgs.runtimeShell}
         /var/run/current-system/sw/bin/systemctl $1 racoon
         ''} ${runDir}/ipsec/etc/init.d/racoon
       '';
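
Several scripts in this change set (amazon-init, azure-agent, containers, openvswitch, qemu-vm) switch their shebang from pkgs.stdenv.shell to pkgs.runtimeShell, which refers to the shell meant for the system being built rather than the build-time stdenv shell. A sketch of the same pattern in a standalone unit (the service name and script body are illustrative):

  { pkgs, ... }:

  let
    # runtimeShell points at the shell for the platform the configuration
    # will run on, so the script stays valid under cross-compilation.
    helloScript = pkgs.writeScript "hello-runtime-shell" ''
      #!${pkgs.runtimeShell} -eu
      echo "hello from runtimeShell"
    '';
  in {
    systemd.services.hello-runtime-shell = {
      wantedBy = [ "multi-user.target" ];
      serviceConfig.ExecStart = "${helloScript}";
    };
  }
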
diff --git a/nixos/modules/virtualisation/parallels-guest.nix b/nixos/modules/virtualisation/parallels-guest.nix
index bd85973ee561..36ca7f356d44 100644
--- a/nixos/modules/virtualisation/parallels-guest.nix
+++ b/nixos/modules/virtualisation/parallels-guest.nix
@@ -3,9 +3,7 @@
 with lib;
 
 let
-
-  prl-tools = config.boot.kernelPackages.prl-tools;
-
+  prl-tools = config.hardware.parallels.package;
 in
 
 {
@@ -22,12 +20,31 @@ in
         '';
       };
 
+      autoMountShares = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Control prlfsmountd service. When this service is running, shares can not be manually
+          mounted through `mount -t prl_fs ...` as this service will remount and trample any set options.
+          Recommended to enable for simple file sharing, but extended share use such as for code should
+          disable this to manually mount shares.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = config.boot.kernelPackages.prl-tools;
+        defaultText = "config.boot.kernelPackages.prl-tools";
+        example = literalExample "config.boot.kernelPackages.prl-tools";
+        description = ''
+          Defines which package to use for prl-tools. Override to change the version.
+        '';
+      };
     };
 
   };
 
   config = mkIf config.hardware.parallels.enable {
-
     services.xserver = {
       drivers = singleton
         { name = "prlvideo"; modules = [ prl-tools ]; libPath = [ prl-tools ]; };
@@ -55,7 +72,7 @@ in
 
     boot.extraModulePackages = [ prl-tools ];
 
-    boot.kernelModules = [ "prl_tg" "prl_eth" "prl_fs" "prl_fs_freeze" "acpi_memhotplug" ];
+    boot.kernelModules = [ "prl_tg" "prl_eth" "prl_fs" "prl_fs_freeze" ];
 
     services.timesyncd.enable = false;
 
@@ -68,7 +85,7 @@ in
       };
     };
 
-    systemd.services.prlfsmountd = {
+    systemd.services.prlfsmountd = mkIf config.hardware.parallels.autoMountShares {
       description = "Parallels Shared Folders Daemon";
       wantedBy = [ "multi-user.target" ];
       serviceConfig = rec {
@@ -89,5 +106,50 @@ in
       };
     };
 
+    systemd.user.services = {
+      prlcc = {
+        description = "Parallels Control Center";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcc";
+        };
+      };
+      prldnd = {
+        description = "Parallels Control Center";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prldnd";
+        };
+      };
+      prl_wmouse_d  = {
+        description = "Parallels Walking Mouse Daemon";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prl_wmouse_d";
+        };
+      };
+      prlcp = {
+        description = "Parallels CopyPaste Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcp";
+        };
+      };
+      prlsga = {
+        description = "Parallels Shared Guest Applications Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlsga";
+        };
+      };
+      prlshprof = {
+        description = "Parallels Shared Profile Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlshprof";
+        };
+      };
+    };
+
   };
 }
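
hardware.parallels gains autoMountShares and package options, and the guest tools now also ship a set of per-user services. A sketch that keeps the default tools package but disables the share auto-mounter so shares can be mounted by hand with `mount -t prl_fs ...`:

  { config, ... }:

  {
    hardware.parallels = {
      enable = true;
      # Stop prlfsmountd from remounting shares with its own options.
      autoMountShares = false;
      # The default, shown here to illustrate pinning a tools version.
      package = config.boot.kernelPackages.prl-tools;
    };
  }
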
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index 3c89ca68113b..45325c6b0d8d 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -10,6 +10,7 @@
 { config, lib, pkgs, ... }:
 
 with lib;
+with import ../../lib/qemu-flags.nix { inherit pkgs; };
 
 let
 
@@ -23,13 +24,13 @@ let
   cfg = config.virtualisation;
 
   qemuGraphics = if cfg.graphics then "" else "-nographic";
-  kernelConsole = if cfg.graphics then "" else "console=ttyS0";
+  kernelConsole = if cfg.graphics then "" else "console=${qemuSerialDevice}";
   ttys = [ "tty1" "tty2" "tty3" "tty4" "tty5" "tty6" ];
 
   # Shell script to start the VM.
   startVM =
     ''
-      #! ${pkgs.stdenv.shell}
+      #! ${pkgs.runtimeShell}
 
       NIX_DISK_IMAGE=$(readlink -f ''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}})
 
@@ -72,11 +73,10 @@ let
       '')}
 
       # Start QEMU.
-      exec ${qemu}/bin/qemu-kvm \
+      exec ${qemuBinary qemu} \
           -name ${vmName} \
           -m ${toString config.virtualisation.memorySize} \
           -smp ${toString config.virtualisation.cores} \
-          ${optionalString (pkgs.stdenv.system == "x86_64-linux") "-cpu kvm64"} \
           ${concatStringsSep " " config.virtualisation.qemu.networkingOptions} \
           -virtfs local,path=/nix/store,security_model=none,mount_tag=store \
           -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \
@@ -98,7 +98,7 @@ let
           ${qemuGraphics} \
           ${toString config.virtualisation.qemu.options} \
           $QEMU_OPTS \
-          $@
+          "$@"
     '';
 
 
@@ -319,8 +319,8 @@ in
       networkingOptions =
         mkOption {
           default = [
-            "-net nic,vlan=0,model=virtio"
-            "-net user,vlan=0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
+            "-net nic,netdev=user.0,model=virtio"
+            "-netdev user,id=user.0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
           ];
           type = types.listOf types.str;
           description = ''
@@ -434,7 +434,11 @@ in
 
     virtualisation.pathsInNixDB = [ config.system.build.toplevel ];
 
-    virtualisation.qemu.options = [ "-vga std" "-usbdevice tablet" ];
+    # FIXME: Consolidate this one day.
+    virtualisation.qemu.options = mkMerge [
+      (mkIf (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) [ "-vga std" "-usb" "-device usb-tablet,bus=usb-bus.0" ])
+      (mkIf (pkgs.stdenv.isArm || pkgs.stdenv.isAarch64) [ "-device virtio-gpu-pci" "-device usb-ehci,id=usb0" "-device usb-kbd" "-device usb-tablet" ])
+    ];
 
     # Mount the host filesystem via 9P, and bind-mount the Nix store
     # of the host into our own filesystem.  We use mkVMOverride to
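
The VM runner now uses the modern -netdev user syntax instead of the deprecated vlan= form, and QEMU_NET_OPTS is spliced into the -netdev argument. A sketch of a build-vm configuration that forwards host port 2222 to guest SSH using the new format (port numbers are assumptions):

  { ... }:

  {
    virtualisation.qemu.networkingOptions = [
      "-net nic,netdev=user.0,model=virtio"
      # hostfwd uses the new -netdev syntax; extra options can still be
      # appended at runtime via QEMU_NET_OPTS.
      "-netdev user,id=user.0,hostfwd=tcp::2222-:22\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
    ];
  }
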
diff --git a/nixos/modules/virtualisation/virtualbox-host.nix b/nixos/modules/virtualisation/virtualbox-host.nix
index bb0c38bd4eb8..7413e12c8f3d 100644
--- a/nixos/modules/virtualisation/virtualbox-host.nix
+++ b/nixos/modules/virtualisation/virtualbox-host.nix
@@ -124,7 +124,7 @@ in
           '';
       };
 
-    networking.interfaces.vboxnet0.ip4 = [ { address = "192.168.56.1"; prefixLength = 24; } ];
+    networking.interfaces.vboxnet0.ipv4.addresses = [{ address = "192.168.56.1"; prefixLength = 24; }];
     # Make sure NetworkManager won't assume this interface being up
     # means we have internet access.
     networking.networkmanager.unmanaged = ["vboxnet0"];
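
The vboxnet0 address now uses the networking.interfaces.<name>.ipv4.addresses form; the same rename applies to any statically configured interface. A sketch with an assumed interface name and address:

  { ... }:

  {
    # Old form: networking.interfaces.eth0.ip4 = [ ... ];
    networking.interfaces.eth0.ipv4.addresses = [
      { address = "10.0.0.2"; prefixLength = 24; }
    ];
  }
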
diff --git a/nixos/modules/virtualisation/virtualbox-image.nix b/nixos/modules/virtualisation/virtualbox-image.nix
index d68b3bb73904..64f145f77ca3 100644
--- a/nixos/modules/virtualisation/virtualbox-image.nix
+++ b/nixos/modules/virtualisation/virtualbox-image.nix
@@ -8,8 +8,6 @@ let
 
 in {
 
-  imports = [ ./grow-partition.nix ];
-
   options = {
     virtualbox = {
       baseImageSize = mkOption {
@@ -23,12 +21,11 @@ in {
   };
 
   config = {
-
     system.build.virtualBoxOVA = import ../../lib/make-disk-image.nix {
-      name = "nixos-ova-${config.system.nixosLabel}-${pkgs.stdenv.system}";
+      name = "nixos-ova-${config.system.nixos.label}-${pkgs.stdenv.system}";
 
       inherit pkgs lib config;
-      partitioned = true;
+      partitionTableType = "legacy";
       diskSize = cfg.baseImageSize;
 
       postVM =
@@ -40,7 +37,7 @@ in {
           VBoxManage internalcommands createrawvmdk -filename disk.vmdk -rawdisk $diskImage
 
           echo "creating VirtualBox VM..."
-          vmName="NixOS ${config.system.nixosLabel} (${pkgs.stdenv.system})"
+          vmName="NixOS ${config.system.nixos.label} (${pkgs.stdenv.system})"
           VBoxManage createvm --name "$vmName" --register \
             --ostype ${if pkgs.stdenv.system == "x86_64-linux" then "Linux26_64" else "Linux26"}
           VBoxManage modifyvm "$vmName" \
@@ -56,7 +53,7 @@ in {
 
           echo "exporting VirtualBox VM..."
           mkdir -p $out
-          fn="$out/nixos-${config.system.nixosLabel}-${pkgs.stdenv.system}.ova"
+          fn="$out/nixos-${config.system.nixos.label}-${pkgs.stdenv.system}.ova"
           VBoxManage export "$vmName" --output "$fn"
 
           rm -v $diskImage
@@ -71,6 +68,7 @@ in {
       autoResize = true;
     };
 
+    boot.growPartition = true;
     boot.loader.grub.device = "/dev/sda";
 
     virtualisation.virtualbox.guest.enable = true;
diff --git a/nixos/modules/virtualisation/xen-dom0.nix b/nixos/modules/virtualisation/xen-dom0.nix
index c7656bc309c0..cf57868acef9 100644
--- a/nixos/modules/virtualisation/xen-dom0.nix
+++ b/nixos/modules/virtualisation/xen-dom0.nix
@@ -35,24 +35,19 @@ in
       description = ''
         The package used for Xen binary.
       '';
+      relatedPackages = [ "xen" "xen-light" ];
     };
 
-    virtualisation.xen.qemu = mkOption {
-      type = types.path;
-      defaultText = "\${pkgs.xen}/lib/xen/bin/qemu-system-i386";
-      example = literalExample "''${pkgs.qemu_xen-light}/bin/qemu-system-i386";
-      description = ''
-        The qemu binary to use for Dom-0 backend.
-      '';
-    };
-
-    virtualisation.xen.qemu-package = mkOption {
+    virtualisation.xen.package-qemu = mkOption {
       type = types.package;
       defaultText = "pkgs.xen";
       example = literalExample "pkgs.qemu_xen-light";
       description = ''
-        The package with qemu binaries for xendomains.
+        The package with qemu binaries for dom0 qemu and xendomains.
       '';
+      relatedPackages = [ "xen"
+                          { name = "qemu_xen-light"; comment = "For use with pkgs.xen-light."; }
+                        ];
     };
 
     virtualisation.xen.bootParams =
@@ -158,8 +153,7 @@ in
     } ];
 
     virtualisation.xen.package = mkDefault pkgs.xen;
-    virtualisation.xen.qemu = mkDefault "${pkgs.xen}/lib/xen/bin/qemu-system-i386";
-    virtualisation.xen.qemu-package = mkDefault pkgs.xen;
+    virtualisation.xen.package-qemu = mkDefault pkgs.xen;
     virtualisation.xen.stored = mkDefault "${cfg.package}/bin/oxenstored";
 
     environment.systemPackages = [ cfg.package ];
@@ -247,6 +241,12 @@ in
           '';
           target = "default/xendomains";
         }
+      ]
+      ++ lib.optionals (builtins.compareVersions cfg.package.version "4.10" >= 0) [
+        # in V 4.10 oxenstored requires /etc/xen/oxenstored.conf to start
+        { source = "${cfg.package}/etc/xen/oxenstored.conf";
+          target = "xen/oxenstored.conf";
+        }
       ];
 
     # Xen provides udev rules.
@@ -268,7 +268,7 @@ in
         mkdir -p /var/lib/xen # so we create them here unconditionally.
         grep -q control_d /proc/xen/capabilities
         '';
-      serviceConfig = if cfg.package.version < "4.8" then
+      serviceConfig = if (builtins.compareVersions cfg.package.version "4.8" < 0) then
         { ExecStart = ''
             ${cfg.stored}${optionalString cfg.trace " -T /var/log/xen/xenstored-trace.log"} --no-fork
             '';
@@ -281,7 +281,7 @@ in
           NotifyAccess    = "all";
         };
       postStart = ''
-        ${optionalString (cfg.package.version < "4.8") ''
+        ${optionalString (builtins.compareVersions cfg.package.version "4.8" < 0) ''
           time=0
           timeout=30
           # Wait for xenstored to actually come up, timing out after 30 seconds
@@ -326,7 +326,7 @@ in
       serviceConfig = {
         ExecStart = ''
           ${cfg.package}/bin/xenconsoled\
-            ${optionalString ((cfg.package.version >= "4.8")) " -i"}\
+            ${optionalString ((builtins.compareVersions cfg.package.version "4.8" >= 0)) " -i"}\
             ${optionalString cfg.trace " --log=all --log-dir=/var/log/xen"}
           '';
       };
@@ -339,7 +339,8 @@ in
       after = [ "xen-console.service" ];
       requires = [ "xen-store.service" ];
       serviceConfig.ExecStart = ''
-        ${cfg.qemu} -xen-attach -xen-domid 0 -name dom0 -M xenpv \
+        ${cfg.package-qemu}/${cfg.package-qemu.qemu-system-i386} \
+           -xen-attach -xen-domid 0 -name dom0 -M xenpv \
            -nographic -monitor /dev/null -serial /dev/null -parallel /dev/null
         '';
     };
@@ -448,7 +449,7 @@ in
       before = [ "dhcpd.service" ];
       restartIfChanged = false;
       serviceConfig.RemainAfterExit = "yes";
-      path = [ cfg.package cfg.qemu-package ];
+      path = [ cfg.package cfg.package-qemu ];
       environment.XENDOM_CONFIG = "${cfg.package}/etc/sysconfig/xendomains";
       preStart = "mkdir -p /var/lock/subsys -m 755";
       serviceConfig.ExecStart = "${cfg.package}/etc/init.d/xendomains start";
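
The separate virtualisation.xen.qemu and qemu-package options are folded into a single package-qemu option covering both the dom0 device model and xendomains. A sketch of a dom0 configuration using the light packages named in the option's examples:

  { pkgs, ... }:

  {
    virtualisation.xen = {
      enable = true;
      package = pkgs.xen-light;
      # One option now selects the QEMU used for the dom0 device model
      # and for xendomains.
      package-qemu = pkgs.qemu_xen-light;
    };
  }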