Diffstat (limited to 'nixpkgs/nixos/maintainers')
-rw-r--r--  nixpkgs/nixos/maintainers/option-usages.nix                               | 192
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/.gitignore                    |   1
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/README.md                     |  42
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/azure-new/boot-vm.sh                    |  36
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/common.sh                     |   7
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/image.nix      |  10
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/system.nix     |  33
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/shell.nix                     |  13
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/azure-new/upload-image.sh               |  58
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh                   |   8
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh                   |  22
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix         |  20
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix                |  12
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix                    | 160
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh                      | 368
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh                       |  35
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix       |  20
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image.nix             |  31
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix |  20
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix       |  31
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/nix.tpl                             |   7
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/oci/create-image.sh                     |  24
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/oci/upload-image.sh                     | 100
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix       | 107
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/openstack/openstack-image.nix           |  27
25 files changed, 1384 insertions(+), 0 deletions(-)
diff --git a/nixpkgs/nixos/maintainers/option-usages.nix b/nixpkgs/nixos/maintainers/option-usages.nix
new file mode 100644
index 000000000000..e9bafa21a58a
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/option-usages.nix
@@ -0,0 +1,192 @@
+{ configuration ? import ../lib/from-env.nix "NIXOS_CONFIG" <nixos-config>
+
+# provide an option name, as a string literal.
+, testOption ? null
+
+# provide a list of option names, as string literals.
+, testOptions ? [ ]
+}:
+
+# This file is made to be used as follows:
+#
+#   $ nix-instantiate ./option-usages.nix --argstr testOption service.xserver.enable -A txtContent --eval
+#
+# or
+#
+#   $ nix-build ./option-usages.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt
+#
+# Other targets exist, such as `dotContent`, `dot`, and `pdf`.  If you are
+# looking for the usages of multiple options, you can provide a list as
+# argument.
+#
+#   $ nix-build ./option-usages.nix --arg testOptions \
+#      '["boot.loader.gummiboot.enable" "boot.loader.gummiboot.timeout"]' \
+#      -A txt -o gummiboot.list
+#
+# Note: this script is slow, as it has to evaluate all options of the
+# system once per queried option.
+#
+# This nix expression works by doing a first evaluation, which evaluates the
+# result of every option.
+#
+# Then, for each queried option, we evaluate the NixOS modules a second
+# time, replacing the `config` argument of all the modules with the result
+# of the original evaluation, except for the tested option, whose value is
+# replaced by a `throw` statement.  That throw is caught by the `tryEval`
+# wrapped around each option value.
+#
+# We then compare the result of evaluating the original modules with the
+# result of the second evaluation, and attribute any new failures to our
+# mutation of the `config` argument.
+#
+# Doing so returns all options whose results directly use the tested
+# option's result.
+
+with import ../../lib;
+
+let
+
+  evalFun = {
+    specialArgs ? {}
+  }: import ../lib/eval-config.nix {
+       modules = [ configuration ];
+       inherit specialArgs;
+     };
+
+  eval = evalFun {};
+  inherit (eval) pkgs;
+
+  excludedTestOptions = [
+    # We cannot evaluate _module.args, as it is used during the computation
+    # of the modules list.
+    "_module.args"
+
+    # For reasons we have yet to investigate, some options cannot be
+    # replaced by a throw without causing a non-catchable failure.
+    "networking.bonds"
+    "networking.bridges"
+    "networking.interfaces"
+    "networking.macvlans"
+    "networking.sits"
+    "networking.vlans"
+    "services.openssh.startWhenNeeded"
+  ];
+
+  # For reasons we have yet to investigate, some options are time-consuming
+  # to compute, so we filter them out for now.
+  excludedOptions = [
+    "boot.systemd.services"
+    "systemd.services"
+    "kde.extraPackages"
+  ];
+  excludeOptions = list:
+    filter (opt: !(elem (showOption opt.loc) excludedOptions)) list;
+
+
+  reportNewFailures = old: new:
+    let
+      filterChanges =
+        filter ({fst, snd}:
+          !(fst.success -> snd.success)
+        );
+
+      keepNames =
+        map ({fst, snd}:
+          /* assert fst.name == snd.name; */ snd.name
+        );
+
+      # Use `tryEval (strict ...)` to detect any failure while evaluating
+      # the option value.
+      #
+      # Note: the `strict` function is not strict enough, but using the
+      # `toXML` builtin instead quadruples the memory usage and the time
+      # needed to compute each option.
+      tryCollectOptions = moduleResult:
+        forEach (excludeOptions (collect isOption moduleResult)) (opt:
+          { name = showOption opt.loc; } // builtins.tryEval (strict opt.value));
+     in
+       keepNames (
+         filterChanges (
+           zipLists (tryCollectOptions old) (tryCollectOptions new)
+         )
+       );
+
+
+  # Create a list of modules where each module contains only one failing
+  # option.
+  introspectionModules =
+    let
+      setIntrospection = opt: rec {
+        name = showOption opt.loc;
+        path = opt.loc;
+        config = setAttrByPath path
+          (throw "Usage introspection of '${name}' by forced failure.");
+      };
+    in
+      map setIntrospection (collect isOption eval.options);
+
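+  # Rebuild the original configuration, but with the tested option's value
+  # replaced by the throwing placeholder built above; every other option
+  # keeps its result from the first evaluation.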
+  overrideConfig = thrower:
+    recursiveUpdateUntil (path: old: new:
+      path == thrower.path
+    ) eval.config thrower.config;
+
+
+  graph =
+    map (thrower: {
+      option = thrower.name;
+      usedBy = assert __trace "Investigate ${thrower.name}" true;
+        reportNewFailures eval.options (evalFun {
+          specialArgs = {
+            config = overrideConfig thrower;
+          };
+        }).options;
+    }) introspectionModules;
+
+  displayOptionsGraph =
+     let
+       checkList =
+         if testOption != null then [ testOption ]
+         else testOptions;
+       checkAll = checkList == [];
+     in
+       flip filter graph ({option, ...}:
+         (checkAll || elem option checkList)
+         && !(elem option excludedTestOptions)
+       );
+
+  graphToDot = graph: ''
+    digraph "Option Usages" {
+      ${concatMapStrings ({option, usedBy}:
+          concatMapStrings (user: ''
+            "${option}" -> "${user}"''
+          ) usedBy
+        ) displayOptionsGraph}
+    }
+  '';
+
+  graphToText = graph:
+    concatMapStrings ({usedBy, ...}:
+        concatMapStrings (user: ''
+          ${user}
+        '') usedBy
+      ) displayOptionsGraph;
+
+in
+
+rec {
+  dotContent = graphToDot graph;
+  dot = pkgs.writeTextFile {
+    name = "option_usages.dot";
+    text = dotContent;
+  };
+
+  pdf = pkgs.texFunctions.dot2pdf {
+    dotGraph = dot;
+  };
+
+  txtContent = graphToText graph;
+  txt = pkgs.writeTextFile {
+    name = "option_usages.txt";
+    text = txtContent;
+  };
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/.gitignore b/nixpkgs/nixos/maintainers/scripts/azure-new/.gitignore
new file mode 100644
index 000000000000..9271abf14a0f
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/.gitignore
@@ -0,0 +1 @@
+azure
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/README.md b/nixpkgs/nixos/maintainers/scripts/azure-new/README.md
new file mode 100644
index 000000000000..e5b69dacec08
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/README.md
@@ -0,0 +1,42 @@
+# azure
+
+## Demo
+
+Here's a demo of this being used: https://asciinema.org/a/euXb9dIeUybE3VkstLWLbvhmp
+
+## Usage
+
+This is meant to be an example image that you can copy into your own
+project and modify to fit your own needs. Note that the example image
+includes a built-in test user account, which by default uses your
+`~/.ssh/id_ed25519.pub` as an `authorized_key`.
+
+Build and upload the image:
+```shell
+$ ./upload-image.sh ./examples/basic/image.nix
+
+...
++ attr=azbasic
++ nix-build ./examples/basic/image.nix --out-link azure
+/nix/store/qdpzknpskzw30vba92mb24xzll1dqsmd-azure-image
+...
+95.5 %, 0 Done, 0 Failed, 1 Pending, 0 Skipped, 1 Total, 2-sec Throughput (Mb/s): 932.9565
+...
+/subscriptions/aff271ee-e9be-4441-b9bb-42f5af4cbaeb/resourceGroups/nixos-images/providers/Microsoft.Compute/images/azure-image-todo-makethisbetter
+```
+
+Take the output and boot an Azure VM:
+
+```shell
+img="/subscriptions/.../..." # use output from last command
+./boot-vm.sh "${img}"
+...
+=> booted
+```
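+
+Once booted, you can SSH in as the hard-coded test user from
+`examples/basic/system.nix` (a sketch; substitute the public IP printed by
+`az vm create`):
+
+```shell
+$ ssh azurenixosuser@<public-ip>
+```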
+
+## Future Work
+
+1. If the user specifies a hard-coded user, then the agent could be removed.
+   Probably has security benefits; definitely has closure-size benefits.
+   (It's likely the VM will need to be booted with a special flag. See:
+   https://github.com/Azure/azure-cli/issues/12775 for details.)
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/boot-vm.sh b/nixpkgs/nixos/maintainers/scripts/azure-new/boot-vm.sh
new file mode 100755
index 000000000000..1ce3a5f9db1e
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/boot-vm.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+set -euo pipefail
+set -x
+
+image="${1}"
+location="westus2"
+group="nixos-test-vm"
+vm_size="Standard_D2s_v3";  os_size=42;
+
+# ensure group
+az group create --location "${location}" --name "${group}"
+group_id="$(az group show --name "${group}" -o tsv --query "[id]")"
+
+# (optional) identity
+if ! az identity show -n "${group}-identity" -g "${group}" &>/dev/stderr; then
+  az identity create --name "${group}-identity" --resource-group "${group}"
+fi
+
+# (optional) role assignment to the resource group; broad, but there are no really great alternatives
+identity_id="$(az identity show --name "${group}-identity" --resource-group "${group}" -o tsv --query "[id]")"
+principal_id="$(az identity show --name "${group}-identity" --resource-group "${group}" -o tsv --query "[principalId]")"
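+# the role assignment can fail while the freshly created identity propagates,
+# hence the retry loop (an assumption about typical Azure eventual consistency)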
+until az role assignment create --assignee "${principal_id}" --role "Owner" --scope "${group_id}"; do sleep 1; done
+
+# boot vm
+az vm create \
+  --name "${group}-vm" \
+  --resource-group "${group}" \
+  --assign-identity "${identity_id}" \
+  --size "${vm_size}" \
+  --os-disk-size-gb "${os_size}" \
+  --image "${image}" \
+  --admin-username "${USER}" \
+  --location "${location}" \
+  --storage-sku "Premium_LRS" \
+  --ssh-key-values "$(ssh-add -L)"
+
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/common.sh b/nixpkgs/nixos/maintainers/scripts/azure-new/common.sh
new file mode 100644
index 000000000000..eb87c3e06501
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/common.sh
@@ -0,0 +1,7 @@
+export group="${AZURE_RESOURCE_GROUP:-"azure"}"
+export location="${AZURE_LOCATION:-"westus2"}"
+
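+# derive an image name from the built VHD: strip the ".vhd" extension and
+# replace dots/underscores with dashes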
+img_file=$(echo azure/*.vhd)
+img_name="$(basename "${img_file}")"
+img_name="${img_name%".vhd"}"
+export img_name="${img_name//[._]/-}"
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/image.nix b/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/image.nix
new file mode 100644
index 000000000000..310eba3621a6
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/image.nix
@@ -0,0 +1,10 @@
+let
+  pkgs = (import ../../../../../../default.nix {});
+  machine = import (pkgs.path + "/nixos/lib/eval-config.nix") {
+    system = "x86_64-linux";
+    modules = [
+      ({config, ...}: { imports = [ ./system.nix ]; })
+    ];
+  };
+in
+  machine.config.system.build.azureImage
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/system.nix b/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/system.nix
new file mode 100644
index 000000000000..d1044802e1f0
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/system.nix
@@ -0,0 +1,33 @@
+{ pkgs, modulesPath, ... }:
+
+let username = "azurenixosuser";
+in
+{
+  imports = [
+    "${modulesPath}/virtualisation/azure-common.nix"
+    "${modulesPath}/virtualisation/azure-image.nix"
+  ];
+
+  ## NOTE: This is just an example of how to hard-code a user.
+  ## The normal Azure agent IS included and DOES provision a user based
+  ## on the information passed at VM creation time.
+  users.users."${username}" = {
+    isNormalUser = true;
+    home = "/home/${username}";
+    description = "Azure NixOS Test User";
+    openssh.authorizedKeys.keys = [ (builtins.readFile ~/.ssh/id_ed25519.pub) ];
+  };
+  nix.settings.trusted-users = [ username ];
+
+  virtualisation.azureImage.diskSize = 2500;
+
+  boot.kernelPackages = pkgs.linuxPackages_latest;
+
+  # test user doesn't have a password
+  services.openssh.passwordAuthentication = false;
+  security.sudo.wheelNeedsPassword = false;
+
+  environment.systemPackages = with pkgs; [
+    git file htop wget curl
+  ];
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/shell.nix b/nixpkgs/nixos/maintainers/scripts/azure-new/shell.nix
new file mode 100644
index 000000000000..592f1bf9056e
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/shell.nix
@@ -0,0 +1,13 @@
+with (import ../../../../default.nix {});
+stdenv.mkDerivation {
+  name = "nixcfg-azure-devenv";
+
+  nativeBuildInputs = [
+    azure-cli
+    bash
+    cacert
+    azure-storage-azcopy
+  ];
+
+  AZURE_CONFIG_DIR="/tmp/azure-cli/.azure";
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/upload-image.sh b/nixpkgs/nixos/maintainers/scripts/azure-new/upload-image.sh
new file mode 100755
index 000000000000..143afbd7f962
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/upload-image.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+set -euo pipefail
+set -x
+
+image_nix="${1:-"./examples/basic/image.nix"}"
+
+nix-build "${image_nix}" --out-link "azure"
+
+group="nixos-images"
+location="westus2"
+img_name="nixos-image"
+img_file="$(readlink -f ./azure/disk.vhd)"
+
+if ! az group show -n "${group}" &>/dev/null; then
+  az group create --name "${group}" --location "${location}"
+fi
+
+# note: the disk access token song/dance is tedious
+# but allows us to upload directly to a disk image,
+# thereby avoiding storage accounts (and naming them) entirely!
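+# (`--for-upload` creates the disk in an upload-ready state; `grant-access`
+# then returns a temporary SAS URL that azcopy can write to)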
+if ! az disk show -g "${group}" -n "${img_name}" &>/dev/null; then
+  bytes="$(stat -c %s "${img_file}")"
+  size="30"
+  az disk create \
+    --resource-group "${group}" \
+    --name "${img_name}" \
+    --for-upload true --upload-size-bytes "${bytes}"
+
+  timeout=$(( 60 * 60 )) # disk access token timeout
+  sasurl="$(\
+    az disk grant-access \
+      --access-level Write \
+      --resource-group "${group}" \
+      --name "${img_name}" \
+      --duration-in-seconds ${timeout} \
+        | jq -r '.accessSas'
+  )"
+
+  azcopy copy "${img_file}" "${sasurl}" \
+    --blob-type PageBlob
+
+  az disk revoke-access \
+    --resource-group "${group}" \
+    --name "${img_name}"
+fi
+
+if ! az image show -g "${group}" -n "${img_name}" &>/dev/null; then
+  diskid="$(az disk show -g "${group}" -n "${img_name}" -o json | jq -r .id)"
+
+  az image create \
+    --resource-group "${group}" \
+    --name "${img_name}" \
+    --source "${diskid}" \
+    --os-type "linux" >/dev/null
+fi
+
+imageid="$(az image show -g "${group}" -n "${img_name}" -o json | jq -r .id)"
+echo "${imageid}"
diff --git a/nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh b/nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh
new file mode 100755
index 000000000000..0558f8dfffcb
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh
@@ -0,0 +1,8 @@
+#! /bin/sh -eu
+
+export NIX_PATH=nixpkgs=$(dirname "$(readlink -f "$0")")/../../../..
+export NIXOS_CONFIG=$(dirname "$(readlink -f "$0")")/../../../modules/virtualisation/azure-image.nix
+export TIMESTAMP=$(date +%Y%m%d%H%M)
+
+nix-build '<nixpkgs/nixos>' \
+   -A config.system.build.azureImage --argstr system x86_64-linux -o azure -j 10
diff --git a/nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh b/nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh
new file mode 100755
index 000000000000..2ea35d1d4c33
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh
@@ -0,0 +1,22 @@
+#! /bin/sh -e
+
+export STORAGE=${STORAGE:-nixos}
+export THREADS=${THREADS:-8}
+
+azure-vhd-utils-for-go  upload --localvhdpath azure/disk.vhd  --stgaccountname "$STORAGE"  --stgaccountkey "$KEY" \
+   --containername images --blobname nixos-unstable-nixops-updated.vhd --parallelism "$THREADS" --overwrite
+
diff --git a/nixpkgs/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix b/nixpkgs/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix
new file mode 100644
index 000000000000..b66ee5d7b9bc
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix
@@ -0,0 +1,20 @@
+# nix-build '<nixpkgs/nixos>' -A config.system.build.cloudstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/cloudstack/cloudstack-image.nix ]; }"
+
+{ config, lib, pkgs, ... }:
+
+{
+  imports =
+    [ ../../../modules/virtualisation/cloudstack-config.nix ];
+
+  system.build.cloudstackImage = import ../../../lib/make-disk-image.nix {
+    inherit lib config pkgs;
+    format = "qcow2";
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        {
+          imports = [ <nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix> ];
+        }
+      '';
+  };
+
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix
new file mode 100644
index 000000000000..32dd96a7cb7e
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix
@@ -0,0 +1,12 @@
+{
+  imports = [ ./amazon-image.nix ];
+  ec2.zfs = {
+    enable = true;
+    datasets = {
+      "tank/system/root".mount = "/";
+      "tank/system/var".mount = "/var";
+      "tank/local/nix".mount = "/nix";
+      "tank/user/home".mount = "/home";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix
new file mode 100644
index 000000000000..d12339bca1f8
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.amazonImage;
+  amiBootMode = if config.ec2.efi then "uefi" else "legacy-bios";
+
+in {
+
+  imports = [ ../../../modules/virtualisation/amazon-image.nix ];
+
+  # Amazon recommends setting this to the highest possible value for a good EBS
+  # experience, which prior to 4.15 was 255.
+  # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html#timeout-nvme-ebs-volumes
+  config.boot.kernelParams =
+    let timeout =
+      if pkgs.lib.versionAtLeast config.boot.kernelPackages.kernel.version "4.15"
+      then "4294967295"
+      else  "255";
+    in [ "nvme_core.io_timeout=${timeout}" ];
+
+  options.amazonImage = {
+    name = mkOption {
+      type = types.str;
+      description = lib.mdDoc "The name of the generated derivation";
+      default = "nixos-amazon-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+    };
+
+    contents = mkOption {
+      example = literalExpression ''
+        [ { source = pkgs.memtest86 + "/memtest.bin";
+            target = "boot/memtest.bin";
+          }
+        ]
+      '';
+      default = [];
+      description = lib.mdDoc ''
+        This option lists files to be copied to fixed locations in the
+        generated image. Glob patterns work.
+      '';
+    };
+
+    sizeMB = mkOption {
+      type = with types; either (enum [ "auto" ]) int;
+      default = 3072;
+      example = 8192;
+      description = lib.mdDoc "The size in MB of the image";
+    };
+
+    format = mkOption {
+      type = types.enum [ "raw" "qcow2" "vpc" ];
+      default = "vpc";
+      description = lib.mdDoc "The image format to output";
+    };
+  };
+
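+  # Example (hypothetical) settings from a configuration using this module:
+  #   amazonImage.sizeMB = 8192;
+  #   amazonImage.format = "qcow2";
+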
+  config.system.build.amazonImage = let
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        { modulesPath, ... }: {
+          imports = [ "''${modulesPath}/virtualisation/amazon-image.nix" ];
+          ${optionalString config.ec2.efi ''
+            ec2.efi = true;
+          ''}
+          ${optionalString config.ec2.zfs.enable ''
+            ec2.zfs.enable = true;
+            networking.hostId = "${config.networking.hostId}";
+          ''}
+        }
+      '';
+
+    zfsBuilder = import ../../../lib/make-multi-disk-zfs-image.nix {
+      inherit lib config configFile;
+      inherit (cfg) contents format name;
+      pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+
+      includeChannel = true;
+
+      bootSize = 1000; # 1G is the minimum EBS volume
+
+      rootSize = cfg.sizeMB;
+      rootPoolProperties = {
+        ashift = 12;
+        autoexpand = "on";
+      };
+
+      datasets = config.ec2.zfs.datasets;
+
+      postVM = ''
+        extension=''${rootDiskImage##*.}
+        friendlyName=$out/${cfg.name}
+        rootDisk="$friendlyName.root.$extension"
+        bootDisk="$friendlyName.boot.$extension"
+        mv "$rootDiskImage" "$rootDisk"
+        mv "$bootDiskImage" "$bootDisk"
+
+        mkdir -p $out/nix-support
+        echo "file ${cfg.format} $bootDisk" >> $out/nix-support/hydra-build-products
+        echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products
+
+       ${pkgs.jq}/bin/jq -n \
+         --arg system_label ${lib.escapeShellArg config.system.nixos.label} \
+         --arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
+         --arg root_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
+         --arg boot_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$bootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
+         --arg boot_mode "${amiBootMode}" \
+         --arg root "$rootDisk" \
+         --arg boot "$bootDisk" \
+        '{}
+          | .label = $system_label
+          | .boot_mode = $boot_mode
+          | .system = $system
+          | .disks.boot.logical_bytes = $boot_logical_bytes
+          | .disks.boot.file = $boot
+          | .disks.root.logical_bytes = $root_logical_bytes
+          | .disks.root.file = $root
+          ' > $out/nix-support/image-info.json
+      '';
+    };
+
+    extBuilder = import ../../../lib/make-disk-image.nix {
+      inherit lib config configFile;
+
+      inherit (cfg) contents format name;
+      pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+
+      fsType = "ext4";
+      partitionTableType = if config.ec2.efi then "efi" else "legacy+gpt";
+
+      diskSize = cfg.sizeMB;
+
+      postVM = ''
+        extension=''${diskImage##*.}
+        friendlyName=$out/${cfg.name}.$extension
+        mv "$diskImage" "$friendlyName"
+        diskImage=$friendlyName
+
+        mkdir -p $out/nix-support
+        echo "file ${cfg.format} $diskImage" >> $out/nix-support/hydra-build-products
+
+       ${pkgs.jq}/bin/jq -n \
+         --arg system_label ${lib.escapeShellArg config.system.nixos.label} \
+         --arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
+         --arg logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
+         --arg boot_mode "${amiBootMode}" \
+         --arg file "$diskImage" \
+          '{}
+          | .label = $system_label
+          | .boot_mode = $boot_mode
+          | .system = $system
+          | .logical_bytes = $logical_bytes
+          | .file = $file
+          | .disks.root.logical_bytes = $logical_bytes
+          | .disks.root.file = $file
+          ' > $out/nix-support/image-info.json
+      '';
+    };
+  in if config.ec2.zfs.enable then zfsBuilder else extBuilder;
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh b/nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh
new file mode 100755
index 000000000000..d182c5c2a479
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh
@@ -0,0 +1,368 @@
+#!/usr/bin/env nix-shell
+#!nix-shell -p awscli -p jq -p qemu -i bash
+# shellcheck shell=bash
+#
+# Future Deprecation?
+# This entire thing should probably be replaced with a generic terraform config
+
+# Uploads and registers NixOS images built from the
+# <nixos/release.nix> amazonImage attribute. Images are uploaded and
+# registered via a home region, and then copied to other regions.
+
+# The home region requires an s3 bucket, and an IAM role named "vmimport"
+# (by default) with access to the S3 bucket. The name can be
+# configured with the "service_role_name" variable. Configuration of the
+# vmimport role is documented in
+# https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html
+
+# set -x
+set -euo pipefail
+
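+# no-op helper: calling it as `var ${name:=default}` assigns the default to
+# the variable via shell parameter expansion while reading as a declaration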
+var () { true; }
+
+# configuration
+var ${state_dir:=$HOME/amis/ec2-images}
+var ${home_region:=eu-west-1}
+var ${bucket:=nixos-amis}
+var ${service_role_name:=vmimport}
+
+# Output of the command:
+# $ nix-shell -I nixpkgs=. -p awscli --run 'aws ec2 describe-regions --region us-east-1 --all-regions --query "Regions[].{Name:RegionName}" --output text | sort | sed -e s/^/\ \ /'
+var ${regions:=
+  af-south-1
+  ap-east-1
+  ap-northeast-1
+  ap-northeast-2
+  ap-northeast-3
+  ap-south-1
+  ap-south-2
+  ap-southeast-1
+  ap-southeast-2
+  ap-southeast-3
+  ap-southeast-4
+  ca-central-1
+  eu-central-1
+  eu-central-2
+  eu-north-1
+  eu-south-1
+  eu-south-2
+  eu-west-1
+  eu-west-2
+  eu-west-3
+  il-central-1
+  me-central-1
+  me-south-1
+  sa-east-1
+  us-east-1
+  us-east-2
+  us-west-1
+  us-west-2
+}
+
+regions=($regions)
+
+log() {
+    echo "$@" >&2
+}
+
+if [ "$#" -ne 1 ]; then
+    log "Usage: ./upload-amazon-image.sh IMAGE_OUTPUT"
+    exit 1
+fi
+
+# result of the amazon-image from nixos/release.nix
+store_path=$1
+
+if [ ! -e "$store_path" ]; then
+    log "Store path: $store_path does not exist, fetching..."
+    nix-store --realise "$store_path"
+fi
+
+if [ ! -d "$store_path" ]; then
+    log "store_path: $store_path is not a directory. aborting"
+    exit 1
+fi
+
+read_image_info() {
+    if [ ! -e "$store_path/nix-support/image-info.json" ]; then
+        log "Image missing metadata"
+        exit 1
+    fi
+    jq -r "$1" "$store_path/nix-support/image-info.json"
+}
+
+# We handle a single image per invocation, store all attributes in
+# globals for convenience.
+zfs_disks=$(read_image_info .disks)
+is_zfs_image=
+if jq -e .boot <<< "$zfs_disks"; then
+  is_zfs_image=1
+  zfs_boot=".disks.boot"
+fi
+image_label="$(read_image_info .label)${is_zfs_image:+-ZFS}"
+image_system=$(read_image_info .system)
+image_files=( $(read_image_info ".disks.root.file") )
+
+image_logical_bytes=$(read_image_info "${zfs_boot:-.disks.root}.logical_bytes")
+
+if [[ -n "$is_zfs_image" ]]; then
+  image_files+=( $(read_image_info .disks.boot.file) )
+fi
+
+# Derived attributes
+
+image_logical_gigabytes=$(((image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB
+
+case "$image_system" in
+    aarch64-linux)
+        amazon_arch=arm64
+        ;;
+    x86_64-linux)
+        amazon_arch=x86_64
+        ;;
+    *)
+        log "Unknown system: $image_system"
+        exit 1
+esac
+
+image_name="NixOS-${image_label}-${image_system}"
+image_description="NixOS ${image_label} ${image_system}"
+
+log "Image Details:"
+log " Name: $image_name"
+log " Description: $image_description"
+log " Size (gigabytes): $image_logical_gigabytes"
+log " System: $image_system"
+log " Amazon Arch: $amazon_arch"
+
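+# Each upload step records its result (task, snapshot, and AMI ids) under
+# $state_dir, so re-running the script resumes where it left off instead of
+# repeating completed steps.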
+read_state() {
+    local state_key=$1
+    local type=$2
+
+    cat "$state_dir/$state_key.$type" 2>/dev/null || true
+}
+
+write_state() {
+    local state_key=$1
+    local type=$2
+    local val=$3
+
+    mkdir -p "$state_dir"
+    echo "$val" > "$state_dir/$state_key.$type"
+}
+
+wait_for_import() {
+    local region=$1
+    local task_id=$2
+    local state snapshot_id
+    log "Waiting for import task $task_id to be completed"
+    while true; do
+        read -r state message snapshot_id < <(
+            aws ec2 describe-import-snapshot-tasks --region "$region" --import-task-ids "$task_id" | \
+                jq -r '.ImportSnapshotTasks[].SnapshotTaskDetail | "\(.Status) \(.StatusMessage) \(.SnapshotId)"'
+        )
+        log " ... state=$state message=$message snapshot_id=$snapshot_id"
+        case "$state" in
+            active)
+                sleep 10
+                ;;
+            completed)
+                echo "$snapshot_id"
+                return
+                ;;
+            *)
+                log "Unexpected snapshot import state: '${state}'"
+                log "Full response: "
+                aws ec2 describe-import-snapshot-tasks --region "$region" --import-task-ids "$task_id" >&2
+                exit 1
+                ;;
+        esac
+    done
+}
+
+wait_for_image() {
+    local region=$1
+    local ami_id=$2
+    local state
+    log "Waiting for image $ami_id to be available"
+
+    while true; do
+        read -r state < <(
+            aws ec2 describe-images --image-ids "$ami_id" --region "$region" | \
+                jq -r ".Images[].State"
+        )
+        log " ... state=$state"
+        case "$state" in
+            pending)
+                sleep 10
+                ;;
+            available)
+                return
+                ;;
+            *)
+                log "Unexpected AMI state: '${state}'"
+                exit 1
+                ;;
+        esac
+    done
+}
+
+
+make_image_public() {
+    local region=$1
+    local ami_id=$2
+
+    wait_for_image "$region" "$ami_id"
+
+    log "Making image $ami_id public"
+
+    aws ec2 modify-image-attribute \
+        --image-id "$ami_id" --region "$region" --launch-permission 'Add={Group=all}' >&2
+}
+
+upload_image() {
+    local region=$1
+
+    for image_file in "${image_files[@]}"; do
+        local aws_path=${image_file#/}
+
+        if [[ -n "$is_zfs_image" ]]; then
+            local suffix=${image_file%.*}
+            suffix=${suffix##*.}
+        fi
+
+        local state_key="$region.$image_label${suffix:+.${suffix}}.$image_system"
+        local task_id
+        task_id=$(read_state "$state_key" task_id)
+        local snapshot_id
+        snapshot_id=$(read_state "$state_key" snapshot_id)
+        local ami_id
+        ami_id=$(read_state "$state_key" ami_id)
+
+        if [ -z "$task_id" ]; then
+            log "Checking for image on S3"
+            if ! aws s3 ls --region "$region" "s3://${bucket}/${aws_path}" >&2; then
+                log "Image missing from aws, uploading"
+                aws s3 cp --region "$region" "$image_file" "s3://${bucket}/${aws_path}" >&2
+            fi
+
+            log "Importing image from S3 path s3://$bucket/$aws_path"
+
+            task_id=$(aws ec2 import-snapshot --role-name "$service_role_name" --disk-container "{
+              \"Description\": \"nixos-image-${image_label}-${image_system}\",
+              \"Format\": \"vhd\",
+              \"UserBucket\": {
+                  \"S3Bucket\": \"$bucket\",
+                  \"S3Key\": \"$aws_path\"
+              }
+            }" --region "$region" | jq -r '.ImportTaskId')
+
+            write_state "$state_key" task_id "$task_id"
+        fi
+
+        if [ -z "$snapshot_id" ]; then
+            snapshot_id=$(wait_for_import "$region" "$task_id")
+            write_state "$state_key" snapshot_id "$snapshot_id"
+        fi
+    done
+
+    if [ -z "$ami_id" ]; then
+        log "Registering snapshot $snapshot_id as AMI"
+
+        local block_device_mappings=(
+            "DeviceName=/dev/xvda,Ebs={SnapshotId=$snapshot_id,VolumeSize=$image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp3}"
+        )
+
+        if [[ -n "$is_zfs_image" ]]; then
+            local root_snapshot_id=$(read_state "$region.$image_label.root.$image_system" snapshot_id)
+
+            local root_image_logical_bytes=$(read_image_info ".disks.root.logical_bytes")
+            local root_image_logical_gigabytes=$(((root_image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB
+
+            block_device_mappings+=(
+                "DeviceName=/dev/xvdb,Ebs={SnapshotId=$root_snapshot_id,VolumeSize=$root_image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp3}"
+            )
+        fi
+
+
+        local extra_flags=(
+            --root-device-name /dev/xvda
+            --sriov-net-support simple
+            --ena-support
+            --virtualization-type hvm
+        )
+
+        block_device_mappings+=("DeviceName=/dev/sdb,VirtualName=ephemeral0")
+        block_device_mappings+=("DeviceName=/dev/sdc,VirtualName=ephemeral1")
+        block_device_mappings+=("DeviceName=/dev/sdd,VirtualName=ephemeral2")
+        block_device_mappings+=("DeviceName=/dev/sde,VirtualName=ephemeral3")
+
+        ami_id=$(
+            aws ec2 register-image \
+                --name "$image_name" \
+                --description "$image_description" \
+                --region "$region" \
+                --architecture $amazon_arch \
+                --block-device-mappings "${block_device_mappings[@]}" \
+                --boot-mode $(read_image_info .boot_mode) \
+                "${extra_flags[@]}" \
+                | jq -r '.ImageId'
+              )
+
+        write_state "$state_key" ami_id "$ami_id"
+    fi
+
+    [[ -v PRIVATE ]] || make_image_public "$region" "$ami_id"
+
+    echo "$ami_id"
+}
+
+copy_to_region() {
+    local region=$1
+    local from_region=$2
+    local from_ami_id=$3
+
+    state_key="$region.$image_label.$image_system"
+    ami_id=$(read_state "$state_key" ami_id)
+
+    if [ -z "$ami_id" ]; then
+        log "Copying $from_ami_id to $region"
+        ami_id=$(
+            aws ec2 copy-image \
+                --region "$region" \
+                --source-region "$from_region" \
+                --source-image-id "$from_ami_id" \
+                --name "$image_name" \
+                --description "$image_description" \
+                | jq -r '.ImageId'
+              )
+
+        write_state "$state_key" ami_id "$ami_id"
+    fi
+
+    [[ -v PRIVATE ]] || make_image_public "$region" "$ami_id"
+
+    echo "$ami_id"
+}
+
+upload_all() {
+    home_image_id=$(upload_image "$home_region")
+    jq -n \
+       --arg key "$home_region.$image_system" \
+       --arg value "$home_image_id" \
+       '$ARGS.named'
+
+    for region in "${regions[@]}"; do
+        if [ "$region" = "$home_region" ]; then
+            continue
+        fi
+        copied_image_id=$(copy_to_region "$region" "$home_region" "$home_image_id")
+
+        jq -n \
+           --arg key "$region.$image_system" \
+           --arg value "$copied_image_id" \
+           '$ARGS.named'
+    done
+}
+
+upload_all | jq --slurp from_entries
diff --git a/nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh b/nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh
new file mode 100755
index 000000000000..0eec4d041108
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env nix-shell
+#! nix-shell -i bash -p google-cloud-sdk
+
+set -euo pipefail
+
+BUCKET_NAME="${BUCKET_NAME:-nixos-cloud-images}"
+TIMESTAMP="$(date +%Y%m%d%H%M)"
+export TIMESTAMP
+
+nix-build '<nixpkgs/nixos/lib/eval-config.nix>' \
+   -A config.system.build.googleComputeImage \
+   --arg modules "[ <nixpkgs/nixos/modules/virtualisation/google-compute-image.nix> ]" \
+   --argstr system x86_64-linux \
+   -o gce \
+   -j 10
+
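+# derive a GCE-compatible image id from the tarball name: image names may
+# only contain lowercase letters, digits, and dashes, so dots and
+# underscores are rewritten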
+img_path=$(echo gce/*.tar.gz)
+img_name=${IMAGE_NAME:-$(basename "$img_path")}
+img_id=$(echo "$img_name" | sed 's|.raw.tar.gz$||;s|\.|-|g;s|_|-|g')
+img_family=$(echo "$img_id" | cut -d - -f1-4)
+
+if ! gsutil ls "gs://${BUCKET_NAME}/$img_name"; then
+  gsutil cp "$img_path" "gs://${BUCKET_NAME}/$img_name"
+  gsutil acl ch -u AllUsers:R "gs://${BUCKET_NAME}/$img_name"
+
+  gcloud compute images create \
+    "$img_id" \
+    --source-uri "gs://${BUCKET_NAME}/$img_name" \
+    --family="$img_family"
+
+  gcloud compute images add-iam-policy-binding \
+    "$img_id" \
+    --member='allAuthenticatedUsers' \
+    --role='roles/compute.imageUser'
+fi
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix
new file mode 100644
index 000000000000..ef00c6f86cbd
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix
@@ -0,0 +1,20 @@
+# Edit this configuration file to define what should be installed on
+# your system.  Help is available in the configuration.nix(5) man page
+# and in the NixOS manual (accessible by running ‘nixos-help’).
+
+{ config, pkgs, lib, modulesPath, ... }:
+
+{
+  imports =
+    [
+      # Include the default lxd configuration.
+      "${modulesPath}/virtualisation/lxc-container.nix"
+      # Include the container-specific autogenerated configuration.
+      ./lxd.nix
+    ];
+
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+
+  system.stateVersion = "@stateVersion@"; # Did you read the comment?
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image.nix b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image.nix
new file mode 100644
index 000000000000..b77f9f5aabe0
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image.nix
@@ -0,0 +1,31 @@
+{ lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../../modules/virtualisation/lxc-container.nix
+  ];
+
+  virtualisation.lxc.templates.nix = {
+    enable = true;
+    target = "/etc/nixos/lxd.nix";
+    template = ./nix.tpl;
+    when = [ "create" "copy" ];
+  };
+
+  # copy the config for nixos-rebuild
+  system.activationScripts.config = let
+    config = pkgs.substituteAll {
+      src = ./lxd-container-image-inner.nix;
+      stateVersion = lib.trivial.release;
+    };
+  in ''
+    if [ ! -e /etc/nixos/configuration.nix ]; then
+      mkdir -p /etc/nixos
+      cp ${config} /etc/nixos/configuration.nix
+    fi
+  '';
+
+  # Network
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix
new file mode 100644
index 000000000000..c1c50b32ff5b
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix
@@ -0,0 +1,20 @@
+# Edit this configuration file to define what should be installed on
+# your system.  Help is available in the configuration.nix(5) man page
+# and in the NixOS manual (accessible by running ‘nixos-help’).
+
+{ config, pkgs, lib, modulesPath, ... }:
+
+{
+  imports =
+    [
+      # Include the default lxd configuration.
+      "${modulesPath}/virtualisation/lxd-virtual-machine.nix"
+      # Include the VM-specific autogenerated configuration.
+      ./lxd.nix
+    ];
+
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+
+  system.stateVersion = "@stateVersion@"; # Did you read the comment?
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix
new file mode 100644
index 000000000000..0d96eea0e2d2
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix
@@ -0,0 +1,31 @@
+{ lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../../modules/virtualisation/lxd-virtual-machine.nix
+  ];
+
+  virtualisation.lxc.templates.nix = {
+    enable = true;
+    target = "/etc/nixos/lxd.nix";
+    template = ./nix.tpl;
+    when = ["create" "copy"];
+  };
+
+  # copy the config for nixos-rebuild
+  system.activationScripts.config = let
+    config = pkgs.substituteAll {
+      src = ./lxd-virtual-machine-image-inner.nix;
+      stateVersion = lib.trivial.release;
+    };
+  in ''
+    if [ ! -e /etc/nixos/configuration.nix ]; then
+      mkdir -p /etc/nixos
+      cp ${config} /etc/nixos/configuration.nix
+    fi
+  '';
+
+  # Network
+  networking.useDHCP = false;
+  networking.interfaces.enp5s0.useDHCP = true;
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/nix.tpl b/nixpkgs/nixos/maintainers/scripts/lxd/nix.tpl
new file mode 100644
index 000000000000..25ae1bc399f2
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/nix.tpl
@@ -0,0 +1,7 @@
+{ lib, config, pkgs, ... }:
+
+# WARNING: THIS CONFIGURATION IS AUTOGENERATED AND WILL BE OVERWRITTEN AUTOMATICALLY
+
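+# The {{ container.name }} placeholder below is filled in by LXD's template
+# engine when the instance is created or copied (see `when = [ "create" "copy" ]`
+# in the image configuration).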
+{
+  networking.hostName = "{{ container.name }}";
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/oci/create-image.sh b/nixpkgs/nixos/maintainers/scripts/oci/create-image.sh
new file mode 100755
index 000000000000..0d7332a0b272
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/oci/create-image.sh
@@ -0,0 +1,24 @@
+#! /usr/bin/env bash
+
+set -euo pipefail
+
+export NIX_PATH=nixpkgs=$(dirname "$(readlink -f "$0")")/../../../..
+export NIXOS_CONFIG=$(dirname "$(readlink -f "$0")")/../../../modules/virtualisation/oci-image.nix
+
+if (( $# < 1 )); then
+    (
+    echo "Usage: create-image.sh <architecture>"
+    echo
+    echo "Where <architecture> is one of:"
+    echo "  x86_64-linux"
+    echo "  aarch64-linux"
+    ) >&2
+    exit 1
+fi
+
+system="$1"; shift
+
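+# building the image runs a qemu VM, so the build machine must advertise
+# the "kvm" system feature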
+nix-build '<nixpkgs/nixos>' \
+    -A config.system.build.OCIImage \
+    --argstr system "$system" \
+    --option system-features kvm \
+    -o oci-image
diff --git a/nixpkgs/nixos/maintainers/scripts/oci/upload-image.sh b/nixpkgs/nixos/maintainers/scripts/oci/upload-image.sh
new file mode 100755
index 000000000000..e4870e94bf54
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/oci/upload-image.sh
@@ -0,0 +1,100 @@
+#! /usr/bin/env bash
+
+set -euo pipefail
+
+script_dir="$(dirname "$(readlink -f "$0")")"
+nixpkgs_root="$script_dir/../../../.."
+export NIX_PATH="nixpkgs=$nixpkgs_root"
+
+cat - <<EOF
+This script will locally build a NixOS image and upload it as a Custom Image
+using oci-cli. Make sure that an API key for the tenancy administrator has been
+added to '~/.oci'.
+For more info about configuring oci-cli, please visit
+https://docs.cloud.oracle.com/iaas/Content/API/Concepts/apisigningkey.htm#Required_Keys_and_OCIDs
+
+EOF
+
+qcow="oci-image/nixos.qcow2"
+if [ ! -f "$qcow" ]; then
+    echo "OCI image $qcow does not exist"
+    echo "Building image with create-image.sh for 'x86_64-linux'"
+    "$script_dir/create-image.sh" x86_64-linux
+    [ -f "$qcow" ] || { echo "Build failed: image not present after build"; exit 1; }
+else
+    echo "Using prebuilt image $qcow"
+fi
+
+cli="$(
+  nix-build '<nixpkgs>' \
+    --no-out-link \
+    -A oci-cli
+)"
+
+PATH="$cli/bin:$PATH"
+bucket="_TEMP_NIXOS_IMAGES_$RANDOM"
+
+echo "Creating a temporary bucket"
+root_ocid="$(
+  oci iam compartment list \
+  --all \
+  --compartment-id-in-subtree true \
+  --access-level ACCESSIBLE \
+  --include-root \
+  --raw-output \
+  --query "data[?contains(\"id\",'tenancy')].id | [0]"
+)"
+bucket_ocid=$(
+  oci os bucket create \
+    -c "$root_ocid" \
+    --name "$bucket" \
+    --raw-output \
+    --query "data.id"
+)
+# Clean up bucket on script termination
+trap 'echo Removing temporary bucket; oci os bucket delete --force --name "$bucket"' INT TERM EXIT
+
+echo "Uploading image to temporary bucket"
+oci os object put -bn "$bucket" --file "$qcow"
+
+echo "Importing image as a Custom Image"
+bucket_ns="$(oci os ns get --query "data" --raw-output)"
+image_id="$(
+  oci compute image import from-object \
+    -c "$root_ocid" \
+    --namespace "$bucket_ns" \
+    --bucket-name "$bucket" \
+    --name nixos.qcow2 \
+    --operating-system NixOS \
+    --source-image-type QCOW2 \
+    --launch-mode PARAVIRTUALIZED \
+    --display-name NixOS \
+    --raw-output \
+    --query "data.id"
+)"
+
+cat - <<EOF
+Image created! Please mark all available shapes as compatible with this image by
+visiting the following link and by selecting the 'Edit Details' button on:
+https://cloud.oracle.com/compute/images/$image_id
+EOF
+
+# Workaround until https://github.com/oracle/oci-cli/issues/399 is addressed
+echo "Sleeping for 15 minutes before cleaning up files in the temporary bucket"
+sleep $((15 * 60))
+
+echo "Deleting image from bucket"
+par_id="$(
+  oci os preauth-request list \
+    --bucket-name "$bucket" \
+    --raw-output \
+    --query "data[0].id"
+)"
+
+if [[ -n $par_id ]]; then
+  oci os preauth-request delete \
+    --bucket-name "$bucket" \
+    --par-id "$par_id"
+fi
+
+oci os object delete -bn "$bucket" --object-name nixos.qcow2 --force
diff --git a/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix b/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
new file mode 100644
index 000000000000..60f0535854dd
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
@@ -0,0 +1,107 @@
+# nix-build '<nixpkgs/nixos>' -A config.system.build.openstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/openstack-image-zfs.nix ]; }"
+
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) mkOption types;
+  copyChannel = true;
+  cfg = config.openstackImage;
+  imageBootMode = if config.openstack.efi then "uefi" else "legacy-bios";
+in
+{
+  imports = [
+    ../../../modules/virtualisation/openstack-config.nix
+  ] ++ (lib.optional copyChannel ../../../modules/installer/cd-dvd/channel.nix);
+
+
+  options.openstackImage = {
+    name = mkOption {
+      type = types.str;
+      description = lib.mdDoc "The name of the generated derivation";
+      default = "nixos-openstack-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+    };
+
+    ramMB = mkOption {
+      type = types.int;
+      default = 1024;
+      description = lib.mdDoc "RAM allocation for build VM";
+    };
+
+    sizeMB = mkOption {
+      type = types.int;
+      default = 8192;
+      description = lib.mdDoc "The size in MB of the image";
+    };
+
+    format = mkOption {
+      type = types.enum [ "raw" "qcow2" ];
+      default = "qcow2";
+      description = lib.mdDoc "The image format to output";
+    };
+  };
+
+  config = {
+    documentation.enable = copyChannel;
+    openstack = {
+      efi = true;
+      zfs = {
+        enable = true;
+        datasets = {
+          "tank/system/root".mount = "/";
+          "tank/system/var".mount = "/var";
+          "tank/local/nix".mount = "/nix";
+          "tank/user/home".mount = "/home";
+        };
+      };
+    };
+
+    system.build.openstackImage = import ../../../lib/make-single-disk-zfs-image.nix {
+      inherit lib config;
+      inherit (cfg) contents format name;
+      pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+
+      configFile = pkgs.writeText "configuration.nix"
+        ''
+          { modulesPath, ... }: {
+            imports = [ "''${modulesPath}/virtualisation/openstack-config.nix" ];
+            openstack.zfs.enable = true;
+          }
+        '';
+
+      includeChannel = copyChannel;
+
+      bootSize = 1000;
+      memSize = cfg.ramMB;
+      rootSize = cfg.sizeMB;
+      rootPoolProperties = {
+        ashift = 12;
+        autoexpand = "on";
+      };
+
+      datasets = config.openstack.zfs.datasets;
+
+      postVM = ''
+         extension=''${rootDiskImage##*.}
+         friendlyName=$out/${cfg.name}
+         rootDisk="$friendlyName.root.$extension"
+         mv "$rootDiskImage" "$rootDisk"
+
+         mkdir -p $out/nix-support
+         echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products
+
+        ${pkgs.jq}/bin/jq -n \
+          --arg system_label ${lib.escapeShellArg config.system.nixos.label} \
+          --arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
+          --arg root_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
+          --arg boot_mode "${imageBootMode}" \
+          --arg root "$rootDisk" \
+         '{}
+           | .label = $system_label
+           | .boot_mode = $boot_mode
+           | .system = $system
+           | .disks.root.logical_bytes = $root_logical_bytes
+           | .disks.root.file = $root
+           ' > $out/nix-support/image-info.json
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image.nix b/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image.nix
new file mode 100644
index 000000000000..6728a98758b8
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image.nix
@@ -0,0 +1,27 @@
+# nix-build '<nixpkgs/nixos>' -A config.system.build.openstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/openstack-image.nix ]; }"
+
+{ config, lib, pkgs, ... }:
+let
+  copyChannel = true;
+in
+{
+  imports = [
+    ../../../modules/virtualisation/openstack-config.nix
+  ] ++ (lib.optional copyChannel ../../../modules/installer/cd-dvd/channel.nix);
+
+  documentation.enable = copyChannel;
+
+  system.build.openstackImage = import ../../../lib/make-disk-image.nix {
+    inherit lib config copyChannel;
+    additionalSpace = "1024M";
+    pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+    format = "qcow2";
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        {
+          imports = [ <nixpkgs/nixos/modules/virtualisation/openstack-config.nix> ];
+        }
+      '';
+  };
+
+}