about summary refs log tree commit diff
path: root/nixpkgs/nixos/maintainers
diff options
context:
space:
mode:
authorAlyssa Ross <hi@alyssa.is>2019-01-07 02:18:36 +0000
committerAlyssa Ross <hi@alyssa.is>2019-01-07 02:18:47 +0000
commit36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2 (patch)
treeb3faaf573407b32aa645237a4d16b82778a39a92 /nixpkgs/nixos/maintainers
parent4e31070265257dc67d120c27e0f75c2344fdfa9a (diff)
parentabf060725d7614bd3b9f96764262dfbc2f9c2199 (diff)
downloadnixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.gz
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.bz2
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.lz
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.xz
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.zst
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.zip
Add 'nixpkgs/' from commit 'abf060725d7614bd3b9f96764262dfbc2f9c2199'
git-subtree-dir: nixpkgs
git-subtree-mainline: 4e31070265257dc67d120c27e0f75c2344fdfa9a
git-subtree-split: abf060725d7614bd3b9f96764262dfbc2f9c2199
Diffstat (limited to 'nixpkgs/nixos/maintainers')
-rw-r--r--nixpkgs/nixos/maintainers/option-usages.nix192
-rwxr-xr-xnixpkgs/nixos/maintainers/scripts/azure/create-azure.sh8
-rwxr-xr-xnixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh22
-rw-r--r--nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix66
-rwxr-xr-xnixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh279
-rwxr-xr-xnixpkgs/nixos/maintainers/scripts/gce/create-gce.sh23
-rw-r--r--nixpkgs/nixos/maintainers/scripts/openstack/nova-image.nix26
7 files changed, 616 insertions, 0 deletions
diff --git a/nixpkgs/nixos/maintainers/option-usages.nix b/nixpkgs/nixos/maintainers/option-usages.nix
new file mode 100644
index 000000000000..242c2a4dd442
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/option-usages.nix
@@ -0,0 +1,192 @@
+{ configuration ? import ../lib/from-env.nix "NIXOS_CONFIG" <nixos-config>
+
+# provide an option name, as a string literal.
+, testOption ? null
+
+# provide a list of option names, as string literals.
+, testOptions ? [ ]
+}:
+
+# This file is meant to be used as follows:
+#
+#   $ nix-instantiate ./option-usages.nix --argstr testOption services.xserver.enable -A txtContent --eval
+#
+# or
+#
+#   $ nix-build ./option-usages.nix --argstr testOption services.xserver.enable -A txt -o services.xserver.enable._txt
+#
+# Other targets exist, such as `dotContent`, `dot`, and `pdf`.  If you are
+# looking for the option usage of multiple options, you can provide a list
+# as an argument.
+#
+#   $ nix-build ./option-usages.nix --arg testOptions \
+#      '["boot.loader.gummiboot.enable" "boot.loader.gummiboot.timeout"]' \
+#      -A txt -o gummiboot.list
+#
+# Note, this script is slow as it has to evaluate all options of the system
+# once per queried option.
+#
+# This nix expression works by doing a first evaluation, which evaluates the
+# result of every option.
+#
+# Then, for each queried option, we evaluate the NixOS modules a second
+# time, except that we replace the `config` argument of all the modules with
+# the result of the original evaluation, except for the tested option, whose
+# value is replaced by a `throw` statement which is caught by the `tryEval`
+# evaluation of each option value.
+#
+# We then compare the result of the evaluation of the original module, with
+# the result of the second evaluation, and consider that the new failures are
+# caused by our mutation of the `config` argument.
+#
+# Doing so returns all option results which are directly using the
+# tested option result.
+
+with import ../../lib;
+
+let
+
+  evalFun = {
+    specialArgs ? {}
+  }: import ../lib/eval-config.nix {
+       modules = [ configuration ];
+       inherit specialArgs;
+     };
+
+  eval = evalFun {};
+  inherit (eval) pkgs;
+
+  excludedTestOptions = [
+    # We cannot evaluate _module.args, as it is used during the computation
+    # of the modules list.
+    "_module.args"
+
+    # For some reason, which we have yet to investigate, some options cannot
+    # be replaced by a throw without causing a non-catchable failure.
+    "networking.bonds"
+    "networking.bridges"
+    "networking.interfaces"
+    "networking.macvlans"
+    "networking.sits"
+    "networking.vlans"
+    "services.openssh.startWhenNeeded"
+  ];
+
+  # For some reason, which we have yet to investigate, some options are
+  # time-consuming to compute, so we filter them out for the moment.
+  excludedOptions = [
+    "boot.systemd.services"
+    "systemd.services"
+    "kde.extraPackages"
+  ];
+  excludeOptions = list:
+    filter (opt: !(elem (showOption opt.loc) excludedOptions)) list;
+
+
+  reportNewFailures = old: new:
+    let
+      filterChanges =
+        filter ({fst, snd}:
+          !(fst.success -> snd.success)
+        );
+
+      keepNames =
+        map ({fst, snd}:
+          /* assert fst.name == snd.name; */ snd.name
+        );
+
+      # Use  tryEval (strict ...)  to know if there is any failure while
+      # evaluating the option value.
+      #
+      # Note, the `strict` function is not strict enough, but using the toXML
+      # builtin multiplies the memory usage and the time needed to compute
+      # each option by a factor of 4.
+      tryCollectOptions = moduleResult:
+        flip map (excludeOptions (collect isOption moduleResult)) (opt:
+          { name = showOption opt.loc; } // builtins.tryEval (strict opt.value));
+     in
+       keepNames (
+         filterChanges (
+           zipLists (tryCollectOptions old) (tryCollectOptions new)
+         )
+       );
+
+
+  # Create a list of modules where each module contains only one failing
+  # option.
+  introspectionModules =
+    let
+      setIntrospection = opt: rec {
+        name = showOption opt.loc;
+        path = opt.loc;
+        config = setAttrByPath path
+          (throw "Usage introspection of '${name}' by forced failure.");
+      };
+    in
+      map setIntrospection (collect isOption eval.options);
+
+  overrideConfig = thrower:
+    recursiveUpdateUntil (path: old: new:
+      path == thrower.path
+    ) eval.config thrower.config;
+
+
+  graph =
+    map (thrower: {
+      option = thrower.name;
+      usedBy = assert __trace "Investigate ${thrower.name}" true;
+        reportNewFailures eval.options (evalFun {
+          specialArgs = {
+            config = overrideConfig thrower;
+          };
+        }).options;
+    }) introspectionModules;
+
+  displayOptionsGraph =
+     let
+       checkList =
+         if !(isNull testOption) then [ testOption ]
+         else testOptions;
+       checkAll = checkList == [];
+     in
+       flip filter graph ({option, ...}:
+         (checkAll || elem option checkList)
+         && !(elem option excludedTestOptions)
+       );
+
+  graphToDot = graph: ''
+    digraph "Option Usages" {
+      ${concatMapStrings ({option, usedBy}:
+          concatMapStrings (user: ''
+            "${option}" -> "${user}"''
+          ) usedBy
+        ) displayOptionsGraph}
+    }
+  '';
+
+  graphToText = graph:
+    concatMapStrings ({usedBy, ...}:
+        concatMapStrings (user: ''
+          ${user}
+        '') usedBy
+      ) displayOptionsGraph;
+
+in
+
+rec {
+  dotContent = graphToDot graph;
+  dot = pkgs.writeTextFile {
+    name = "option_usages.dot";
+    text = dotContent;
+  };
+
+  pdf = pkgs.texFunctions.dot2pdf {
+    dotGraph = dot;
+  };
+
+  txtContent = graphToText graph;
+  txt = pkgs.writeTextFile {
+    name = "option_usages.txt";
+    text = txtContent;
+  };
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh b/nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh
new file mode 100755
index 000000000000..2b22cb536619
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh
@@ -0,0 +1,8 @@
+#! /bin/sh -e
+
+export NIX_PATH=nixpkgs=../../../..
+export NIXOS_CONFIG=$(dirname $(readlink -f $0))/../../../modules/virtualisation/azure-image.nix
+export TIMESTAMP=$(date +%Y%m%d%H%M)
+
+nix-build '<nixpkgs/nixos>' \
+   -A config.system.build.azureImage --argstr system x86_64-linux -o azure -j 10
diff --git a/nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh b/nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh
new file mode 100755
index 000000000000..2ea35d1d4c33
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh
@@ -0,0 +1,22 @@
+#! /bin/sh -e
+
+export STORAGE=${STORAGE:-nixos}
+export THREADS=${THREADS:-8}
+
+azure-vhd-utils-for-go  upload --localvhdpath azure/disk.vhd  --stgaccountname "$STORAGE"  --stgaccountkey "$KEY" \
+   --containername images --blobname nixos-unstable-nixops-updated.vhd --parallelism "$THREADS" --overwrite
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix
new file mode 100644
index 000000000000..eeae27ede0f8
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.amazonImage;
+in {
+
+  imports = [ ../../../modules/virtualisation/amazon-image.nix ];
+
+  # Required to provide good EBS experience,
+  # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html#timeout-nvme-ebs-volumes
+  # TODO change value to 4294967295 when kernel is updated to 4.15 or later
+  config.boot.kernelParams = [ "nvme_core.io_timeout=255" ];
+
+  options.amazonImage = {
+    name = mkOption {
+      type = types.str;
+      description = "The name of the generated derivation";
+      default = "nixos-disk-image";
+    };
+
+    contents = mkOption {
+      example = literalExample ''
+        [ { source = pkgs.memtest86 + "/memtest.bin";
+            target = "boot/memtest.bin";
+          }
+        ]
+      '';
+      default = [];
+      description = ''
+        This option lists files to be copied to fixed locations in the
+        generated image. Glob patterns work.
+      '';
+    };
+
+    sizeMB = mkOption {
+      type = types.int;
+      default = if config.ec2.hvm then 2048 else 8192;
+      description = "The size in MB of the image";
+    };
+
+    format = mkOption {
+      type = types.enum [ "raw" "qcow2" "vpc" ];
+      default = "qcow2";
+      description = "The image format to output";
+    };
+  };
+
+  config.system.build.amazonImage = import ../../../lib/make-disk-image.nix {
+    inherit lib config;
+    inherit (cfg) contents format name;
+    pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+    partitionTableType = if config.ec2.hvm then "legacy" else "none";
+    diskSize = cfg.sizeMB;
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        {
+          imports = [ <nixpkgs/nixos/modules/virtualisation/amazon-image.nix> ];
+          ${optionalString config.ec2.hvm ''
+            ec2.hvm = true;
+          ''}
+        }
+      '';
+  };
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh b/nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh
new file mode 100755
index 000000000000..790cc6cbc531
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh
@@ -0,0 +1,279 @@
+#!/usr/bin/env nix-shell
+#! nix-shell -i bash -p qemu ec2_ami_tools jq ec2_api_tools awscli
+
+# To start with do: nix-shell -p awscli --run "aws configure"
+
+set -e
+set -o pipefail
+
+version=$(nix-instantiate --eval --strict '<nixpkgs>' -A lib.version | sed s/'"'//g)
+major=${version:0:5}
+echo "NixOS version is $version ($major)"
+
+stateDir=/home/deploy/amis/ec2-image-$version
+echo "keeping state in $stateDir"
+mkdir -p $stateDir
+
+rm -f ec2-amis.nix
+
+types="hvm"
+stores="ebs"
+regions="eu-west-1 eu-west-2 eu-west-3 eu-central-1 us-east-1 us-east-2 us-west-1 us-west-2 ca-central-1 ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2 sa-east-1 ap-south-1"
+
+for type in $types; do
+    link=$stateDir/$type
+    imageFile=$link/nixos.qcow2
+    system=x86_64-linux
+    arch=x86_64
+
+    # Build the image.
+    if ! [ -L $link ]; then
+        if [ $type = pv ]; then hvmFlag=false; else hvmFlag=true; fi
+
+        echo "building image type '$type'..."
+        nix-build -o $link \
+            '<nixpkgs/nixos>' \
+            -A config.system.build.amazonImage \
+            --arg configuration "{ imports = [ <nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix> ]; ec2.hvm = $hvmFlag; }"
+    fi
+
+    for store in $stores; do
+
+        bucket=nixos-amis
+        bucketDir="$version-$type-$store"
+
+        prevAmi=
+        prevRegion=
+
+        for region in $regions; do
+
+            name=nixos-$version-$arch-$type-$store
+            description="NixOS $system $version ($type-$store)"
+
+            amiFile=$stateDir/$region.$type.$store.ami-id
+
+            if ! [ -e $amiFile ]; then
+
+                echo "doing $name in $region..."
+
+                if [ -n "$prevAmi" ]; then
+                    ami=$(aws ec2 copy-image \
+                        --region "$region" \
+                        --source-region "$prevRegion" --source-image-id "$prevAmi" \
+                        --name "$name" --description "$description" | jq -r '.ImageId')
+                    if [ "$ami" = null ]; then break; fi
+                else
+
+                    if [ $store = s3 ]; then
+
+                        # Bundle the image.
+                        imageDir=$stateDir/$type-bundled
+
+                        # Convert the image to raw format.
+                        rawFile=$stateDir/$type.raw
+                        if ! [ -e $rawFile ]; then
+                            qemu-img convert -f qcow2 -O raw $imageFile $rawFile.tmp
+                            mv $rawFile.tmp $rawFile
+                        fi
+
+                        if ! [ -d $imageDir ]; then
+                            rm -rf $imageDir.tmp
+                            mkdir -p $imageDir.tmp
+                            ec2-bundle-image \
+                                -d $imageDir.tmp \
+                                -i $rawFile --arch $arch \
+                                --user "$AWS_ACCOUNT" -c "$EC2_CERT" -k "$EC2_PRIVATE_KEY"
+                            mv $imageDir.tmp $imageDir
+                        fi
+
+                        # Upload the bundle to S3.
+                        if ! [ -e $imageDir/uploaded ]; then
+                            echo "uploading bundle to S3..."
+                            ec2-upload-bundle \
+                                -m $imageDir/$type.raw.manifest.xml \
+                                -b "$bucket/$bucketDir" \
+                                -a "$AWS_ACCESS_KEY_ID" -s "$AWS_SECRET_ACCESS_KEY" \
+                                --location EU
+                            touch $imageDir/uploaded
+                        fi
+
+                        extraFlags="--image-location $bucket/$bucketDir/$type.raw.manifest.xml"
+
+                    else
+
+                        # Convert the image to vhd format so we don't have
+                        # to upload a huge raw image.
+                        vhdFile=$stateDir/$type.vhd
+                        if ! [ -e $vhdFile ]; then
+                            qemu-img convert -f qcow2 -O vpc $imageFile $vhdFile.tmp
+                            mv $vhdFile.tmp $vhdFile
+                        fi
+
+                        vhdFileLogicalBytes="$(qemu-img info "$vhdFile" | grep ^virtual\ size: | cut -f 2 -d \(  | cut -f 1 -d \ )"
+                        vhdFileLogicalGigaBytes=$(((vhdFileLogicalBytes-1)/1024/1024/1024+1)) # Round to the next GB
+
+                        echo "Disk size is $vhdFileLogicalBytes bytes. Will be registered as $vhdFileLogicalGigaBytes GB."
+
+                        taskId=$(cat $stateDir/$region.$type.task-id 2> /dev/null || true)
+                        volId=$(cat $stateDir/$region.$type.vol-id 2> /dev/null || true)
+                        snapId=$(cat $stateDir/$region.$type.snap-id 2> /dev/null || true)
+
+                        # Import the VHD file.
+                        if [ -z "$snapId" -a -z "$volId" -a -z "$taskId" ]; then
+                            echo "importing $vhdFile..."
+                            taskId=$(ec2-import-volume $vhdFile --no-upload -f vhd \
+                                -O "$AWS_ACCESS_KEY_ID" -W "$AWS_SECRET_ACCESS_KEY" \
+                                -o "$AWS_ACCESS_KEY_ID" -w "$AWS_SECRET_ACCESS_KEY" \
+                                --region "$region" -z "${region}a" \
+                                --bucket "$bucket" --prefix "$bucketDir/" \
+                                | tee /dev/stderr \
+                                | sed 's/.*\(import-vol-[0-9a-z]\+\).*/\1/ ; t ; d')
+                            echo -n "$taskId" > $stateDir/$region.$type.task-id
+                        fi
+
+                        if [ -z "$snapId" -a -z "$volId" ]; then
+                            ec2-resume-import  $vhdFile -t "$taskId" --region "$region" \
+                                -O "$AWS_ACCESS_KEY_ID" -W "$AWS_SECRET_ACCESS_KEY" \
+                                -o "$AWS_ACCESS_KEY_ID" -w "$AWS_SECRET_ACCESS_KEY"
+                        fi
+
+                        # Wait for the volume creation to finish.
+                        if [ -z "$snapId" -a -z "$volId" ]; then
+                            echo "waiting for import to finish..."
+                            while true; do
+                                volId=$(aws ec2 describe-conversion-tasks --conversion-task-ids "$taskId" --region "$region" | jq -r .ConversionTasks[0].ImportVolume.Volume.Id)
+                                if [ "$volId" != null ]; then break; fi
+                                sleep 10
+                            done
+
+                            echo -n "$volId" > $stateDir/$region.$type.vol-id
+                        fi
+
+                        # Delete the import task.
+                        if [ -n "$volId" -a -n "$taskId" ]; then
+                            echo "removing import task..."
+                            ec2-delete-disk-image -t "$taskId" --region "$region" \
+                                -O "$AWS_ACCESS_KEY_ID" -W "$AWS_SECRET_ACCESS_KEY" \
+                                -o "$AWS_ACCESS_KEY_ID" -w "$AWS_SECRET_ACCESS_KEY" || true
+                            rm -f $stateDir/$region.$type.task-id
+                        fi
+
+                        # Create a snapshot.
+                        if [ -z "$snapId" ]; then
+                            echo "creating snapshot..."
+                            # FIXME: this can fail with InvalidVolume.NotFound. Eventual consistency yay.
+                            snapId=$(aws ec2 create-snapshot --volume-id "$volId" --region "$region" --description "$description" | jq -r .SnapshotId)
+                            if [ "$snapId" = null ]; then exit 1; fi
+                            echo -n "$snapId" > $stateDir/$region.$type.snap-id
+                        fi
+
+                        # Wait for the snapshot to finish.
+                        echo "waiting for snapshot to finish..."
+                        while true; do
+                            status=$(aws ec2 describe-snapshots --snapshot-ids "$snapId" --region "$region" | jq -r .Snapshots[0].State)
+                            if [ "$status" = completed ]; then break; fi
+                            sleep 10
+                        done
+
+                        # Delete the volume.
+                        if [ -n "$volId" ]; then
+                            echo "deleting volume..."
+                            aws ec2 delete-volume --volume-id "$volId" --region "$region" || true
+                            rm -f $stateDir/$region.$type.vol-id
+                        fi
+
+                        blockDeviceMappings="DeviceName=/dev/sda1,Ebs={SnapshotId=$snapId,VolumeSize=$vhdFileLogicalGigaBytes,DeleteOnTermination=true,VolumeType=gp2}"
+                        extraFlags=""
+
+                        if [ $type = pv ]; then
+                            extraFlags+=" --root-device-name /dev/sda1"
+                        else
+                            extraFlags+=" --root-device-name /dev/sda1"
+                            extraFlags+=" --sriov-net-support simple"
+                            extraFlags+=" --ena-support"
+                        fi
+
+                        blockDeviceMappings+=" DeviceName=/dev/sdb,VirtualName=ephemeral0"
+                        blockDeviceMappings+=" DeviceName=/dev/sdc,VirtualName=ephemeral1"
+                        blockDeviceMappings+=" DeviceName=/dev/sdd,VirtualName=ephemeral2"
+                        blockDeviceMappings+=" DeviceName=/dev/sde,VirtualName=ephemeral3"
+                    fi
+
+                    if [ $type = hvm ]; then
+                        extraFlags+=" --sriov-net-support simple"
+                        extraFlags+=" --ena-support"
+                    fi
+
+                    # Register the AMI.
+                    if [ $type = pv ]; then
+                        kernel=$(aws ec2 describe-images --owner amazon --filters "Name=name,Values=pv-grub-hd0_1.05-$arch.gz" | jq -r .Images[0].ImageId)
+                        if [ "$kernel" = null ]; then break; fi
+                        echo "using PV-GRUB kernel $kernel"
+                        extraFlags+=" --virtualization-type paravirtual --kernel $kernel"
+                    else
+                        extraFlags+=" --virtualization-type hvm"
+                    fi
+
+                    ami=$(aws ec2 register-image \
+                        --name "$name" \
+                        --description "$description" \
+                        --region "$region" \
+                        --architecture "$arch" \
+                        --block-device-mappings $blockDeviceMappings \
+                        $extraFlags | jq -r .ImageId)
+                    if [ "$ami" = null ]; then break; fi
+                fi
+
+                echo -n "$ami" > $amiFile
+                echo "created AMI $ami of type '$type' in $region..."
+
+            else
+                ami=$(cat $amiFile)
+            fi
+
+            echo "region = $region, type = $type, store = $store, ami = $ami"
+
+            if [ -z "$prevAmi" ]; then
+                prevAmi="$ami"
+                prevRegion="$region"
+            fi
+        done
+
+    done
+
+done
+
+for type in $types; do
+    link=$stateDir/$type
+    system=x86_64-linux
+    arch=x86_64
+
+    for store in $stores; do
+
+        for region in $regions; do
+
+            name=nixos-$version-$arch-$type-$store
+            amiFile=$stateDir/$region.$type.$store.ami-id
+            ami=$(cat $amiFile)
+
+            echo "region = $region, type = $type, store = $store, ami = $ami"
+
+            echo -n "waiting for AMI..."
+            while true; do
+                status=$(aws ec2 describe-images --image-ids "$ami" --region "$region" | jq -r .Images[0].State)
+                if [ "$status" = available ]; then break; fi
+                sleep 10
+                echo -n '.'
+            done
+            echo
+
+            # Make the image public.
+            aws ec2 modify-image-attribute \
+                --image-id "$ami" --region "$region" --launch-permission 'Add={Group=all}'
+
+            echo "  \"$major\".$region.$type-$store = \"$ami\";" >> ec2-amis.nix
+        done
+
+    done
+
+done
diff --git a/nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh b/nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh
new file mode 100755
index 000000000000..48748a59d298
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env nix-shell
+#! nix-shell -i bash -p google-cloud-sdk
+
+set -euo pipefail
+
+BUCKET_NAME="${BUCKET_NAME:-nixos-cloud-images}"
+TIMESTAMP="$(date +%Y%m%d%H%M)"
+export TIMESTAMP
+
+nix-build '<nixpkgs/nixos/lib/eval-config.nix>' \
+   -A config.system.build.googleComputeImage \
+   --arg modules "[ <nixpkgs/nixos/modules/virtualisation/google-compute-image.nix> ]" \
+   --argstr system x86_64-linux \
+   -o gce \
+   -j 10
+
+img_path=$(echo gce/*.tar.gz)
+img_name=$(basename "$img_path")
+img_id=$(echo "$img_name" | sed 's|.raw.tar.gz$||;s|\.|-|g;s|_|-|g')
+if ! gsutil ls "gs://${BUCKET_NAME}/$img_name"; then
+  gsutil cp "$img_path" "gs://${BUCKET_NAME}/$img_name"
+  gsutil acl ch -u AllUsers:R "gs://${BUCKET_NAME}/$img_name"
+fi
diff --git a/nixpkgs/nixos/maintainers/scripts/openstack/nova-image.nix b/nixpkgs/nixos/maintainers/scripts/openstack/nova-image.nix
new file mode 100644
index 000000000000..b6f3a5b15200
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/openstack/nova-image.nix
@@ -0,0 +1,26 @@
+# nix-build '<nixpkgs/nixos>' -A config.system.build.novaImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/nova-image.nix ]; }"
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports =
+    [ ../../../modules/installer/cd-dvd/channel.nix
+      ../../../modules/virtualisation/nova-config.nix
+    ];
+
+  system.build.novaImage = import ../../../lib/make-disk-image.nix {
+    inherit lib config;
+    pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+    diskSize = 8192;
+    format = "qcow2";
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        {
+          imports = [ <nixpkgs/nixos/modules/virtualisation/nova-config.nix> ];
+        }
+      '';
+  };
+
+}