about summary refs log tree commit diff
path: root/nixpkgs/pkgs/build-support
diff options
context:
space:
mode:
authorAlyssa Ross <hi@alyssa.is>2021-09-08 17:57:14 +0000
committerAlyssa Ross <hi@alyssa.is>2021-09-13 11:31:47 +0000
commitee7984efa14902a2ddd820c937457667a4f40c6a (patch)
treec9c1d046733cefe5e21fdd8a52104175d47b2443 /nixpkgs/pkgs/build-support
parentffc9d4ba381da62fd08b361bacd1e71e2a3d934d (diff)
parentb3c692172e5b5241b028a98e1977f9fb12eeaf42 (diff)
downloadnixlib-ee7984efa14902a2ddd820c937457667a4f40c6a.tar
nixlib-ee7984efa14902a2ddd820c937457667a4f40c6a.tar.gz
nixlib-ee7984efa14902a2ddd820c937457667a4f40c6a.tar.bz2
nixlib-ee7984efa14902a2ddd820c937457667a4f40c6a.tar.lz
nixlib-ee7984efa14902a2ddd820c937457667a4f40c6a.tar.xz
nixlib-ee7984efa14902a2ddd820c937457667a4f40c6a.tar.zst
nixlib-ee7984efa14902a2ddd820c937457667a4f40c6a.zip
Merge commit 'b3c692172e5b5241b028a98e1977f9fb12eeaf42'
Diffstat (limited to 'nixpkgs/pkgs/build-support')
-rw-r--r--nixpkgs/pkgs/build-support/agda/default.nix6
-rw-r--r--nixpkgs/pkgs/build-support/bintools-wrapper/add-hardening.sh2
-rw-r--r--nixpkgs/pkgs/build-support/bintools-wrapper/default.nix2
-rw-r--r--nixpkgs/pkgs/build-support/build-fhs-userenv/chrootenv/chrootenv.c2
-rw-r--r--nixpkgs/pkgs/build-support/build-fhs-userenv/env.nix12
-rw-r--r--nixpkgs/pkgs/build-support/cc-wrapper/default.nix7
-rw-r--r--nixpkgs/pkgs/build-support/coq/default.nix10
-rw-r--r--nixpkgs/pkgs/build-support/dhall-to-nix.nix4
-rw-r--r--nixpkgs/pkgs/build-support/docker/default.nix1004
-rw-r--r--nixpkgs/pkgs/build-support/docker/examples.nix2
-rw-r--r--nixpkgs/pkgs/build-support/docker/tarsum.nix42
-rw-r--r--nixpkgs/pkgs/build-support/emacs/elpa.nix2
-rw-r--r--nixpkgs/pkgs/build-support/emacs/wrapper.nix2
-rwxr-xr-xnixpkgs/pkgs/build-support/fetchgit/nix-prefetch-git9
-rw-r--r--nixpkgs/pkgs/build-support/fetchgitlab/default.nix28
-rw-r--r--nixpkgs/pkgs/build-support/fetchgx/default.nix4
-rw-r--r--nixpkgs/pkgs/build-support/fetchurl/mirrors.nix50
-rw-r--r--nixpkgs/pkgs/build-support/fetchzip/default.nix13
-rw-r--r--nixpkgs/pkgs/build-support/kernel/make-initrd.nix2
-rw-r--r--nixpkgs/pkgs/build-support/kernel/make-initrd.sh4
-rw-r--r--nixpkgs/pkgs/build-support/libredirect/default.nix13
-rw-r--r--nixpkgs/pkgs/build-support/libredirect/libredirect.c9
-rw-r--r--nixpkgs/pkgs/build-support/libredirect/test.c15
-rw-r--r--nixpkgs/pkgs/build-support/nuget-to-nix/default.nix5
-rwxr-xr-xnixpkgs/pkgs/build-support/nuget-to-nix/nuget-to-nix.sh23
-rw-r--r--nixpkgs/pkgs/build-support/replace-secret/replace-secret.nix7
-rw-r--r--nixpkgs/pkgs/build-support/rust/build-rust-crate/default.nix586
-rw-r--r--nixpkgs/pkgs/build-support/rust/default-crate-overrides.nix42
-rw-r--r--nixpkgs/pkgs/build-support/rust/default.nix2
-rw-r--r--nixpkgs/pkgs/build-support/setup-hooks/auto-patchelf.sh1
-rw-r--r--nixpkgs/pkgs/build-support/skaware/build-skaware-man-pages.nix51
-rw-r--r--nixpkgs/pkgs/build-support/skaware/build-skaware-package.nix4
-rw-r--r--nixpkgs/pkgs/build-support/templaterpm/default.nix2
-rw-r--r--nixpkgs/pkgs/build-support/trivial-builders.nix9
-rw-r--r--nixpkgs/pkgs/build-support/vm/default.nix4
-rw-r--r--nixpkgs/pkgs/build-support/vm/test.nix4
-rw-r--r--nixpkgs/pkgs/build-support/writers/default.nix45
-rw-r--r--nixpkgs/pkgs/build-support/writers/test.nix26
38 files changed, 1109 insertions, 946 deletions
diff --git a/nixpkgs/pkgs/build-support/agda/default.nix b/nixpkgs/pkgs/build-support/agda/default.nix
index ed7d11a13147..99cc1259023a 100644
--- a/nixpkgs/pkgs/build-support/agda/default.nix
+++ b/nixpkgs/pkgs/build-support/agda/default.nix
@@ -1,6 +1,6 @@
 # Builder for Agda packages.
 
-{ stdenv, lib, self, Agda, runCommandNoCC, makeWrapper, writeText, ghcWithPackages, nixosTests }:
+{ stdenv, lib, self, Agda, runCommand, makeWrapper, writeText, ghcWithPackages, nixosTests }:
 
 with lib.strings;
 
@@ -15,7 +15,7 @@ let
     '';
     pname = "agdaWithPackages";
     version = Agda.version;
-  in runCommandNoCC "${pname}-${version}" {
+  in runCommand "${pname}-${version}" {
     inherit pname version;
     nativeBuildInputs = [ makeWrapper ];
     passthru = {
@@ -46,6 +46,7 @@ let
 
   defaults =
     { pname
+    , meta
     , buildInputs ? []
     , everythingFile ? "./Everything.agda"
     , libraryName ? pname
@@ -76,6 +77,7 @@ let
           find -not \( -path ${everythingFile} -or -path ${lib.interfaceFile everythingFile} \) -and \( ${concatMapStringsSep " -or " (p: "-name '*.${p}'") (extensions ++ extraExtensions)} \) -exec cp -p --parents -t "$out" {} +
           runHook postInstall
         '';
+        meta = if meta.broken or false then meta // { hydraPlatforms = lib.platforms.none; } else meta;
       };
 in
 {
diff --git a/nixpkgs/pkgs/build-support/bintools-wrapper/add-hardening.sh b/nixpkgs/pkgs/build-support/bintools-wrapper/add-hardening.sh
index 5e49b7bd9053..4d289a334b77 100644
--- a/nixpkgs/pkgs/build-support/bintools-wrapper/add-hardening.sh
+++ b/nixpkgs/pkgs/build-support/bintools-wrapper/add-hardening.sh
@@ -37,7 +37,7 @@ fi
 for flag in "${!hardeningEnableMap[@]}"; do
   case $flag in
     pie)
-      if [[ ! ("$*" =~ " -shared " || "$*" =~ " -static ") ]]; then
+      if [[ ! ("$*" =~ " -shared " || "$*" =~ " -static " || "$*" =~ " -r " || "$*" =~ " -Ur " || "$*" =~ " -i ") ]]; then
         if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling LDFlags -pie >&2; fi
         hardeningLDFlags+=('-pie')
       fi
diff --git a/nixpkgs/pkgs/build-support/bintools-wrapper/default.nix b/nixpkgs/pkgs/build-support/bintools-wrapper/default.nix
index 5d2f2f977a70..82d173697438 100644
--- a/nixpkgs/pkgs/build-support/bintools-wrapper/default.nix
+++ b/nixpkgs/pkgs/build-support/bintools-wrapper/default.nix
@@ -208,6 +208,8 @@ stdenv.mkDerivation {
       else if targetPlatform.isAlpha then "alpha"
       else if targetPlatform.isVc4 then "vc4"
       else if targetPlatform.isOr1k then "or1k"
+      else if targetPlatform.isM68k then "m68k"
+      else if targetPlatform.isS390 then "s390"
       else if targetPlatform.isRiscV then "lriscv"
       else throw "unknown emulation for platform: ${targetPlatform.config}";
     in if targetPlatform.useLLVM or false then ""
diff --git a/nixpkgs/pkgs/build-support/build-fhs-userenv/chrootenv/chrootenv.c b/nixpkgs/pkgs/build-support/build-fhs-userenv/chrootenv/chrootenv.c
index 27e70e3fe5c4..324c9d24ba04 100644
--- a/nixpkgs/pkgs/build-support/build-fhs-userenv/chrootenv/chrootenv.c
+++ b/nixpkgs/pkgs/build-support/build-fhs-userenv/chrootenv/chrootenv.c
@@ -122,7 +122,7 @@ int main(gint argc, gchar **argv) {
     }
 
     // hide all mounts we do from the parent
-    fail_if(mount(0, "/", 0, MS_PRIVATE | MS_REC, 0));
+    fail_if(mount(0, "/", 0, MS_SLAVE | MS_REC, 0));
 
     if (uid != 0) {
       spit("/proc/self/setgroups", "deny");
diff --git a/nixpkgs/pkgs/build-support/build-fhs-userenv/env.nix b/nixpkgs/pkgs/build-support/build-fhs-userenv/env.nix
index 226904f311b6..da4521b4de3b 100644
--- a/nixpkgs/pkgs/build-support/build-fhs-userenv/env.nix
+++ b/nixpkgs/pkgs/build-support/build-fhs-userenv/env.nix
@@ -1,8 +1,11 @@
 { stdenv, buildEnv, writeText, pkgs, pkgsi686Linux }:
 
-{ name, profile ? ""
-, targetPkgs ? pkgs: [], multiPkgs ? pkgs: []
-, extraBuildCommands ? "", extraBuildCommandsMulti ? ""
+{ name
+, profile ? ""
+, targetPkgs ? pkgs: []
+, multiPkgs ? pkgs: []
+, extraBuildCommands ? ""
+, extraBuildCommandsMulti ? ""
 , extraOutputsToInstall ? []
 }:
 
@@ -23,7 +26,8 @@
 
 let
   is64Bit = stdenv.hostPlatform.parsed.cpu.bits == 64;
-  isMultiBuild  = multiPkgs != null && is64Bit;
+  # multi-lib glibc is only supported on x86_64
+  isMultiBuild  = multiPkgs != null && stdenv.hostPlatform.system == "x86_64-linux";
   isTargetBuild = !isMultiBuild;
 
   # list of packages (usually programs) which are only be installed for the
diff --git a/nixpkgs/pkgs/build-support/cc-wrapper/default.nix b/nixpkgs/pkgs/build-support/cc-wrapper/default.nix
index 235d244a7c0d..804f59286c7a 100644
--- a/nixpkgs/pkgs/build-support/cc-wrapper/default.nix
+++ b/nixpkgs/pkgs/build-support/cc-wrapper/default.nix
@@ -234,6 +234,11 @@ stdenv.mkDerivation {
       wrap ${targetPrefix}gnatmake ${./gnat-wrapper.sh} $ccPath/${targetPrefix}gnatmake
       wrap ${targetPrefix}gnatbind ${./gnat-wrapper.sh} $ccPath/${targetPrefix}gnatbind
       wrap ${targetPrefix}gnatlink ${./gnat-wrapper.sh} $ccPath/${targetPrefix}gnatlink
+
+      # this symlink points to the unwrapped gnat's output "out". It is used by
+      # our custom gprconfig compiler description to find GNAT's ada runtime. See
+      # ../../development/tools/build-managers/gprbuild/{boot.nix, nixpkgs-gnat.xml}
+      ln -sf ${cc} $out/nix-support/gprconfig-gnat-unwrapped
     ''
 
     + optionalString cc.langD or false ''
@@ -467,7 +472,7 @@ stdenv.mkDerivation {
     + optionalString hostPlatform.isCygwin ''
       hardening_unsupported_flags+=" pic"
     '' + optionalString targetPlatform.isMinGW ''
-      hardening_unsupported_flags+=" stackprotector"
+      hardening_unsupported_flags+=" stackprotector fortify"
     '' + optionalString targetPlatform.isAvr ''
       hardening_unsupported_flags+=" stackprotector pic"
     '' + optionalString (targetPlatform.libc == "newlib") ''
diff --git a/nixpkgs/pkgs/build-support/coq/default.nix b/nixpkgs/pkgs/build-support/coq/default.nix
index ba300f2f8cf5..5f2b5e646b0b 100644
--- a/nixpkgs/pkgs/build-support/coq/default.nix
+++ b/nixpkgs/pkgs/build-support/coq/default.nix
@@ -27,6 +27,7 @@ in
   dropDerivationAttrs ? [],
   useDune2ifVersion ? (x: false),
   useDune2 ? false,
+  opam-name ? "coq-${pname}",
   ...
 }@args:
 let
@@ -34,7 +35,7 @@ let
     "version" "fetcher" "repo" "owner" "domain" "releaseRev"
     "displayVersion" "defaultVersion" "useMelquiondRemake"
     "release" "extraBuildInputs" "extraPropagatedBuildInputs" "namePrefix"
-    "meta" "useDune2ifVersion" "useDune2"
+    "meta" "useDune2ifVersion" "useDune2" "opam-name"
     "extraInstallFlags" "setCOQBIN" "mlPlugin"
     "dropAttrs" "dropDerivationAttrs" "keepAttrs" ] ++ dropAttrs) keepAttrs;
   fetch = import ../coq/meta-fetch/default.nix
@@ -90,9 +91,14 @@ stdenv.mkDerivation (removeAttrs ({
     extraInstallFlags;
 })
 // (optionalAttrs useDune2 {
+  buildPhase = ''
+    runHook preBuild
+    dune build -p ${opam-name} ''${enableParallelBuilding:+-j $NIX_BUILD_CORES}
+    runHook postBuild
+  '';
   installPhase = ''
     runHook preInstall
-    dune install --prefix=$out
+    dune install ${opam-name} --prefix=$out
     mv $out/lib/coq $out/lib/TEMPORARY
     mkdir $out/lib/coq/
     mv $out/lib/TEMPORARY $out/lib/coq/${coq.coq-version}
diff --git a/nixpkgs/pkgs/build-support/dhall-to-nix.nix b/nixpkgs/pkgs/build-support/dhall-to-nix.nix
index 3805656dfa0e..96cc16e16f36 100644
--- a/nixpkgs/pkgs/build-support/dhall-to-nix.nix
+++ b/nixpkgs/pkgs/build-support/dhall-to-nix.nix
@@ -15,12 +15,12 @@
     Note that this uses "import from derivation", meaning that Nix will perform
     a build during the evaluation phase if you use this `dhallToNix` utility
 */
-{ stdenv, dhall-nix }:
+{ stdenv, dhall-nix, writeText }:
 
 let
   dhallToNix = code :
     let
-      file = builtins.toFile "dhall-expression" code;
+      file = writeText "dhall-expression" code;
 
       drv = stdenv.mkDerivation {
         name = "dhall-compiled.nix";
diff --git a/nixpkgs/pkgs/build-support/docker/default.nix b/nixpkgs/pkgs/build-support/docker/default.nix
index 4bda09670abf..9e4709dd9bf8 100644
--- a/nixpkgs/pkgs/build-support/docker/default.nix
+++ b/nixpkgs/pkgs/build-support/docker/default.nix
@@ -1,65 +1,63 @@
-{
-  bashInteractive,
-  buildPackages,
-  cacert,
-  callPackage,
-  closureInfo,
-  coreutils,
-  docker,
-  e2fsprogs,
-  fakeroot,
-  findutils,
-  go,
-  jq,
-  jshon,
-  lib,
-  makeWrapper,
-  moreutils,
-  nix,
-  pigz,
-  rsync,
-  runCommand,
-  runtimeShell,
-  shadow,
-  skopeo,
-  storeDir ? builtins.storeDir,
-  substituteAll,
-  symlinkJoin,
-  util-linux,
-  vmTools,
-  writeReferencesToFile,
-  writeScript,
-  writeText,
-  writeTextDir,
-  writePython3,
-  system,  # Note: This is the cross system we're compiling for
+{ bashInteractive
+, buildPackages
+, cacert
+, callPackage
+, closureInfo
+, coreutils
+, e2fsprogs
+, fakeroot
+, findutils
+, go
+, jq
+, jshon
+, lib
+, makeWrapper
+, moreutils
+, nix
+, pigz
+, pkgs
+, rsync
+, runCommand
+, runtimeShell
+, shadow
+, skopeo
+, storeDir ? builtins.storeDir
+, substituteAll
+, symlinkJoin
+, util-linux
+, vmTools
+, writeReferencesToFile
+, writeScript
+, writeText
+, writeTextDir
+, writePython3
+, system
+, # Note: This is the cross system we're compiling for
 }:
 
 let
 
-  inherit (lib)
-    optionals
-    ;
-
-  mkDbExtraCommand = contents: let
-    contentsList = if builtins.isList contents then contents else [ contents ];
-  in ''
-    echo "Generating the nix database..."
-    echo "Warning: only the database of the deepest Nix layer is loaded."
-    echo "         If you want to use nix commands in the container, it would"
-    echo "         be better to only have one layer that contains a nix store."
-
-    export NIX_REMOTE=local?root=$PWD
-    # A user is required by nix
-    # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
-    export USER=nobody
-    ${buildPackages.nix}/bin/nix-store --load-db < ${closureInfo {rootPaths = contentsList;}}/registration
-
-    mkdir -p nix/var/nix/gcroots/docker/
-    for i in ${lib.concatStringsSep " " contentsList}; do
-    ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
-    done;
-  '';
+  mkDbExtraCommand = contents:
+    let
+      contentsList = if builtins.isList contents then contents else [ contents ];
+    in
+    ''
+      echo "Generating the nix database..."
+      echo "Warning: only the database of the deepest Nix layer is loaded."
+      echo "         If you want to use nix commands in the container, it would"
+      echo "         be better to only have one layer that contains a nix store."
+
+      export NIX_REMOTE=local?root=$PWD
+      # A user is required by nix
+      # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
+      export USER=nobody
+      ${buildPackages.nix}/bin/nix-store --load-db < ${closureInfo {rootPaths = contentsList;}}/registration
+
+      mkdir -p nix/var/nix/gcroots/docker/
+      for i in ${lib.concatStringsSep " " contentsList}; do
+      ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
+      done;
+    '';
 
   # The OCI Image specification recommends that configurations use values listed
   # in the Go Language document for GOARCH.
@@ -70,14 +68,14 @@ let
 
 in
 rec {
-
   examples = callPackage ./examples.nix {
     inherit buildImage buildLayeredImage fakeNss pullImage shadowSetup buildImageWithNixDb;
   };
 
-  pullImage = let
-    fixName = name: builtins.replaceStrings ["/" ":"] ["-" "-"] name;
-  in
+  pullImage =
+    let
+      fixName = name: builtins.replaceStrings [ "/" ":" ] [ "-" "-" ] name;
+    in
     { imageName
       # To find the digest of an image, you can use skopeo:
       # see doc/functions.xml
@@ -96,60 +94,46 @@ rec {
     , name ? fixName "docker-image-${finalImageName}-${finalImageTag}.tar"
     }:
 
-    runCommand name {
-      inherit imageDigest;
-      imageName = finalImageName;
-      imageTag = finalImageTag;
-      impureEnvVars = lib.fetchers.proxyImpureEnvVars;
-      outputHashMode = "flat";
-      outputHashAlgo = "sha256";
-      outputHash = sha256;
-
-      nativeBuildInputs = lib.singleton skopeo;
-      SSL_CERT_FILE = "${cacert.out}/etc/ssl/certs/ca-bundle.crt";
-
-      sourceURL = "docker://${imageName}@${imageDigest}";
-      destNameTag = "${finalImageName}:${finalImageTag}";
-    } ''
+    runCommand name
+      {
+        inherit imageDigest;
+        imageName = finalImageName;
+        imageTag = finalImageTag;
+        impureEnvVars = lib.fetchers.proxyImpureEnvVars;
+        outputHashMode = "flat";
+        outputHashAlgo = "sha256";
+        outputHash = sha256;
+
+        nativeBuildInputs = lib.singleton skopeo;
+        SSL_CERT_FILE = "${cacert.out}/etc/ssl/certs/ca-bundle.crt";
+
+        sourceURL = "docker://${imageName}@${imageDigest}";
+        destNameTag = "${finalImageName}:${finalImageTag}";
+      } ''
       skopeo \
-        --src-tls-verify=${lib.boolToString tlsVerify} \
         --insecure-policy \
         --tmpdir=$TMPDIR \
         --override-os ${os} \
         --override-arch ${arch} \
-        copy "$sourceURL" "docker-archive://$out:$destNameTag" \
+        copy \
+        --src-tls-verify=${lib.boolToString tlsVerify} \
+        "$sourceURL" "docker-archive://$out:$destNameTag" \
         | cat  # pipe through cat to force-disable progress bar
     '';
 
   # We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
   # And we cannot untar it, because then we cannot preserve permissions etc.
-  tarsum = runCommand "tarsum" {
-    nativeBuildInputs = [ go ];
-  } ''
-    mkdir tarsum
-    cd tarsum
-
-    cp ${./tarsum.go} tarsum.go
-    export GOPATH=$(pwd)
-    export GOCACHE="$TMPDIR/go-cache"
-    export GO111MODULE=off
-    mkdir -p src/github.com/docker/docker/pkg
-    ln -sT ${docker.moby-src}/pkg/tarsum src/github.com/docker/docker/pkg/tarsum
-    go build
-
-    mkdir -p $out/bin
-
-    cp tarsum $out/bin/
-  '';
+  tarsum = pkgs.tarsum;
 
   # buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
-  mergeDrvs = {
-    derivations,
-    onlyDeps ? false
-  }:
-    runCommand "merge-drvs" {
-      inherit derivations onlyDeps;
-    } ''
+  mergeDrvs =
+    { derivations
+    , onlyDeps ? false
+    }:
+    runCommand "merge-drvs"
+      {
+        inherit derivations onlyDeps;
+      } ''
       if [[ -n "$onlyDeps" ]]; then
         echo $derivations > $out
         exit 0
@@ -197,104 +181,105 @@ rec {
   '';
 
   # Run commands in a virtual machine.
-  runWithOverlay = {
-    name,
-    fromImage ? null,
-    fromImageName ? null,
-    fromImageTag ? null,
-    diskSize ? 1024,
-    preMount ? "",
-    postMount ? "",
-    postUmount ? ""
-  }:
-  let
-    result = vmTools.runInLinuxVM (
-      runCommand name {
-        preVM = vmTools.createEmptyImage {
-          size = diskSize;
-          fullName = "docker-run-disk";
-        };
-        inherit fromImage fromImageName fromImageTag;
-
-        nativeBuildInputs = [ util-linux e2fsprogs jshon rsync jq ];
-      } ''
-      mkdir disk
-      mkfs /dev/${vmTools.hd}
-      mount /dev/${vmTools.hd} disk
-      cd disk
-
-      if [[ -n "$fromImage" ]]; then
-        echo "Unpacking base image..."
-        mkdir image
-        tar -C image -xpf "$fromImage"
-
-        if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
-          parentID="$(
-            cat "image/manifest.json" |
-              jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \
-                --arg desiredTag "$fromImageName:$fromImageTag"
-          )"
-        else
-          echo "From-image name or tag wasn't set. Reading the first ID."
-          parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
-        fi
+  runWithOverlay =
+    { name
+    , fromImage ? null
+    , fromImageName ? null
+    , fromImageTag ? null
+    , diskSize ? 1024
+    , preMount ? ""
+    , postMount ? ""
+    , postUmount ? ""
+    }:
+    let
+      result = vmTools.runInLinuxVM (
+        runCommand name
+          {
+            preVM = vmTools.createEmptyImage {
+              size = diskSize;
+              fullName = "docker-run-disk";
+            };
+            inherit fromImage fromImageName fromImageTag;
+
+            nativeBuildInputs = [ util-linux e2fsprogs jshon rsync jq ];
+          } ''
+          mkdir disk
+          mkfs /dev/${vmTools.hd}
+          mount /dev/${vmTools.hd} disk
+          cd disk
+
+          if [[ -n "$fromImage" ]]; then
+            echo "Unpacking base image..."
+            mkdir image
+            tar -C image -xpf "$fromImage"
+
+            if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
+              parentID="$(
+                cat "image/manifest.json" |
+                  jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \
+                    --arg desiredTag "$fromImageName:$fromImageTag"
+              )"
+            else
+              echo "From-image name or tag wasn't set. Reading the first ID."
+              parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
+            fi
 
-        cat ./image/manifest.json  | jq -r '.[0].Layers | .[]' > layer-list
-      else
-        touch layer-list
-      fi
+            cat ./image/manifest.json  | jq -r '.[0].Layers | .[]' > layer-list
+          else
+            touch layer-list
+          fi
 
-      # Unpack all of the parent layers into the image.
-      lowerdir=""
-      extractionID=0
-      for layerTar in $(tac layer-list); do
-        echo "Unpacking layer $layerTar"
-        extractionID=$((extractionID + 1))
+          # Unpack all of the parent layers into the image.
+          lowerdir=""
+          extractionID=0
+          for layerTar in $(tac layer-list); do
+            echo "Unpacking layer $layerTar"
+            extractionID=$((extractionID + 1))
 
-        mkdir -p image/$extractionID/layer
-        tar -C image/$extractionID/layer -xpf image/$layerTar
-        rm image/$layerTar
+            mkdir -p image/$extractionID/layer
+            tar -C image/$extractionID/layer -xpf image/$layerTar
+            rm image/$layerTar
 
-        find image/$extractionID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
+            find image/$extractionID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
 
-        # Get the next lower directory and continue the loop.
-        lowerdir=image/$extractionID/layer''${lowerdir:+:}$lowerdir
-      done
+            # Get the next lower directory and continue the loop.
+            lowerdir=image/$extractionID/layer''${lowerdir:+:}$lowerdir
+          done
 
-      mkdir work
-      mkdir layer
-      mkdir mnt
+          mkdir work
+          mkdir layer
+          mkdir mnt
 
-      ${lib.optionalString (preMount != "") ''
-        # Execute pre-mount steps
-        echo "Executing pre-mount steps..."
-        ${preMount}
-      ''}
+          ${lib.optionalString (preMount != "") ''
+            # Execute pre-mount steps
+            echo "Executing pre-mount steps..."
+            ${preMount}
+          ''}
 
-      if [ -n "$lowerdir" ]; then
-        mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
-      else
-        mount --bind layer mnt
-      fi
+          if [ -n "$lowerdir" ]; then
+            mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
+          else
+            mount --bind layer mnt
+          fi
 
-      ${lib.optionalString (postMount != "") ''
-        # Execute post-mount steps
-        echo "Executing post-mount steps..."
-        ${postMount}
-      ''}
+          ${lib.optionalString (postMount != "") ''
+            # Execute post-mount steps
+            echo "Executing post-mount steps..."
+            ${postMount}
+          ''}
 
-      umount mnt
+          umount mnt
 
-      (
-        cd layer
-        cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"'
-        find . -type c -exec bash -c "$cmd" \;
-      )
+          (
+            cd layer
+            cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"'
+            find . -type c -exec bash -c "$cmd" \;
+          )
 
-      ${postUmount}
-      '');
+          ${postUmount}
+        '');
     in
-    runCommand name {} ''
+    runCommand name { } ''
       mkdir -p $out
       cd ${result}
       cp layer.tar json VERSION $out
@@ -310,7 +295,6 @@ rec {
       '';
     };
 
-
   # Create an executable shell script which has the coreutils in its
   # PATH. Since root scripts are executed in a blank environment, even
   # things like `ls` or `echo` will be missing.
@@ -323,94 +307,100 @@ rec {
     '';
 
   # Create a "layer" (set of files).
-  mkPureLayer = {
-    # Name of the layer
-    name,
-    # JSON containing configuration and metadata for this layer.
-    baseJson,
-    # Files to add to the layer.
-    contents ? null,
-    # When copying the contents into the image, preserve symlinks to
-    # directories (see `rsync -K`).  Otherwise, transform those symlinks
-    # into directories.
-    keepContentsDirlinks ? false,
-    # Additional commands to run on the layer before it is tar'd up.
-    extraCommands ? "", uid ? 0, gid ? 0
-  }:
-    runCommand "docker-layer-${name}" {
-      inherit baseJson contents extraCommands;
-      nativeBuildInputs = [ jshon rsync tarsum ];
-    }
-    ''
-      mkdir layer
-      if [[ -n "$contents" ]]; then
-        echo "Adding contents..."
-        for item in $contents; do
-          echo "Adding $item"
-          rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
-        done
-      else
-        echo "No contents to add to layer."
-      fi
+  mkPureLayer =
+    {
+      # Name of the layer
+      name
+    , # JSON containing configuration and metadata for this layer.
+      baseJson
+    , # Files to add to the layer.
+      contents ? null
+    , # When copying the contents into the image, preserve symlinks to
+      # directories (see `rsync -K`).  Otherwise, transform those symlinks
+      # into directories.
+      keepContentsDirlinks ? false
+    , # Additional commands to run on the layer before it is tar'd up.
+      extraCommands ? ""
+    , uid ? 0
+    , gid ? 0
+    }:
+    runCommand "docker-layer-${name}"
+      {
+        inherit baseJson contents extraCommands;
+        nativeBuildInputs = [ jshon rsync tarsum ];
+      }
+      ''
+        mkdir layer
+        if [[ -n "$contents" ]]; then
+          echo "Adding contents..."
+          for item in $contents; do
+            echo "Adding $item"
+            rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
+          done
+        else
+          echo "No contents to add to layer."
+        fi
 
-      chmod ug+w layer
+        chmod ug+w layer
 
-      if [[ -n "$extraCommands" ]]; then
-        (cd layer; eval "$extraCommands")
-      fi
+        if [[ -n "$extraCommands" ]]; then
+          (cd layer; eval "$extraCommands")
+        fi
 
-      # Tar up the layer and throw it into 'layer.tar'.
-      echo "Packing layer..."
-      mkdir $out
-      tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee -p $out/layer.tar | tarsum)
+        # Tar up the layer and throw it into 'layer.tar'.
+        echo "Packing layer..."
+        mkdir $out
+        tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee -p $out/layer.tar | tarsum)
 
-      # Add a 'checksum' field to the JSON, with the value set to the
-      # checksum of the tarball.
-      cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
+        # Add a 'checksum' field to the JSON, with the value set to the
+        # checksum of the tarball.
+        cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
 
-      # Indicate to docker that we're using schema version 1.0.
-      echo -n "1.0" > $out/VERSION
+        # Indicate to docker that we're using schema version 1.0.
+        echo -n "1.0" > $out/VERSION
 
-      echo "Finished building layer '${name}'"
-    '';
+        echo "Finished building layer '${name}'"
+      '';
 
   # Make a "root" layer; required if we need to execute commands as a
   # privileged user on the image. The commands themselves will be
   # performed in a virtual machine sandbox.
-  mkRootLayer = {
-    # Name of the image.
-    name,
-    # Script to run as root. Bash.
-    runAsRoot,
-    # Files to add to the layer. If null, an empty layer will be created.
-    contents ? null,
-    # When copying the contents into the image, preserve symlinks to
-    # directories (see `rsync -K`).  Otherwise, transform those symlinks
-    # into directories.
-    keepContentsDirlinks ? false,
-    # JSON containing configuration and metadata for this layer.
-    baseJson,
-    # Existing image onto which to append the new layer.
-    fromImage ? null,
-    # Name of the image we're appending onto.
-    fromImageName ? null,
-    # Tag of the image we're appending onto.
-    fromImageTag ? null,
-    # How much disk to allocate for the temporary virtual machine.
-    diskSize ? 1024,
-    # Commands (bash) to run on the layer; these do not require sudo.
-    extraCommands ? ""
-  }:
+  mkRootLayer =
+    {
+      # Name of the image.
+      name
+    , # Script to run as root. Bash.
+      runAsRoot
+    , # Files to add to the layer. If null, an empty layer will be created.
+      contents ? null
+    , # When copying the contents into the image, preserve symlinks to
+      # directories (see `rsync -K`).  Otherwise, transform those symlinks
+      # into directories.
+      keepContentsDirlinks ? false
+    , # JSON containing configuration and metadata for this layer.
+      baseJson
+    , # Existing image onto which to append the new layer.
+      fromImage ? null
+    , # Name of the image we're appending onto.
+      fromImageName ? null
+    , # Tag of the image we're appending onto.
+      fromImageTag ? null
+    , # How much disk to allocate for the temporary virtual machine.
+      diskSize ? 1024
+    , # Commands (bash) to run on the layer; these do not require sudo.
+      extraCommands ? ""
+    }:
     # Generate an executable script from the `runAsRoot` text.
     let
       runAsRootScript = shellScript "run-as-root.sh" runAsRoot;
       extraCommandsScript = shellScript "extra-commands.sh" extraCommands;
-    in runWithOverlay {
+    in
+    runWithOverlay {
       name = "docker-layer-${name}";
 
       inherit fromImage fromImageName fromImageTag diskSize;
 
-      preMount = lib.optionalString (contents != null && contents != []) ''
+      preMount = lib.optionalString (contents != null && contents != [ ]) ''
         echo "Adding contents..."
         for item in ${toString contents}; do
           echo "Adding $item..."
@@ -462,11 +452,12 @@ rec {
       '';
     };
 
-  buildLayeredImage = {name, ...}@args:
+  buildLayeredImage = { name, ... }@args:
     let
       stream = streamLayeredImage args;
     in
-      runCommand "${baseNameOf name}.tar.gz" {
+    runCommand "${baseNameOf name}.tar.gz"
+      {
         inherit (stream) imageName;
         passthru = { inherit (stream) imageTag; };
         nativeBuildInputs = [ pigz ];
@@ -478,40 +469,45 @@ rec {
   # 4. compute the layer id
   # 5. put the layer in the image
   # 6. repack the image
-  buildImage = args@{
-    # Image name.
-    name,
-    # Image tag, when null then the nix output hash will be used.
-    tag ? null,
-    # Parent image, to append to.
-    fromImage ? null,
-    # Name of the parent image; will be read from the image otherwise.
-    fromImageName ? null,
-    # Tag of the parent image; will be read from the image otherwise.
-    fromImageTag ? null,
-    # Files to put on the image (a nix store path or list of paths).
-    contents ? null,
-    # When copying the contents into the image, preserve symlinks to
-    # directories (see `rsync -K`).  Otherwise, transform those symlinks
-    # into directories.
-    keepContentsDirlinks ? false,
-    # Docker config; e.g. what command to run on the container.
-    config ? null,
-    # Optional bash script to run on the files prior to fixturizing the layer.
-    extraCommands ? "", uid ? 0, gid ? 0,
-    # Optional bash script to run as root on the image when provisioning.
-    runAsRoot ? null,
-    # Size of the virtual machine disk to provision when building the image.
-    diskSize ? 1024,
-    # Time of creation of the image.
-    created ? "1970-01-01T00:00:01Z",
-  }:
+  buildImage =
+    args@{
+      # Image name.
+      name
+    , # Image tag, when null then the nix output hash will be used.
+      tag ? null
+    , # Parent image, to append to.
+      fromImage ? null
+    , # Name of the parent image; will be read from the image otherwise.
+      fromImageName ? null
+    , # Tag of the parent image; will be read from the image otherwise.
+      fromImageTag ? null
+    , # Files to put on the image (a nix store path or list of paths).
+      contents ? null
+    , # When copying the contents into the image, preserve symlinks to
+      # directories (see `rsync -K`).  Otherwise, transform those symlinks
+      # into directories.
+      keepContentsDirlinks ? false
+    , # Docker config; e.g. what command to run on the container.
+      config ? null
+    , # Optional bash script to run on the files prior to fixturizing the layer.
+      extraCommands ? ""
+    , uid ? 0
+    , gid ? 0
+    , # Optional bash script to run as root on the image when provisioning.
+      runAsRoot ? null
+    , # Size of the virtual machine disk to provision when building the image.
+      diskSize ? 1024
+    , # Time of creation of the image.
+      created ? "1970-01-01T00:00:01Z"
+    ,
+    }:
 
     let
       baseName = baseNameOf name;
 
       # Create a JSON blob of the configuration. Set the date to unix zero.
-      baseJson = let
+      baseJson =
+        let
           pure = writeText "${baseName}-config.json" (builtins.toJSON {
             inherit created config;
             architecture = defaultArch;
@@ -520,38 +516,41 @@ rec {
           impure = runCommand "${baseName}-config.json"
             { nativeBuildInputs = [ jq ]; }
             ''
-               jq ".created = \"$(TZ=utc date --iso-8601="seconds")\"" ${pure} > $out
+              jq ".created = \"$(TZ=utc date --iso-8601="seconds")\"" ${pure} > $out
             '';
-        in if created == "now" then impure else pure;
+        in
+        if created == "now" then impure else pure;
 
       layer =
         if runAsRoot == null
-        then mkPureLayer {
-          name = baseName;
-          inherit baseJson contents keepContentsDirlinks extraCommands uid gid;
-        } else mkRootLayer {
-          name = baseName;
-          inherit baseJson fromImage fromImageName fromImageTag
-                  contents keepContentsDirlinks runAsRoot diskSize
-                  extraCommands;
-        };
-      result = runCommand "docker-image-${baseName}.tar.gz" {
-        nativeBuildInputs = [ jshon pigz coreutils findutils jq moreutils ];
-        # Image name must be lowercase
-        imageName = lib.toLower name;
-        imageTag = if tag == null then "" else tag;
-        inherit fromImage baseJson;
-        layerClosure = writeReferencesToFile layer;
-        passthru.buildArgs = args;
-        passthru.layer = layer;
-        passthru.imageTag =
-          if tag != null
+        then
+          mkPureLayer
+            {
+              name = baseName;
+              inherit baseJson contents keepContentsDirlinks extraCommands uid gid;
+            } else
+          mkRootLayer {
+            name = baseName;
+            inherit baseJson fromImage fromImageName fromImageTag
+              contents keepContentsDirlinks runAsRoot diskSize
+              extraCommands;
+          };
+      result = runCommand "docker-image-${baseName}.tar.gz"
+        {
+          nativeBuildInputs = [ jshon pigz coreutils findutils jq moreutils ];
+          # Image name must be lowercase
+          imageName = lib.toLower name;
+          imageTag = if tag == null then "" else tag;
+          inherit fromImage baseJson;
+          layerClosure = writeReferencesToFile layer;
+          passthru.buildArgs = args;
+          passthru.layer = layer;
+          passthru.imageTag =
+            if tag != null
             then tag
             else
               lib.head (lib.strings.splitString "-" (baseNameOf result.outPath));
-        # Docker can't be made to run darwin binaries
-        meta.badPlatforms = lib.platforms.darwin;
-      } ''
+        } ''
         ${lib.optionalString (tag == null) ''
           outName="$(basename "$out")"
           outHash=$(echo "$outName" | cut -d - -f 1)
@@ -751,14 +750,24 @@ rec {
         root:x:0:
         nobody:x:65534:
       '')
-      (runCommand "var-empty" {} ''
+      (writeTextDir "etc/nsswitch.conf" ''
+        hosts: files dns
+      '')
+      (runCommand "var-empty" { } ''
         mkdir -p $out/var/empty
       '')
     ];
   };
 
+  # This provides a /usr/bin/env, for shell scripts using the
+  # "#!/usr/bin/env executable" shebang.
+  usrBinEnv = runCommand "usr-bin-env" { } ''
+    mkdir -p $out/usr/bin
+    ln -s ${pkgs.coreutils}/bin/env $out/usr/bin
+  '';
+
   # This provides /bin/sh, pointing to bashInteractive.
-  binSh = runCommand "bin-sh" {} ''
+  binSh = runCommand "bin-sh" { } ''
     mkdir -p $out/bin
     ln -s ${bashInteractive}/bin/bash $out/bin/sh
   '';
@@ -779,189 +788,194 @@ rec {
     })
   );
 
-  streamLayeredImage = {
-    # Image Name
-    name,
-    # Image tag, the Nix's output hash will be used if null
-    tag ? null,
-    # Parent image, to append to.
-    fromImage ? null,
-    # Files to put on the image (a nix store path or list of paths).
-    contents ? [],
-    # Docker config; e.g. what command to run on the container.
-    config ? {},
-    # Time of creation of the image. Passing "now" will make the
-    # created date be the time of building.
-    created ? "1970-01-01T00:00:01Z",
-    # Optional bash script to run on the files prior to fixturizing the layer.
-    extraCommands ? "",
-    # Optional bash script to run inside fakeroot environment.
-    # Could be used for changing ownership of files in customisation layer.
-    fakeRootCommands ? "",
-    # We pick 100 to ensure there is plenty of room for extension. I
-    # believe the actual maximum is 128.
-    maxLayers ? 100,
-    # Whether to include store paths in the image. You generally want to leave
-    # this on, but tooling may disable this to insert the store paths more
-    # efficiently via other means, such as bind mounting the host store.
-    includeStorePaths ? true,
-  }:
-    assert
+  streamLayeredImage =
+    {
+      # Image Name
+      name
+    , # Image tag, the Nix's output hash will be used if null
+      tag ? null
+    , # Parent image, to append to.
+      fromImage ? null
+    , # Files to put on the image (a nix store path or list of paths).
+      contents ? [ ]
+    , # Docker config; e.g. what command to run on the container.
+      config ? { }
+    , # Time of creation of the image. Passing "now" will make the
+      # created date be the time of building.
+      created ? "1970-01-01T00:00:01Z"
+    , # Optional bash script to run on the files prior to fixturizing the layer.
+      extraCommands ? ""
+    , # Optional bash script to run inside fakeroot environment.
+      # Could be used for changing ownership of files in customisation layer.
+      fakeRootCommands ? ""
+    , # We pick 100 to ensure there is plenty of room for extension. I
+      # believe the actual maximum is 128.
+      maxLayers ? 100
+    , # Whether to include store paths in the image. You generally want to leave
+      # this on, but tooling may disable this to insert the store paths more
+      # efficiently via other means, such as bind mounting the host store.
+      includeStorePaths ? true
+    ,
+    }:
+      assert
       (lib.assertMsg (maxLayers > 1)
-      "the maxLayers argument of dockerTools.buildLayeredImage function must be greather than 1 (current value: ${toString maxLayers})");
-    let
-      baseName = baseNameOf name;
-
-      streamScript = writePython3 "stream" {} ./stream_layered_image.py;
-      baseJson = writeText "${baseName}-base.json" (builtins.toJSON {
-         inherit config;
-         architecture = defaultArch;
-         os = "linux";
-      });
-
-      contentsList = if builtins.isList contents then contents else [ contents ];
-
-      # We store the customisation layer as a tarball, to make sure that
-      # things like permissions set on 'extraCommands' are not overriden
-      # by Nix. Then we precompute the sha256 for performance.
-      customisationLayer = symlinkJoin {
-        name = "${baseName}-customisation-layer";
-        paths = contentsList;
-        inherit extraCommands fakeRootCommands;
-        nativeBuildInputs = [ fakeroot ];
-        postBuild = ''
-          mv $out old_out
-          (cd old_out; eval "$extraCommands" )
-
-          mkdir $out
-
-          fakeroot bash -c '
-            source $stdenv/setup
-            cd old_out
-            eval "$fakeRootCommands"
-            tar \
-              --sort name \
-              --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
-              --hard-dereference \
-              -cf $out/layer.tar .
-          '
-
-          sha256sum $out/layer.tar \
-            | cut -f 1 -d ' ' \
-            > $out/checksum
-        '';
-      };
-
-      closureRoots = optionals includeStorePaths /* normally true */ (
-        [ baseJson ] ++ contentsList
-      );
-      overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);
-
-      # These derivations are only created as implementation details of docker-tools,
-      # so they'll be excluded from the created images.
-      unnecessaryDrvs = [ baseJson overallClosure ];
-
-      conf = runCommand "${baseName}-conf.json" {
-        inherit fromImage maxLayers created;
-        imageName = lib.toLower name;
-        passthru.imageTag =
-          if tag != null
-            then tag
-            else
-              lib.head (lib.strings.splitString "-" (baseNameOf conf.outPath));
-        paths = buildPackages.referencesByPopularity overallClosure;
-        nativeBuildInputs = [ jq ];
-      } ''
-        ${if (tag == null) then ''
-          outName="$(basename "$out")"
-          outHash=$(echo "$outName" | cut -d - -f 1)
-
-          imageTag=$outHash
-        '' else ''
-          imageTag="${tag}"
-        ''}
+        "the maxLayers argument of dockerTools.buildLayeredImage function must be greater than 1 (current value: ${toString maxLayers})");
+      let
+        baseName = baseNameOf name;
+
+        streamScript = writePython3 "stream" { } ./stream_layered_image.py;
+        baseJson = writeText "${baseName}-base.json" (builtins.toJSON {
+          inherit config;
+          architecture = defaultArch;
+          os = "linux";
+        });
+
+        contentsList = if builtins.isList contents then contents else [ contents ];
+
+        # We store the customisation layer as a tarball, to make sure that
+        # things like permissions set on 'extraCommands' are not overridden
+        # by Nix. Then we precompute the sha256 for performance.
+        customisationLayer = symlinkJoin {
+          name = "${baseName}-customisation-layer";
+          paths = contentsList;
+          inherit extraCommands fakeRootCommands;
+          nativeBuildInputs = [ fakeroot ];
+          postBuild = ''
+            mv $out old_out
+            (cd old_out; eval "$extraCommands" )
+
+            mkdir $out
+
+            fakeroot bash -c '
+              source $stdenv/setup
+              cd old_out
+              eval "$fakeRootCommands"
+              tar \
+                --sort name \
+                --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
+                --hard-dereference \
+                -cf $out/layer.tar .
+            '
+
+            sha256sum $out/layer.tar \
+              | cut -f 1 -d ' ' \
+              > $out/checksum
+          '';
+        };
 
-        # convert "created" to iso format
-        if [[ "$created" != "now" ]]; then
-            created="$(date -Iseconds -d "$created")"
-        fi
+        closureRoots = lib.optionals includeStorePaths /* normally true */ (
+          [ baseJson ] ++ contentsList
+        );
+        overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);
+
+        # These derivations are only created as implementation details of docker-tools,
+        # so they'll be excluded from the created images.
+        unnecessaryDrvs = [ baseJson overallClosure ];
+
+        conf = runCommand "${baseName}-conf.json"
+          {
+            inherit fromImage maxLayers created;
+            imageName = lib.toLower name;
+            passthru.imageTag =
+              if tag != null
+              then tag
+              else
+                lib.head (lib.strings.splitString "-" (baseNameOf conf.outPath));
+            paths = buildPackages.referencesByPopularity overallClosure;
+            nativeBuildInputs = [ jq ];
+          } ''
+          ${if (tag == null) then ''
+            outName="$(basename "$out")"
+            outHash=$(echo "$outName" | cut -d - -f 1)
+
+            imageTag=$outHash
+          '' else ''
+            imageTag="${tag}"
+          ''}
+
+          # convert "created" to iso format
+          if [[ "$created" != "now" ]]; then
+              created="$(date -Iseconds -d "$created")"
+          fi
 
-        paths() {
-          cat $paths ${lib.concatMapStringsSep " "
-                         (path: "| (grep -v ${path} || true)")
-                         unnecessaryDrvs}
-        }
+          paths() {
+            cat $paths ${lib.concatMapStringsSep " "
+                           (path: "| (grep -v ${path} || true)")
+                           unnecessaryDrvs}
+          }
 
-        # Compute the number of layers that are already used by a potential
-        # 'fromImage' as well as the customization layer. Ensure that there is
-        # still at least one layer available to store the image contents.
-        usedLayers=0
+          # Compute the number of layers that are already used by a potential
+          # 'fromImage' as well as the customization layer. Ensure that there is
+          # still at least one layer available to store the image contents.
+          usedLayers=0
 
-        # subtract number of base image layers
-        if [[ -n "$fromImage" ]]; then
-          (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
-        fi
+          # subtract number of base image layers
+          if [[ -n "$fromImage" ]]; then
+            (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
+          fi
 
-        # one layer will be taken up by the customisation layer
-        (( usedLayers += 1 ))
+          # one layer will be taken up by the customisation layer
+          (( usedLayers += 1 ))
 
-        if ! (( $usedLayers < $maxLayers )); then
-          echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
-                    "'extraCommands', but only maxLayers=$maxLayers were" \
-                    "allowed. At least 1 layer is required to store contents."
-          exit 1
-        fi
-        availableLayers=$(( maxLayers - usedLayers ))
+          if ! (( $usedLayers < $maxLayers )); then
+            echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
+                      "'extraCommands', but only maxLayers=$maxLayers were" \
+                      "allowed. At least 1 layer is required to store contents."
+            exit 1
+          fi
+          availableLayers=$(( maxLayers - usedLayers ))
+
+          # Create $maxLayers worth of Docker Layers, one layer per store path
+          # unless there are more paths than $maxLayers. In that case, create
+          # $maxLayers-1 for the most popular layers, and smush the remaining
+          # store paths in to one final layer.
+          #
+          # The following code is fiddly w.r.t. ensuring every layer is
+          # created, and that no paths are missed. If you change the
+          # following lines, double-check that your code behaves properly
+          # when the number of layers equals:
+          #      maxLayers-1, maxLayers, and maxLayers+1, 0
+          store_layers="$(
+            paths |
+              jq -sR '
+                rtrimstr("\n") | split("\n")
+                  | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
+                  | map(select(length > 0))
+              ' \
+                --argjson maxLayers "$availableLayers"
+          )"
 
-        # Create $maxLayers worth of Docker Layers, one layer per store path
-        # unless there are more paths than $maxLayers. In that case, create
-        # $maxLayers-1 for the most popular layers, and smush the remainaing
-        # store paths in to one final layer.
-        #
-        # The following code is fiddly w.r.t. ensuring every layer is
-        # created, and that no paths are missed. If you change the
-        # following lines, double-check that your code behaves properly
-        # when the number of layers equals:
-        #      maxLayers-1, maxLayers, and maxLayers+1, 0
-        store_layers="$(
-          paths |
-            jq -sR '
-              rtrimstr("\n") | split("\n")
-                | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
-                | map(select(length > 0))
-            ' \
-              --argjson maxLayers "$availableLayers"
-        )"
-
-        cat ${baseJson} | jq '
-          . + {
-            "store_dir": $store_dir,
-            "from_image": $from_image,
-            "store_layers": $store_layers,
-            "customisation_layer", $customisation_layer,
-            "repo_tag": $repo_tag,
-            "created": $created
-          }
-          ' --arg store_dir "${storeDir}" \
-            --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
-            --argjson store_layers "$store_layers" \
-            --arg customisation_layer ${customisationLayer} \
-            --arg repo_tag "$imageName:$imageTag" \
-            --arg created "$created" |
-          tee $out
-      '';
-      result = runCommand "stream-${baseName}" {
-        inherit (conf) imageName;
-        passthru = {
-          inherit (conf) imageTag;
-
-          # Distinguish tarballs and exes at the Nix level so functions that
-          # take images can know in advance how the image is supposed to be used.
-          isExe = true;
-        };
-        nativeBuildInputs = [ makeWrapper ];
-      } ''
-        makeWrapper ${streamScript} $out --add-flags ${conf}
-      '';
-    in result;
+          cat ${baseJson} | jq '
+            . + {
+              "store_dir": $store_dir,
+              "from_image": $from_image,
+              "store_layers": $store_layers,
+              "customisation_layer": $customisation_layer,
+              "repo_tag": $repo_tag,
+              "created": $created
+            }
+            ' --arg store_dir "${storeDir}" \
+              --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
+              --argjson store_layers "$store_layers" \
+              --arg customisation_layer ${customisationLayer} \
+              --arg repo_tag "$imageName:$imageTag" \
+              --arg created "$created" |
+            tee $out
+        '';
+        result = runCommand "stream-${baseName}"
+          {
+            inherit (conf) imageName;
+            passthru = {
+              inherit (conf) imageTag;
+
+              # Distinguish tarballs and exes at the Nix level so functions that
+              # take images can know in advance how the image is supposed to be used.
+              isExe = true;
+            };
+            nativeBuildInputs = [ makeWrapper ];
+          } ''
+          makeWrapper ${streamScript} $out --add-flags ${conf}
+        '';
+      in
+      result;
 }
diff --git a/nixpkgs/pkgs/build-support/docker/examples.nix b/nixpkgs/pkgs/build-support/docker/examples.nix
index f890d0a77a26..c66aca56fea0 100644
--- a/nixpkgs/pkgs/build-support/docker/examples.nix
+++ b/nixpkgs/pkgs/build-support/docker/examples.nix
@@ -463,7 +463,7 @@ rec {
   layeredStoreSymlink =
   let
     target = pkgs.writeTextDir "dir/target" "Content doesn't matter.";
-    symlink = pkgs.runCommandNoCC "symlink" {} "ln -s ${target} $out";
+    symlink = pkgs.runCommand "symlink" {} "ln -s ${target} $out";
   in
     pkgs.dockerTools.buildLayeredImage {
       name = "layeredstoresymlink";
diff --git a/nixpkgs/pkgs/build-support/docker/tarsum.nix b/nixpkgs/pkgs/build-support/docker/tarsum.nix
new file mode 100644
index 000000000000..734c6b3d5aeb
--- /dev/null
+++ b/nixpkgs/pkgs/build-support/docker/tarsum.nix
@@ -0,0 +1,42 @@
+{ stdenv, go, docker, nixosTests }:
+
+stdenv.mkDerivation {
+  name = "tarsum";
+
+  nativeBuildInputs = [ go ];
+  disallowedReferences = [ go ];
+
+  dontUnpack = true;
+
+  CGO_ENABLED = 0;
+  GOFLAGS = "-trimpath";
+  GO111MODULE = "off";
+
+  buildPhase = ''
+    runHook preBuild
+    mkdir tarsum
+    cd tarsum
+    cp ${./tarsum.go} tarsum.go
+    export GOPATH=$(pwd)
+    export GOCACHE="$TMPDIR/go-cache"
+    mkdir -p src/github.com/docker/docker/pkg
+    ln -sT ${docker.moby-src}/pkg/tarsum src/github.com/docker/docker/pkg/tarsum
+    go build
+    runHook postBuild
+  '';
+
+  installPhase = ''
+    runHook preInstall
+    mkdir -p $out/bin
+    cp tarsum $out/bin/
+    runHook postInstall
+  '';
+
+  passthru = {
+    tests = {
+      dockerTools = nixosTests.docker-tools;
+    };
+  };
+
+  meta.platforms = go.meta.platforms;
+}
diff --git a/nixpkgs/pkgs/build-support/emacs/elpa.nix b/nixpkgs/pkgs/build-support/emacs/elpa.nix
index 08257ff25425..f7027dc499d8 100644
--- a/nixpkgs/pkgs/build-support/emacs/elpa.nix
+++ b/nixpkgs/pkgs/build-support/emacs/elpa.nix
@@ -21,7 +21,7 @@ in
 
 import ./generic.nix { inherit lib stdenv emacs texinfo writeText gcc; } ({
 
-  phases = "installPhase fixupPhase distPhase";
+  dontUnpack = true;
 
   installPhase = ''
     runHook preInstall
diff --git a/nixpkgs/pkgs/build-support/emacs/wrapper.nix b/nixpkgs/pkgs/build-support/emacs/wrapper.nix
index 6b53f3fdd954..ccbd58485ea8 100644
--- a/nixpkgs/pkgs/build-support/emacs/wrapper.nix
+++ b/nixpkgs/pkgs/build-support/emacs/wrapper.nix
@@ -27,7 +27,7 @@ let customEmacsPackages =
         # use the unstable MELPA version of magit
         magit = self.melpaPackages.magit;
       });
-in customEmacsPackages.emacs.pkgs.withPackages (epkgs: [ epkgs.evil epkgs.magit ])
+in customEmacsPackages.withPackages (epkgs: [ epkgs.evil epkgs.magit ])
 ```
 
 */
diff --git a/nixpkgs/pkgs/build-support/fetchgit/nix-prefetch-git b/nixpkgs/pkgs/build-support/fetchgit/nix-prefetch-git
index 8110d670e41c..6e869ab5e437 100755
--- a/nixpkgs/pkgs/build-support/fetchgit/nix-prefetch-git
+++ b/nixpkgs/pkgs/build-support/fetchgit/nix-prefetch-git
@@ -300,7 +300,9 @@ clone_user_rev() {
     local rev="${3:-HEAD}"
 
     if [ -n "$fetchLFS" ]; then
-        HOME=$TMPDIR
+        tmpHomePath="$(mktemp -d -p nix-prefetch-git-tmp-home-XXXXXXXXXX)"
+        exit_handlers+=(remove_tmpHomePath)
+        HOME="$tmpHomePath"
         git lfs install
     fi
 
@@ -396,6 +398,7 @@ print_results() {
   "date": "$(json_escape "$commitDateStrict8601")",
   "path": "$(json_escape "$finalPath")",
   "$(json_escape "$hashType")": "$(json_escape "$hash")",
+  "fetchLFS": $([[ -n "$fetchLFS" ]] && echo true || echo false),
   "fetchSubmodules": $([[ -n "$fetchSubmodules" ]] && echo true || echo false),
   "deepClone": $([[ -n "$deepClone" ]] && echo true || echo false),
   "leaveDotGit": $([[ -n "$leaveDotGit" ]] && echo true || echo false)
@@ -408,6 +411,10 @@ remove_tmpPath() {
     rm -rf "$tmpPath"
 }
 
+remove_tmpHomePath() {
+    rm -rf "$tmpHomePath"
+}
+
 if test -n "$QUIET"; then
     quiet_mode
 fi
diff --git a/nixpkgs/pkgs/build-support/fetchgitlab/default.nix b/nixpkgs/pkgs/build-support/fetchgitlab/default.nix
index 77512510a7c6..5b9dbd71c595 100644
--- a/nixpkgs/pkgs/build-support/fetchgitlab/default.nix
+++ b/nixpkgs/pkgs/build-support/fetchgitlab/default.nix
@@ -1,22 +1,26 @@
-{ fetchzip, lib }:
+{ fetchgit, fetchzip, lib }:
 
 # gitlab example
 { owner, repo, rev, domain ? "gitlab.com", name ? "source", group ? null
+, fetchSubmodules ? false, leaveDotGit ? false, deepClone ? false
 , ... # For hash agility
 } @ args:
 
-with lib;
-
 let
-  slug = concatStringsSep "/"
-    ((optional (group != null) group) ++ [ owner repo ]);
+  slug = lib.concatStringsSep "/" ((lib.optional (group != null) group) ++ [ owner repo ]);
+  escapedSlug = lib.replaceStrings [ "." "/" ] [ "%2E" "%2F" ] slug;
+  escapedRev = lib.replaceStrings [ "+" "%" "/" ] [ "%2B" "%25" "%2F" ] rev;
+  passthruAttrs = removeAttrs args [ "domain" "owner" "group" "repo" "rev" ];
+
+  useFetchGit = deepClone || fetchSubmodules || leaveDotGit;
+  fetcher = if useFetchGit then fetchgit else fetchzip;
 
-  escapedSlug = replaceStrings ["." "/"] ["%2E" "%2F"] slug;
-  escapedRev = replaceStrings ["+" "%" "/"] ["%2B" "%25" "%2F"] rev;
+  fetcherArgs = (if useFetchGit then {
+    inherit rev deepClone fetchSubmodules leaveDotGit;
+    url = "https://${domain}/${slug}.git";
+  } else {
+    url = "https://${domain}/api/v4/projects/${escapedSlug}/repository/archive.tar.gz?sha=${escapedRev}";
+  }) // passthruAttrs // { inherit name; };
 in
 
-fetchzip ({
-  inherit name;
-  url = "https://${domain}/api/v4/projects/${escapedSlug}/repository/archive.tar.gz?sha=${escapedRev}";
-  meta.homepage = "https://${domain}/${slug}/";
-} // removeAttrs args [ "domain" "owner" "group" "repo" "rev" ]) // { inherit rev; }
+fetcher fetcherArgs // { meta.homepage = "https://${domain}/${slug}/"; inherit rev; }
diff --git a/nixpkgs/pkgs/build-support/fetchgx/default.nix b/nixpkgs/pkgs/build-support/fetchgx/default.nix
index 3ccf5d273fc5..93f60c0a9cac 100644
--- a/nixpkgs/pkgs/build-support/fetchgx/default.nix
+++ b/nixpkgs/pkgs/build-support/fetchgx/default.nix
@@ -12,7 +12,9 @@ stdenvNoCC.mkDerivation {
   outputHashMode = "recursive";
   outputHash = sha256;
 
-  phases = [ "unpackPhase" "buildPhase" "installPhase" ];
+  dontConfigure = true;
+  doCheck = false;
+  doInstallCheck = false;
 
   buildPhase = ''
     export GOPATH=$(pwd)/vendor
diff --git a/nixpkgs/pkgs/build-support/fetchurl/mirrors.nix b/nixpkgs/pkgs/build-support/fetchurl/mirrors.nix
index e5095478002a..8dc4f4207b6c 100644
--- a/nixpkgs/pkgs/build-support/fetchurl/mirrors.nix
+++ b/nixpkgs/pkgs/build-support/fetchurl/mirrors.nix
@@ -43,9 +43,9 @@
 
     "http://ftp.nluug.nl/pub/gnu/"
     "http://mirrors.kernel.org/gnu/"
-    "ftp://mirror.cict.fr/gnu/"
-    "ftp://ftp.cs.tu-berlin.de/pub/gnu/"
-    "ftp://ftp.chg.ru/pub/gnu/"
+    "http://mirror.ibcp.fr/pub/gnu/"
+    "http://mirror.dogado.de/gnu/"
+    "http://mirror.tochlab.net/pub/gnu/"
     "ftp://ftp.funet.fi/pub/mirrors/ftp.gnu.org/gnu/"
 
     # This one is the master repository, and thus it's always up-to-date.
@@ -66,7 +66,6 @@
   gnupg = [
     "https://gnupg.org/ftp/gcrypt/"
     "http://www.ring.gr.jp/pub/net/"
-    "http://gd.tuwien.ac.at/privacy/"
     "http://mirrors.dotsrc.org/gcrypt/"
     "http://ftp.heanet.ie/mirrors/ftp.gnupg.org/gcrypt/"
     "http://www.mirrorservice.org/sites/ftp.gnupg.org/gcrypt/"
@@ -75,14 +74,13 @@
   # kernel.org's /pub (/pub/{linux,software}) tree.
   kernel = [
     "http://cdn.kernel.org/pub/"
-    "http://www.all.kernel.org/pub/"
     "http://ramses.wh2.tu-dresden.de/pub/mirrors/kernel.org/"
     "http://linux-kernel.uio.no/pub/"
     "http://kernel.osuosl.org/pub/"
     "ftp://ftp.funet.fi/pub/mirrors/ftp.kernel.org/pub/"
   ];
 
-  # Mirrors from https://download.kde.org/extra/download-mirrors.html
+  # Mirrors from https://download.kde.org/ls-lR.mirrorlist
   kde = [
     "https://download.kde.org/download.php?url="
     "https://ftp.gwdg.de/pub/linux/kde/"
@@ -90,7 +88,6 @@
     "http://mirrors.mit.edu/kde/"
     "https://mirrors.ustc.edu.cn/kde/"
     "http://ftp.funet.fi/pub/mirrors/ftp.kde.org/pub/kde/"
-    "ftp://ftp.kde.org/pub/kde/"
   ];
 
   # Gentoo files.
@@ -113,15 +110,12 @@
     "http://ftp.cc.uoc.gr/mirrors/nongnu.org/"
     "http://nongnu.uib.no/"
     "http://mirrors.fe.up.pt/pub/nongnu/"
-    "http://mirror.lihnidos.org/GNU/savannah/"
-    "http://savannah.mirror.si/"
     "http://ftp.acc.umu.se/mirror/gnu.org/savannah/"
     "http://ftp.twaren.net/Unix/NonGNU/"
     "http://ftp.yzu.edu.tw/pub/nongnu/"
     "http://mirror.rackdc.com/savannah/"
     "http://savannah-nongnu-org.ip-connect.vn.ua/"
     "http://www.mirrorservice.org/sites/download.savannah.gnu.org/releases/"
-    "http://savannah.spinellicreations.com/"
     "http://gnu.mirrors.pair.com/savannah/savannah/"
     "ftp://mirror.easyname.at/nongnu/"
     "ftp://mirror2.klaus-uwe.me/nongnu/"
@@ -129,11 +123,9 @@
     "ftp://mirror.csclub.uwaterloo.ca/nongnu/"
     "ftp://mirror.cedia.org.ec/nongnu"
     "ftp://ftp.igh.cnrs.fr/pub/nongnu/"
-    "ftp://mirror6.layerjet.com/nongnu/"
     "ftp://mirror.netcologne.de/savannah/"
     "ftp://nongnu.uib.no/pub/nongnu/"
     "ftp://mirrors.fe.up.pt/pub/nongnu/"
-    "ftp://savannah.mirror.si/savannah/"
     "ftp://ftp.twaren.net/Unix/NonGNU/"
     "ftp://ftp.yzu.edu.tw/pub/nongnu/"
     "ftp://savannah-nongnu-org.ip-connect.vn.ua/mirror/savannah.nongnu.org/"
@@ -149,8 +141,6 @@
   # BitlBee mirrors, see https://www.bitlbee.org/main.php/mirrors.html .
   bitlbee = [
     "http://get.bitlbee.org/"
-    "http://get.bitlbee.be/"
-    "http://get.us.bitlbee.org/"
     "http://ftp.snt.utwente.nl/pub/software/bitlbee/"
     "http://bitlbee.intergenia.de/"
   ];
@@ -165,8 +155,6 @@
     "ftp://ftp.imagemagick.org/pub/ImageMagick/"
     "http://ftp.fifi.org/ImageMagick/"
     "ftp://ftp.fifi.org/ImageMagick/"
-    "http://imagemagick.mirrorcatalogs.com/"
-    "ftp://imagemagick.mirrorcatalogs.com/imagemagick"
   ];
 
   # CPAN mirrors.
@@ -185,7 +173,6 @@
     "http://ftp.jaist.ac.jp/pub/Linux/CentOS-vault/"
     "http://mirrors.aliyun.com/centos-vault/"
     "https://mirror.chpc.utah.edu/pub/vault.centos.org/"
-    "https://mirror.its.sfu.ca/mirror/CentOS-vault/"
     "https://mirror.math.princeton.edu/pub/centos-vault/"
     "https://mirrors.tripadvisor.com/centos-vault/"
   ];
@@ -193,14 +180,10 @@
   # Debian.
   debian = [
     "http://httpredir.debian.org/debian/"
-    "ftp://ftp.au.debian.org/debian/"
     "ftp://ftp.de.debian.org/debian/"
-    "ftp://ftp.es.debian.org/debian/"
     "ftp://ftp.fr.debian.org/debian/"
-    "ftp://ftp.it.debian.org/debian/"
     "ftp://ftp.nl.debian.org/debian/"
     "ftp://ftp.ru.debian.org/debian/"
-    "ftp://ftp.debian.org/debian/"
     "http://ftp.debian.org/debian/"
     "http://archive.debian.org/debian-archive/debian/"
     "ftp://ftp.funet.fi/pub/mirrors/ftp.debian.org/debian/"
@@ -229,13 +212,6 @@
     "http://archives.fedoraproject.org/pub/archive/fedora/"
   ];
 
-  # Old SUSE distributions.  Unfortunately there is no master site,
-  # since SUSE actually delete their old distributions (see
-  # ftp://ftp.suse.com/pub/suse/discontinued/deleted-20070817/README.txt).
-  oldsuse = [
-    "ftp://ftp.gmd.de/ftp.suse.com-discontinued/"
-  ];
-
   # openSUSE.
   opensuse = [
     "http://opensuse.hro.nl/opensuse/distribution/"
@@ -257,10 +233,8 @@
     "http://ftp.unina.it/pub/linux/GNOME/"
     "http://fr2.rpmfind.net/linux/gnome.org/"
     "ftp://ftp.dit.upm.es/pub/GNOME/"
-    "ftp://ftp.no.gnome.org/pub/GNOME/"
     "http://ftp.acc.umu.se/pub/GNOME/"
     "http://ftp.belnet.be/mirror/ftp.gnome.org/"
-    "http://ftp.df.lth.se/pub/gnome/"
     "http://linorg.usp.br/gnome/"
     "http://mirror.aarnet.edu.au/pub/GNOME/"
     "ftp://ftp.cse.buffalo.edu/pub/Gnome/"
@@ -290,7 +264,7 @@
   # Apache mirrors (see http://www.apache.org/mirrors/).
   apache = [
     "https://www-eu.apache.org/dist/"
-    "https://www-us.apache.org/dist/"
+    "https://ftp.wayne.edu/apache/"
     "http://www.eu.apache.org/dist/"
     "ftp://ftp.fu-berlin.de/unix/www/apache/"
     "http://ftp.tudelft.nl/apache/"
@@ -305,13 +279,11 @@
   postgresql = [
     "http://ftp.postgresql.org/pub/"
     "ftp://ftp.postgresql.org/pub/"
-    "ftp://ftp-archives.postgresql.org/pub/"
   ];
 
   metalab = [
-    "ftp://mirrors.kernel.org/metalab/"
     "ftp://ftp.gwdg.de/pub/linux/metalab/"
-    "ftp://ftp.xemacs.org/sites/metalab.unc.edu/"
+    "ftp://ftp.metalab.unc.edu/pub/linux/"
   ];
 
   # Bioconductor mirrors (from http://bioconductor.org/about/mirrors)
@@ -325,7 +297,6 @@
     # http://watson.nci.nih.gov/bioc_mirror/
     "http://bioconductor.jp/packages/"
     "http://bioconductor.statistik.tu-dortmund.de/packages/"
-    "http://mirrors.ebi.ac.uk/bioconductor/packages/"
     "http://mirrors.ustc.edu.cn/bioc/"
   ];
 
@@ -340,14 +311,12 @@
   # Roy marples mirrors
   roy = [
     "http://roy.marples.name/downloads/"
-    "http://roy.aydogan.net/"
     "http://cflags.cc/roy/"
   ];
 
   # Sage mirrors (http://www.sagemath.org/mirrors.html)
   sageupstream = [
     # Africa
-    "http://sagemath.polytechnic.edu.na/spkg/upstream/"
     "ftp://ftp.sun.ac.za/pub/mirrors/www.sagemath.org/spkg/upstream/"
     "http://sagemath.mirror.ac.za/spkg/upstream/"
     "https://ftp.leg.uct.ac.za/pub/packages/sage/spkg/upstream/"
@@ -366,7 +335,6 @@
     "http://linorg.usp.br/sage/spkg/upstream"
 
     # Asia
-    "http://sage.asis.io/spkg/upstream/"
     "http://mirror.hust.edu.cn/sagemath/spkg/upstream/"
     "https://ftp.iitm.ac.in/sage/spkg/upstream/"
     "http://ftp.kaist.ac.kr/sage/spkg/upstream/"
@@ -378,11 +346,10 @@
     "https://mirror.yandex.ru/mirrors/sage.math.washington.edu/spkg/upstream/"
 
     # Australia
-    "http://echidna.maths.usyd.edu.au/sage/spkg/upstream/"
+    "http://mirror.aarnet.edu.au/pub/sage/spkg/upstream/"
 
     # Europe
     "http://sage.mirror.garr.it/mirrors/sage/spkg/upstream/"
-    "http://sunsite.rediris.es/mirror/sagemath/spkg/upstream/"
     "http://mirror.switch.ch/mirror/sagemath/spkg/upstream/"
     "http://mirrors.fe.up.pt/pub/sage/spkg/upstream/"
     "http://www-ftp.lip6.fr/pub/math/sagemath/spkg/upstream/"
@@ -399,8 +366,6 @@
     "http://ftp.openbsd.org/pub/OpenBSD/"
     "ftp://ftp.nluug.nl/pub/OpenBSD/"
     "ftp://ftp-stud.fht-esslingen.de/pub/OpenBSD/"
-    "ftp://ftp.halifax.rwth-aachen.de/pub/OpenBSD/"
-    "ftp://mirror.switch.ch/pub/OpenBSD/"
   ];
 
   # Steam Runtime mirrors
@@ -439,6 +404,5 @@
     "ftp://ftp.alsa-project.org/pub/"
     "http://alsa.cybermirror.org/"
     "http://www.mirrorservice.org/sites/ftp.alsa-project.org/pub/"
-    "http://alsa.mirror.fr/"
   ];
 }
diff --git a/nixpkgs/pkgs/build-support/fetchzip/default.nix b/nixpkgs/pkgs/build-support/fetchzip/default.nix
index cde4d4f579f4..b174c252fc0e 100644
--- a/nixpkgs/pkgs/build-support/fetchzip/default.nix
+++ b/nixpkgs/pkgs/build-support/fetchzip/default.nix
@@ -13,10 +13,17 @@
 , urls ? []
 , extraPostFetch ? ""
 , name ? "source"
+, # Allows setting the extension for the intermediate downloaded
+  # file. This can be used as a hint for the unpackCmdHooks to select
+  # an appropriate unpacking tool.
+  extension ? null
 , ... } @ args:
 
 (fetchurl (let
-  basename = baseNameOf (if url != "" then url else builtins.head urls);
+  tmpFilename =
+    if extension != null
+    then "download.${extension}"
+    else baseNameOf (if url != "" then url else builtins.head urls);
 in {
   inherit name;
 
@@ -30,7 +37,7 @@ in {
       mkdir "$unpackDir"
       cd "$unpackDir"
 
-      renamed="$TMPDIR/${basename}"
+      renamed="$TMPDIR/${tmpFilename}"
       mv "$downloadedFile" "$renamed"
       unpackFile "$renamed"
     ''
@@ -56,7 +63,7 @@ in {
     + ''
       chmod 755 "$out"
     '';
-} // removeAttrs args [ "stripRoot" "extraPostFetch" ])).overrideAttrs (x: {
+} // removeAttrs args [ "stripRoot" "extraPostFetch" "extension" ])).overrideAttrs (x: {
   # Hackety-hack: we actually need unzip hooks, too
   nativeBuildInputs = x.nativeBuildInputs ++ [ unzip ];
 })
diff --git a/nixpkgs/pkgs/build-support/kernel/make-initrd.nix b/nixpkgs/pkgs/build-support/kernel/make-initrd.nix
index 83d3bb65baec..23ce992f0d55 100644
--- a/nixpkgs/pkgs/build-support/kernel/make-initrd.nix
+++ b/nixpkgs/pkgs/build-support/kernel/make-initrd.nix
@@ -78,7 +78,7 @@ let
 in stdenvNoCC.mkDerivation rec {
   inherit name makeUInitrd extension uInitrdArch prepend;
 
-  ${if makeUInitrd then "uinitrdCompression" else null} = uInitrdCompression;
+  ${if makeUInitrd then "uInitrdCompression" else null} = uInitrdCompression;
 
   builder = ./make-initrd.sh;
 
diff --git a/nixpkgs/pkgs/build-support/kernel/make-initrd.sh b/nixpkgs/pkgs/build-support/kernel/make-initrd.sh
index c0619ef14ae0..70884741af3a 100644
--- a/nixpkgs/pkgs/build-support/kernel/make-initrd.sh
+++ b/nixpkgs/pkgs/build-support/kernel/make-initrd.sh
@@ -43,9 +43,9 @@ done
 (cd root && find * .[^.*] -print0 | sort -z | cpio -o -H newc -R +0:+0 --reproducible --null | eval -- $compress >> "$out/initrd")
 
 if [ -n "$makeUInitrd" ]; then
-    mkimage -A $uInitrdArch -O linux -T ramdisk -C "$uInitrdCompression" -d $out/initrd"$extension" $out/initrd.img
+    mkimage -A "$uInitrdArch" -O linux -T ramdisk -C "$uInitrdCompression" -d "$out/initrd" $out/initrd.img
     # Compatibility symlink
-    ln -s "initrd.img" "$out/initrd"
+    ln -sf "initrd.img" "$out/initrd"
 else
     ln -s "initrd" "$out/initrd$extension"
 fi
diff --git a/nixpkgs/pkgs/build-support/libredirect/default.nix b/nixpkgs/pkgs/build-support/libredirect/default.nix
index 4678d35442f6..42525ec98a7e 100644
--- a/nixpkgs/pkgs/build-support/libredirect/default.nix
+++ b/nixpkgs/pkgs/build-support/libredirect/default.nix
@@ -14,6 +14,8 @@ stdenv.mkDerivation rec {
   outputs = ["out" "hook"];
 
   buildPhase = ''
+    runHook preBuild
+
     $CC -Wall -std=c99 -O3 -fPIC -ldl -shared \
       ${lib.optionalString stdenv.isDarwin "-Wl,-install_name,$out/lib/$libName"} \
       -o "$libName" \
@@ -22,9 +24,18 @@ stdenv.mkDerivation rec {
     if [ -n "$doInstallCheck" ]; then
       $CC -Wall -std=c99 -O3 test.c -o test
     fi
+
+    runHook postBuild
   '';
 
+  # We want to retain debugging info to be able to use GDB on libredirect.so
+  # to more easily investigate which function overrides are missing or why
+  # existing ones do not have the intended effect.
+  dontStrip = true;
+
   installPhase = ''
+    runHook preInstall
+
     install -vD "$libName" "$out/lib/$libName"
 
     mkdir -p "$hook/nix-support"
@@ -36,6 +47,8 @@ stdenv.mkDerivation rec {
     export LD_PRELOAD="$out/lib/$libName"
     ''}
     SETUP_HOOK
+
+    runHook postInstall
   '';
 
   doInstallCheck = true;
diff --git a/nixpkgs/pkgs/build-support/libredirect/libredirect.c b/nixpkgs/pkgs/build-support/libredirect/libredirect.c
index dfa2978e9f44..5b0ef4856708 100644
--- a/nixpkgs/pkgs/build-support/libredirect/libredirect.c
+++ b/nixpkgs/pkgs/build-support/libredirect/libredirect.c
@@ -17,15 +17,22 @@ static int nrRedirects = 0;
 static char * from[MAX_REDIRECTS];
 static char * to[MAX_REDIRECTS];
 
+static int isInitialized = 0;
+
 // FIXME: might run too late.
 static void init() __attribute__((constructor));
 
 static void init()
 {
+    if (isInitialized) return;
+
     char * spec = getenv("NIX_REDIRECTS");
     if (!spec) return;
 
-    unsetenv("NIX_REDIRECTS");
+    // Ensure we only run this code once.
+    // We do not do `unsetenv("NIX_REDIRECTS")` to ensure that redirects
+    // also get initialized for subprocesses.
+    isInitialized = 1;
 
     char * spec2 = malloc(strlen(spec) + 1);
     strcpy(spec2, spec);
diff --git a/nixpkgs/pkgs/build-support/libredirect/test.c b/nixpkgs/pkgs/build-support/libredirect/test.c
index 722d1303771c..853f26bb5209 100644
--- a/nixpkgs/pkgs/build-support/libredirect/test.c
+++ b/nixpkgs/pkgs/build-support/libredirect/test.c
@@ -10,6 +10,7 @@
 #include <sys/wait.h>
 
 #define TESTPATH "/foo/bar/test"
+#define SUBTEST "./test sub"
 
 extern char **environ;
 
@@ -36,7 +37,11 @@ void test_system(void) {
     assert(system(TESTPATH) == 0);
 }
 
-int main(void)
+void test_subprocess(void) {
+    assert(system(SUBTEST) == 0);
+}
+
+int main(int argc, char *argv[])
 {
     FILE *testfp;
     int testfd;
@@ -56,6 +61,14 @@ int main(void)
 
     test_spawn();
     test_system();
+
+    // Only run subprocess if no arguments are given
+    // as the subprocess will be called without argument
+    // otherwise we will have infinite recursion
+    if (argc == 1) {
+        test_subprocess();
+    }
+
     test_execv();
 
     /* If all goes well, this is never reached because test_execv() replaces
diff --git a/nixpkgs/pkgs/build-support/nuget-to-nix/default.nix b/nixpkgs/pkgs/build-support/nuget-to-nix/default.nix
new file mode 100644
index 000000000000..a5fc4e209cd2
--- /dev/null
+++ b/nixpkgs/pkgs/build-support/nuget-to-nix/default.nix
@@ -0,0 +1,5 @@
+{ runCommand }:
+
+runCommand "nuget-to-nix" { preferLocalBuild = true; } ''
+  install -D -m755 ${./nuget-to-nix.sh} $out/bin/nuget-to-nix
+''
diff --git a/nixpkgs/pkgs/build-support/nuget-to-nix/nuget-to-nix.sh b/nixpkgs/pkgs/build-support/nuget-to-nix/nuget-to-nix.sh
new file mode 100755
index 000000000000..c14844bec59e
--- /dev/null
+++ b/nixpkgs/pkgs/build-support/nuget-to-nix/nuget-to-nix.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+if [ $# -eq 0 ]; then
+  >&2 echo "Usage: $0 [packages directory] > deps.nix"
+  exit 1
+fi
+
+pkgs=$1
+
+echo "{ fetchNuGet }: ["
+
+while read pkg_spec; do
+  { read pkg_name; read pkg_version; } < <(
+    # Build version part should be ignored: `3.0.0-beta2.20059.3+77df2220` -> `3.0.0-beta2.20059.3`
+    sed -nE 's/.*<id>([^<]*).*/\1/p; s/.*<version>([^<+]*).*/\1/p' "$pkg_spec")
+  pkg_sha256="$(nix-hash --type sha256 --flat --base32 "$(dirname "$pkg_spec")"/*.nupkg)"
+
+  echo "  (fetchNuGet { name = \"$pkg_name\"; version = \"$pkg_version\"; sha256 = \"$pkg_sha256\"; })"
+done < <(find $1 -name '*.nuspec' | sort)
+
+echo "]"
diff --git a/nixpkgs/pkgs/build-support/replace-secret/replace-secret.nix b/nixpkgs/pkgs/build-support/replace-secret/replace-secret.nix
index e04d1aed5f70..4881ba25f5d2 100644
--- a/nixpkgs/pkgs/build-support/replace-secret/replace-secret.nix
+++ b/nixpkgs/pkgs/build-support/replace-secret/replace-secret.nix
@@ -3,13 +3,14 @@
 stdenv.mkDerivation {
   name = "replace-secret";
   buildInputs = [ python3 ];
-  phases = [ "installPhase" "checkPhase" ];
+  dontUnpack = true;
   installPhase = ''
+    runHook preInstall
     install -D ${./replace-secret.py} $out/bin/replace-secret
     patchShebangs $out
+    runHook postInstall
   '';
-  doCheck = true;
-  checkPhase = ''
+  installCheckPhase = ''
     install -m 0600 ${./test/input_file} long_test
     $out/bin/replace-secret "replace this" ${./test/passwd} long_test
     $out/bin/replace-secret "and this" ${./test/rsa} long_test
diff --git a/nixpkgs/pkgs/build-support/rust/build-rust-crate/default.nix b/nixpkgs/pkgs/build-support/rust/build-rust-crate/default.nix
index e605c9550e53..13b153315f7f 100644
--- a/nixpkgs/pkgs/build-support/rust/build-rust-crate/default.nix
+++ b/nixpkgs/pkgs/build-support/rust/build-rust-crate/default.nix
@@ -4,313 +4,347 @@
 # This can be useful for deploying packages with NixOps, and to share
 # binary dependencies between projects.
 
-{ lib, stdenv, defaultCrateOverrides, fetchCrate, pkgsBuildBuild, rustc, rust
-, cargo, jq }:
+{ lib
+, stdenv
+, defaultCrateOverrides
+, fetchCrate
+, pkgsBuildBuild
+, rustc
+, rust
+, cargo
+, jq
+, libiconv
+}:
 
 let
-    # Create rustc arguments to link against the given list of dependencies
-    # and renames.
-    #
-    # See docs for crateRenames below.
-    mkRustcDepArgs = dependencies: crateRenames:
-      lib.concatMapStringsSep " " (dep:
+  # Create rustc arguments to link against the given list of dependencies
+  # and renames.
+  #
+  # See docs for crateRenames below.
+  mkRustcDepArgs = dependencies: crateRenames:
+    lib.concatMapStringsSep " "
+      (dep:
         let
-          normalizeName = lib.replaceStrings ["-"] ["_"];
+          normalizeName = lib.replaceStrings [ "-" ] [ "_" ];
           extern = normalizeName dep.libName;
           # Find a choice that matches in name and optionally version.
           findMatchOrUseExtern = choices:
-            lib.findFirst (choice:
-              (!(choice ? version)
-                 || choice.version == dep.version or ""))
-            { rename = extern; }
-            choices;
-          name = if lib.hasAttr dep.crateName crateRenames then
-            let choices = crateRenames.${dep.crateName};
-            in
-            normalizeName (
-              if builtins.isList choices
-              then (findMatchOrUseExtern choices).rename
-              else choices
-            )
-          else
-            extern;
-        in (if lib.any (x: x == "lib" || x == "rlib") dep.crateType then
-           " --extern ${name}=${dep.lib}/lib/lib${extern}-${dep.metadata}.rlib"
-         else
-           " --extern ${name}=${dep.lib}/lib/lib${extern}-${dep.metadata}${stdenv.hostPlatform.extensions.sharedLibrary}")
-      ) dependencies;
+            lib.findFirst
+              (choice:
+                (!(choice ? version)
+                  || choice.version == dep.version or ""))
+              { rename = extern; }
+              choices;
+          name =
+            if lib.hasAttr dep.crateName crateRenames then
+              let choices = crateRenames.${dep.crateName};
+              in
+              normalizeName (
+                if builtins.isList choices
+                then (findMatchOrUseExtern choices).rename
+                else choices
+              )
+            else
+              extern;
+        in
+        (if lib.any (x: x == "lib" || x == "rlib") dep.crateType then
+          " --extern ${name}=${dep.lib}/lib/lib${extern}-${dep.metadata}.rlib"
+        else
+          " --extern ${name}=${dep.lib}/lib/lib${extern}-${dep.metadata}${stdenv.hostPlatform.extensions.sharedLibrary}")
+      )
+      dependencies;
 
-   # Create feature arguments for rustc.
-   mkRustcFeatureArgs = lib.concatMapStringsSep " " (f: ''--cfg feature=\"${f}\"'');
+  # Create feature arguments for rustc.
+  mkRustcFeatureArgs = lib.concatMapStringsSep " " (f: ''--cfg feature=\"${f}\"'');
 
-   inherit (import ./log.nix { inherit lib; }) noisily echo_colored;
+  inherit (import ./log.nix { inherit lib; }) noisily echo_colored;
 
-   configureCrate = import ./configure-crate.nix {
-     inherit lib stdenv rust echo_colored noisily mkRustcDepArgs mkRustcFeatureArgs;
-   };
+  configureCrate = import ./configure-crate.nix {
+    inherit lib stdenv rust echo_colored noisily mkRustcDepArgs mkRustcFeatureArgs;
+  };
 
-   buildCrate = import ./build-crate.nix {
-     inherit lib stdenv mkRustcDepArgs mkRustcFeatureArgs rust;
-   };
+  buildCrate = import ./build-crate.nix {
+    inherit lib stdenv mkRustcDepArgs mkRustcFeatureArgs rust;
+  };
 
-   installCrate = import ./install-crate.nix { inherit stdenv; };
+  installCrate = import ./install-crate.nix { inherit stdenv; };
 
-   # Allow access to the rust attribute set from inside buildRustCrate, which
-   # has a parameter that shadows the name.
-   rustAttrs = rust;
+  # Allow access to the rust attribute set from inside buildRustCrate, which
+  # has a parameter that shadows the name.
+  rustAttrs = rust;
 in
 
-/* The overridable pkgs.buildRustCrate function.
- *
- * Any unrecognized parameters will be passed as to
- * the underlying stdenv.mkDerivation.
- */
- crate_: lib.makeOverridable (
-   # The rust compiler to use.
-   #
-   # Default: pkgs.rustc
-   { rust
-   # Whether to build a release version (`true`) or a debug
-   # version (`false`). Debug versions are faster to build
-   # but might be much slower at runtime.
-   , release
-   # Whether to print rustc invocations etc.
-   #
-   # Example: false
-   # Default: true
-   , verbose
-   # A list of rust/cargo features to enable while building the crate.
-   # Example: [ "std" "async" ]
-   , features
-   # Additional native build inputs for building this crate.
-   , nativeBuildInputs
-   # Additional build inputs for building this crate.
-   #
-   # Example: [ pkgs.openssl ]
-   , buildInputs
-   # Allows to override the parameters to buildRustCrate
-   # for any rust dependency in the transitive build tree.
-   #
-   # Default: pkgs.defaultCrateOverrides
-   #
-   # Example:
-   #
-   # pkgs.defaultCrateOverrides // {
-   #   hello = attrs: { buildInputs = [ openssl ]; };
-   # }
-   , crateOverrides
-   # Rust library dependencies, i.e. other libaries that were built
-   # with buildRustCrate.
-   , dependencies
-   # Rust build dependencies, i.e. other libaries that were built
-   # with buildRustCrate and are used by a build script.
-   , buildDependencies
-   # Specify the "extern" name of a library if it differs from the library target.
-   # See above for an extended explanation.
-   #
-   # Default: no renames.
-   #
-   # Example:
-   #
-   # `crateRenames` supports two formats.
-   #
-   # The simple version is an attrset that maps the
-   # `crateName`s of the dependencies to their alternative
-   # names.
-   #
-   # ```nix
-   # {
-   #   my_crate_name = "my_alternative_name";
-   #   # ...
-   # }
-   # ```
-   #
-   # The extended version is also keyed by the `crateName`s but allows
-   # different names for different crate versions:
-   #
-   # ```nix
-   # {
-   #   my_crate_name = [
-   #       { version = "1.2.3"; rename = "my_alternative_name01"; }
-   #       { version = "3.2.3"; rename = "my_alternative_name03"; }
-   #   ]
-   #   # ...
-   # }
-   # ```
-   #
-   # This roughly corresponds to the following snippet in Cargo.toml:
-   #
-   # ```toml
-   # [dependencies]
-   # my_alternative_name01 = { package = "my_crate_name", version = "0.1" }
-   # my_alternative_name03 = { package = "my_crate_name", version = "0.3" }
-   # ```
-   #
-   # Dependencies which use the lib target name as extern name, do not need
-   # to be specified in the crateRenames, even if their crate name differs.
-   #
-   # Including multiple versions of a crate is very popular during
-   # ecosystem transitions, e.g. from futures 0.1 to futures 0.3.
-   , crateRenames
-   # A list of extra options to pass to rustc.
-   #
-   # Example: [ "-Z debuginfo=2" ]
-   # Default: []
-   , extraRustcOpts
-   # Whether to enable building tests.
-   # Use true to enable.
-   # Default: false
-   , buildTests
-   # Passed to stdenv.mkDerivation.
-   , preUnpack
-   # Passed to stdenv.mkDerivation.
-   , postUnpack
-   # Passed to stdenv.mkDerivation.
-   , prePatch
-   # Passed to stdenv.mkDerivation.
-   , patches
-   # Passed to stdenv.mkDerivation.
-   , postPatch
-   # Passed to stdenv.mkDerivation.
-   , preConfigure
-   # Passed to stdenv.mkDerivation.
-   , postConfigure
-   # Passed to stdenv.mkDerivation.
-   , preBuild
-   # Passed to stdenv.mkDerivation.
-   , postBuild
-   # Passed to stdenv.mkDerivation.
-   , preInstall
-   # Passed to stdenv.mkDerivation.
-   , postInstall
-   }:
+  /* The overridable pkgs.buildRustCrate function.
+    *
+    * Any unrecognized parameters will be passed on to
+    * the underlying stdenv.mkDerivation.
+  */
+crate_: lib.makeOverridable
+  (
+    # The rust compiler to use.
+    #
+    # Default: pkgs.rustc
+    { rust
+      # Whether to build a release version (`true`) or a debug
+      # version (`false`). Debug versions are faster to build
+      # but might be much slower at runtime.
+    , release
+      # Whether to print rustc invocations etc.
+      #
+      # Example: false
+      # Default: true
+    , verbose
+      # A list of rust/cargo features to enable while building the crate.
+      # Example: [ "std" "async" ]
+    , features
+      # Additional native build inputs for building this crate.
+    , nativeBuildInputs
+      # Additional build inputs for building this crate.
+      #
+      # Example: [ pkgs.openssl ]
+    , buildInputs
+      # Allows overriding the parameters to buildRustCrate
+      # for any rust dependency in the transitive build tree.
+      #
+      # Default: pkgs.defaultCrateOverrides
+      #
+      # Example:
+      #
+      # pkgs.defaultCrateOverrides // {
+      #   hello = attrs: { buildInputs = [ openssl ]; };
+      # }
+    , crateOverrides
+      # Rust library dependencies, i.e. other libraries that were built
+      # with buildRustCrate.
+    , dependencies
+      # Rust build dependencies, i.e. other libraries that were built
+      # with buildRustCrate and are used by a build script.
+    , buildDependencies
+      # Specify the "extern" name of a library if it differs from the library target.
+      # See above for an extended explanation.
+      #
+      # Default: no renames.
+      #
+      # Example:
+      #
+      # `crateRenames` supports two formats.
+      #
+      # The simple version is an attrset that maps the
+      # `crateName`s of the dependencies to their alternative
+      # names.
+      #
+      # ```nix
+      # {
+      #   my_crate_name = "my_alternative_name";
+      #   # ...
+      # }
+      # ```
+      #
+      # The extended version is also keyed by the `crateName`s but allows
+      # different names for different crate versions:
+      #
+      # ```nix
+      # {
+      #   my_crate_name = [
+      #       { version = "1.2.3"; rename = "my_alternative_name01"; }
+      #       { version = "3.2.3"; rename = "my_alternative_name03"; }
+      #   ]
+      #   # ...
+      # }
+      # ```
+      #
+      # This roughly corresponds to the following snippet in Cargo.toml:
+      #
+      # ```toml
+      # [dependencies]
+      # my_alternative_name01 = { package = "my_crate_name", version = "0.1" }
+      # my_alternative_name03 = { package = "my_crate_name", version = "0.3" }
+      # ```
+      #
+      # Dependencies which use the lib target name as extern name, do not need
+      # to be specified in the crateRenames, even if their crate name differs.
+      #
+      # Including multiple versions of a crate is very popular during
+      # ecosystem transitions, e.g. from futures 0.1 to futures 0.3.
+    , crateRenames
+      # A list of extra options to pass to rustc.
+      #
+      # Example: [ "-Z debuginfo=2" ]
+      # Default: []
+    , extraRustcOpts
+      # Whether to enable building tests.
+      # Use true to enable.
+      # Default: false
+    , buildTests
+      # Passed to stdenv.mkDerivation.
+    , preUnpack
+      # Passed to stdenv.mkDerivation.
+    , postUnpack
+      # Passed to stdenv.mkDerivation.
+    , prePatch
+      # Passed to stdenv.mkDerivation.
+    , patches
+      # Passed to stdenv.mkDerivation.
+    , postPatch
+      # Passed to stdenv.mkDerivation.
+    , preConfigure
+      # Passed to stdenv.mkDerivation.
+    , postConfigure
+      # Passed to stdenv.mkDerivation.
+    , preBuild
+      # Passed to stdenv.mkDerivation.
+    , postBuild
+      # Passed to stdenv.mkDerivation.
+    , preInstall
+      # Passed to stdenv.mkDerivation.
+    , postInstall
+    }:
 
-let crate = crate_ // (lib.attrByPath [ crate_.crateName ] (attr: {}) crateOverrides crate_);
-    dependencies_ = dependencies;
-    buildDependencies_ = buildDependencies;
-    processedAttrs = [
-      "src" "nativeBuildInputs" "buildInputs" "crateBin" "crateLib" "libName" "libPath"
-      "buildDependencies" "dependencies" "features" "crateRenames"
-      "crateName" "version" "build" "authors" "colors" "edition"
-      "buildTests"
-    ];
-    extraDerivationAttrs = builtins.removeAttrs crate processedAttrs;
-    nativeBuildInputs_ = nativeBuildInputs;
-    buildInputs_ = buildInputs;
-    extraRustcOpts_ = extraRustcOpts;
-    buildTests_ = buildTests;
+    let
+      crate = crate_ // (lib.attrByPath [ crate_.crateName ] (attr: { }) crateOverrides crate_);
+      dependencies_ = dependencies;
+      buildDependencies_ = buildDependencies;
+      processedAttrs = [
+        "src"
+        "nativeBuildInputs"
+        "buildInputs"
+        "crateBin"
+        "crateLib"
+        "libName"
+        "libPath"
+        "buildDependencies"
+        "dependencies"
+        "features"
+        "crateRenames"
+        "crateName"
+        "version"
+        "build"
+        "authors"
+        "colors"
+        "edition"
+        "buildTests"
+      ];
+      extraDerivationAttrs = builtins.removeAttrs crate processedAttrs;
+      nativeBuildInputs_ = nativeBuildInputs;
+      buildInputs_ = buildInputs;
+      extraRustcOpts_ = extraRustcOpts;
+      buildTests_ = buildTests;
 
-    # crate2nix has a hack for the old bash based build script that did split
-    # entries at `,`. No we have to work around that hack.
-    # https://github.com/kolloch/crate2nix/blame/5b19c1b14e1b0e5522c3e44e300d0b332dc939e7/crate2nix/templates/build.nix.tera#L89
-    crateBin = lib.filter (bin: !(bin ? name && bin.name == ",")) (crate.crateBin or []);
-    hasCrateBin = crate ? crateBin;
-in
-stdenv.mkDerivation (rec {
+      # crate2nix has a hack for the old bash based build script that did split
+      # entries at `,`. Now we have to work around that hack.
+      # https://github.com/kolloch/crate2nix/blame/5b19c1b14e1b0e5522c3e44e300d0b332dc939e7/crate2nix/templates/build.nix.tera#L89
+      crateBin = lib.filter (bin: !(bin ? name && bin.name == ",")) (crate.crateBin or [ ]);
+      hasCrateBin = crate ? crateBin;
+    in
+    stdenv.mkDerivation (rec {
 
-    inherit (crate) crateName;
-    inherit
-      preUnpack
-      postUnpack
-      prePatch
-      patches
-      postPatch
-      preConfigure
-      postConfigure
-      preBuild
-      postBuild
-      preInstall
-      postInstall
-      buildTests
-    ;
+      inherit (crate) crateName;
+      inherit
+        preUnpack
+        postUnpack
+        prePatch
+        patches
+        postPatch
+        preConfigure
+        postConfigure
+        preBuild
+        postBuild
+        preInstall
+        postInstall
+        buildTests
+        ;
 
-    src = crate.src or (fetchCrate { inherit (crate) crateName version sha256; });
-    name = "rust_${crate.crateName}-${crate.version}${lib.optionalString buildTests_ "-test"}";
-    version = crate.version;
-    depsBuildBuild = [ pkgsBuildBuild.stdenv.cc ];
-    nativeBuildInputs = [ rust stdenv.cc cargo jq ] ++ (crate.nativeBuildInputs or []) ++ nativeBuildInputs_;
-    buildInputs = (crate.buildInputs or []) ++ buildInputs_;
-    dependencies = map lib.getLib dependencies_;
-    buildDependencies = map lib.getLib buildDependencies_;
+      src = crate.src or (fetchCrate { inherit (crate) crateName version sha256; });
+      name = "rust_${crate.crateName}-${crate.version}${lib.optionalString buildTests_ "-test"}";
+      version = crate.version;
+      depsBuildBuild = [ pkgsBuildBuild.stdenv.cc ];
+      nativeBuildInputs = [ rust stdenv.cc cargo jq ] ++ (crate.nativeBuildInputs or [ ]) ++ nativeBuildInputs_;
+      buildInputs = lib.optionals stdenv.isDarwin [ libiconv ] ++ (crate.buildInputs or [ ]) ++ buildInputs_;
+      dependencies = map lib.getLib dependencies_;
+      buildDependencies = map lib.getLib buildDependencies_;
 
-    completeDeps = lib.unique (dependencies ++ lib.concatMap (dep: dep.completeDeps) dependencies);
-    completeBuildDeps = lib.unique (
-      buildDependencies
-      ++ lib.concatMap (dep: dep.completeBuildDeps ++ dep.completeDeps) buildDependencies
-    );
+      completeDeps = lib.unique (dependencies ++ lib.concatMap (dep: dep.completeDeps) dependencies);
+      completeBuildDeps = lib.unique (
+        buildDependencies
+          ++ lib.concatMap (dep: dep.completeBuildDeps ++ dep.completeDeps) buildDependencies
+      );
 
-    # Create a list of features that are enabled by the crate itself and
-    # through the features argument of buildRustCrate. Exclude features
-    # with a forward slash, since they are passed through to dependencies.
-    crateFeatures = lib.optionals (crate ? features)
-      (builtins.filter (f: !lib.hasInfix "/" f) (crate.features ++ features));
+      # Create a list of features that are enabled by the crate itself and
+      # through the features argument of buildRustCrate. Exclude features
+      # with a forward slash, since they are passed through to dependencies.
+      crateFeatures = lib.optionals (crate ? features)
+        (builtins.filter (f: !lib.hasInfix "/" f) (crate.features ++ features));
 
-    libName = if crate ? libName then crate.libName else crate.crateName;
-    libPath = if crate ? libPath then crate.libPath else "";
+      libName = if crate ? libName then crate.libName else crate.crateName;
+      libPath = if crate ? libPath then crate.libPath else "";
 
-    # Seed the symbol hashes with something unique every time.
-    # https://doc.rust-lang.org/1.0.0/rustc/metadata/loader/index.html#frobbing-symbols
-    metadata = let
-      depsMetadata = lib.foldl' (str: dep: str + dep.metadata) "" (dependencies ++ buildDependencies);
-      hashedMetadata = builtins.hashString "sha256"
-        (crateName + "-" + crateVersion + "___" + toString (mkRustcFeatureArgs crateFeatures) +
-          "___" + depsMetadata + "___" + rustAttrs.toRustTarget stdenv.hostPlatform);
-      in lib.substring 0 10 hashedMetadata;
+      # Seed the symbol hashes with something unique every time.
+      # https://doc.rust-lang.org/1.0.0/rustc/metadata/loader/index.html#frobbing-symbols
+      metadata =
+        let
+          depsMetadata = lib.foldl' (str: dep: str + dep.metadata) "" (dependencies ++ buildDependencies);
+          hashedMetadata = builtins.hashString "sha256"
+            (crateName + "-" + crateVersion + "___" + toString (mkRustcFeatureArgs crateFeatures) +
+              "___" + depsMetadata + "___" + rustAttrs.toRustTarget stdenv.hostPlatform);
+        in
+        lib.substring 0 10 hashedMetadata;
 
-    build = crate.build or "";
-    # Either set to a concrete sub path to the crate root
-    # or use `null` for auto-detect.
-    workspace_member = crate.workspace_member or ".";
-    crateVersion = crate.version;
-    crateDescription = crate.description or "";
-    crateAuthors = if crate ? authors && lib.isList crate.authors then crate.authors else [];
-    crateHomepage = crate.homepage or "";
-    crateType =
-      if lib.attrByPath ["procMacro"] false crate then ["proc-macro"] else
-      if lib.attrByPath ["plugin"] false crate then ["dylib"] else
-        (crate.type or ["lib"]);
-    colors = lib.attrByPath [ "colors" ] "always" crate;
-    extraLinkFlags = lib.concatStringsSep " " (crate.extraLinkFlags or []);
-    edition = crate.edition or null;
-    extraRustcOpts =
-      lib.optionals (crate ? extraRustcOpts) crate.extraRustcOpts
-      ++ extraRustcOpts_
-      ++ (lib.optional (edition != null) "--edition ${edition}");
+      build = crate.build or "";
+      # Either set to a concrete sub path to the crate root
+      # or use `null` for auto-detect.
+      workspace_member = crate.workspace_member or ".";
+      crateVersion = crate.version;
+      crateDescription = crate.description or "";
+      crateAuthors = if crate ? authors && lib.isList crate.authors then crate.authors else [ ];
+      crateHomepage = crate.homepage or "";
+      crateType =
+        if lib.attrByPath [ "procMacro" ] false crate then [ "proc-macro" ] else
+        if lib.attrByPath [ "plugin" ] false crate then [ "dylib" ] else
+        (crate.type or [ "lib" ]);
+      colors = lib.attrByPath [ "colors" ] "always" crate;
+      extraLinkFlags = lib.concatStringsSep " " (crate.extraLinkFlags or [ ]);
+      edition = crate.edition or null;
+      extraRustcOpts =
+        lib.optionals (crate ? extraRustcOpts) crate.extraRustcOpts
+          ++ extraRustcOpts_
+          ++ (lib.optional (edition != null) "--edition ${edition}");
 
 
-    configurePhase = configureCrate {
-      inherit crateName buildDependencies completeDeps completeBuildDeps crateDescription
-              crateFeatures crateRenames libName build workspace_member release libPath crateVersion
-              extraLinkFlags extraRustcOpts
-              crateAuthors crateHomepage verbose colors;
-    };
-    buildPhase = buildCrate {
-      inherit crateName dependencies
-              crateFeatures crateRenames libName release libPath crateType
-              metadata hasCrateBin crateBin verbose colors
-              extraRustcOpts buildTests;
-    };
-    installPhase = installCrate crateName metadata buildTests;
+      configurePhase = configureCrate {
+        inherit crateName buildDependencies completeDeps completeBuildDeps crateDescription
+          crateFeatures crateRenames libName build workspace_member release libPath crateVersion
+          extraLinkFlags extraRustcOpts
+          crateAuthors crateHomepage verbose colors;
+      };
+      buildPhase = buildCrate {
+        inherit crateName dependencies
+          crateFeatures crateRenames libName release libPath crateType
+          metadata hasCrateBin crateBin verbose colors
+          extraRustcOpts buildTests;
+      };
+      installPhase = installCrate crateName metadata buildTests;
 
-    # depending on the test setting we are either producing something with bins
-    # and libs or just test binaries
-    outputs = if buildTests then [ "out" ] else [ "out" "lib" ];
-    outputDev = if buildTests then [ "out" ] else  [ "lib" ];
+      # depending on the test setting we are either producing something with bins
+      # and libs or just test binaries
+      outputs = if buildTests then [ "out" ] else [ "out" "lib" ];
+      outputDev = if buildTests then [ "out" ] else [ "lib" ];
 
-} // extraDerivationAttrs
-)) {
+    } // extraDerivationAttrs
+    )
+  )
+{
   rust = rustc;
   release = crate_.release or true;
   verbose = crate_.verbose or true;
-  extraRustcOpts = [];
-  features = [];
-  nativeBuildInputs = [];
-  buildInputs = [];
+  extraRustcOpts = [ ];
+  features = [ ];
+  nativeBuildInputs = [ ];
+  buildInputs = [ ];
   crateOverrides = defaultCrateOverrides;
   preUnpack = crate_.preUnpack or "";
   postUnpack = crate_.postUnpack or "";
   prePatch = crate_.prePatch or "";
-  patches = crate_.patches or [];
+  patches = crate_.patches or [ ];
   postPatch = crate_.postPatch or "";
   preConfigure = crate_.preConfigure or "";
   postConfigure = crate_.postConfigure or "";
@@ -318,8 +352,8 @@ stdenv.mkDerivation (rec {
   postBuild = crate_.postBuild or "";
   preInstall = crate_.preInstall or "";
   postInstall = crate_.postInstall or "";
-  dependencies = crate_.dependencies or [];
-  buildDependencies = crate_.buildDependencies or [];
-  crateRenames = crate_.crateRenames or {};
+  dependencies = crate_.dependencies or [ ];
+  buildDependencies = crate_.buildDependencies or [ ];
+  crateRenames = crate_.crateRenames or { };
   buildTests = crate_.buildTests or false;
 }
diff --git a/nixpkgs/pkgs/build-support/rust/default-crate-overrides.nix b/nixpkgs/pkgs/build-support/rust/default-crate-overrides.nix
index 61cec2a6abab..e14cbcc1183d 100644
--- a/nixpkgs/pkgs/build-support/rust/default-crate-overrides.nix
+++ b/nixpkgs/pkgs/build-support/rust/default-crate-overrides.nix
@@ -1,7 +1,29 @@
-{ lib, stdenv, pkg-config, curl, darwin, libiconv, libgit2, libssh2,
-  openssl, sqlite, zlib, dbus, dbus-glib, gdk-pixbuf, cairo, python3,
-  libsodium, postgresql, gmp, foundationdb, capnproto, nettle, clang,
-  llvmPackages, ... }:
+{ lib
+, stdenv
+, pkg-config
+, curl
+, darwin
+, libgit2
+, libssh2
+, openssl
+, sqlite
+, zlib
+, dbus
+, dbus-glib
+, gdk-pixbuf
+, cairo
+, python3
+, libsodium
+, postgresql
+, gmp
+, foundationdb
+, capnproto
+, nettle
+, clang
+, llvmPackages
+, linux-pam
+, ...
+}:
 
 let
   inherit (darwin.apple_sdk.frameworks) CoreFoundation Security;
@@ -17,20 +39,20 @@ in
 
   cargo = attrs: {
     buildInputs = [ openssl zlib curl ]
-      ++ lib.optionals stdenv.isDarwin [ CoreFoundation Security libiconv ];
+      ++ lib.optionals stdenv.isDarwin [ CoreFoundation Security ];
   };
 
   libz-sys = attrs: {
     nativeBuildInputs = [ pkg-config ];
     buildInputs = [ zlib ];
-    extraLinkFlags = ["-L${zlib.out}/lib"];
+    extraLinkFlags = [ "-L${zlib.out}/lib" ];
   };
 
   curl-sys = attrs: {
     nativeBuildInputs = [ pkg-config ];
     buildInputs = [ zlib curl ];
     propagatedBuildInputs = [ curl zlib ];
-    extraLinkFlags = ["-L${zlib.out}/lib"];
+    extraLinkFlags = [ "-L${zlib.out}/lib" ];
   };
 
   dbus = attrs: {
@@ -104,6 +126,10 @@ in
     buildInputs = [ openssl ];
   };
 
+  pam-sys = attr: {
+    buildInputs = [ linux-pam ];
+  };
+
   pq-sys = attr: {
     nativeBuildInputs = [ pkg-config ];
     buildInputs = [ postgresql ];
@@ -111,7 +137,7 @@ in
 
   rink = attrs: {
     buildInputs = [ gmp ];
-    crateBin = [ {  name = "rink"; path = "src/bin/rink.rs"; } ];
+    crateBin = [{ name = "rink"; path = "src/bin/rink.rs"; }];
   };
 
   security-framework-sys = attr: {
diff --git a/nixpkgs/pkgs/build-support/rust/default.nix b/nixpkgs/pkgs/build-support/rust/default.nix
index a3b280050efe..845437d46757 100644
--- a/nixpkgs/pkgs/build-support/rust/default.nix
+++ b/nixpkgs/pkgs/build-support/rust/default.nix
@@ -8,10 +8,8 @@
 , cargoSetupHook
 , fetchCargoTarball
 , importCargoLock
-, runCommandNoCC
 , rustPlatform
 , callPackage
-, remarshal
 , git
 , rust
 , rustc
diff --git a/nixpkgs/pkgs/build-support/setup-hooks/auto-patchelf.sh b/nixpkgs/pkgs/build-support/setup-hooks/auto-patchelf.sh
index 511371931de8..70b1fc802b56 100644
--- a/nixpkgs/pkgs/build-support/setup-hooks/auto-patchelf.sh
+++ b/nixpkgs/pkgs/build-support/setup-hooks/auto-patchelf.sh
@@ -29,6 +29,7 @@ isExecutable() {
     isExeResult="$(LANG=C $READELF -h -l "$1" 2> /dev/null \
         | grep '^ *Type: *EXEC\>\|^ *INTERP\>')"
     # not using grep -q, because it can cause Broken pipe
+    # https://unix.stackexchange.com/questions/305547/broken-pipe-when-grepping-output-but-only-with-i-flag
     [ -n "$isExeResult" ]
 }
 
diff --git a/nixpkgs/pkgs/build-support/skaware/build-skaware-man-pages.nix b/nixpkgs/pkgs/build-support/skaware/build-skaware-man-pages.nix
new file mode 100644
index 000000000000..a1f3977c0d56
--- /dev/null
+++ b/nixpkgs/pkgs/build-support/skaware/build-skaware-man-pages.nix
@@ -0,0 +1,51 @@
+{ lib, stdenv, fetchFromGitHub }:
+
+{
+  # : string
+  pname
+  # : string
+, version
+  # : string
+, sha256
+  # : list (int | string)
+, sections
+  # : string
+, description
+  # : list Maintainer
+, maintainers
+  # : license
+, license ? lib.licenses.isc
+  # : string
+, owner ? "flexibeast"
+  # : string
+, rev ? "v${version}"
+}:
+
+let
+  manDir = "${placeholder "out"}/share/man";
+
+  src = fetchFromGitHub {
+    inherit owner rev sha256;
+    repo = pname;
+  };
+in
+
+stdenv.mkDerivation {
+  inherit pname version src;
+
+  makeFlags = [
+    "MANPATH=${manDir}"
+  ];
+
+  dontBuild = true;
+
+  preInstall = lib.concatMapStringsSep "\n"
+    (section: "mkdir -p \"${manDir}/man${builtins.toString section}\"")
+    sections;
+
+  meta = with lib; {
+    inherit description license maintainers;
+    inherit (src.meta) homepage;
+    platforms = platforms.all;
+  };
+}
diff --git a/nixpkgs/pkgs/build-support/skaware/build-skaware-package.nix b/nixpkgs/pkgs/build-support/skaware/build-skaware-package.nix
index b27b65f48a59..d6f26fe908aa 100644
--- a/nixpkgs/pkgs/build-support/skaware/build-skaware-package.nix
+++ b/nixpkgs/pkgs/build-support/skaware/build-skaware-package.nix
@@ -15,6 +15,8 @@
   # TODO(Profpatsch): automatically infer most of these
   # : list string
 , configureFlags
+  # : string
+, postConfigure ? null
   # mostly for moving and deleting files from the build directory
   # : lines
 , postInstall
@@ -79,6 +81,8 @@ in stdenv.mkDerivation {
     ++ (lib.optional stdenv.isDarwin
          "--build=${stdenv.hostPlatform.system}");
 
+  inherit postConfigure;
+
   # TODO(Profpatsch): ensure that there is always a $doc output!
   postInstall = ''
     echo "Cleaning & moving common files"
diff --git a/nixpkgs/pkgs/build-support/templaterpm/default.nix b/nixpkgs/pkgs/build-support/templaterpm/default.nix
index efe70efe6c44..c98716a3fedb 100644
--- a/nixpkgs/pkgs/build-support/templaterpm/default.nix
+++ b/nixpkgs/pkgs/build-support/templaterpm/default.nix
@@ -7,7 +7,7 @@ stdenv.mkDerivation {
   nativeBuildInputs = [ makeWrapper ];
   buildInputs = [ python toposort rpm ];
 
-  phases = [ "installPhase" "fixupPhase" ];
+  dontUnpack = true;
 
   installPhase = ''
     mkdir -p $out/bin
diff --git a/nixpkgs/pkgs/build-support/trivial-builders.nix b/nixpkgs/pkgs/build-support/trivial-builders.nix
index 6f51ba512c12..f06d2136b8c6 100644
--- a/nixpkgs/pkgs/build-support/trivial-builders.nix
+++ b/nixpkgs/pkgs/build-support/trivial-builders.nix
@@ -24,16 +24,13 @@ rec {
   * `allowSubstitutes = false;`
   * to a derivation’s attributes.
   */
-  runCommand = runCommandNoCC;
-  runCommandLocal = runCommandNoCCLocal;
-
-  runCommandNoCC = name: env: runCommandWith {
+  runCommand = name: env: runCommandWith {
     stdenv = stdenvNoCC;
     runLocal = false;
     inherit name;
     derivationArgs = env;
   };
-  runCommandNoCCLocal = name: env: runCommandWith {
+  runCommandLocal = name: env: runCommandWith {
     stdenv = stdenvNoCC;
     runLocal = true;
     inherit name;
@@ -616,7 +613,7 @@ rec {
       command ? "${package.meta.mainProgram or package.pname or package.name} --version",
       version ? package.version,
     }: runCommand "test-version" { nativeBuildInputs = [ package ]; meta.timeout = 60; } ''
-      ${command} | grep -Fw ${version}
+      ${command} |& grep -Fw ${version}
       touch $out
     '';
 }
diff --git a/nixpkgs/pkgs/build-support/vm/default.nix b/nixpkgs/pkgs/build-support/vm/default.nix
index cfc19c03cfdd..3a52d756571c 100644
--- a/nixpkgs/pkgs/build-support/vm/default.nix
+++ b/nixpkgs/pkgs/build-support/vm/default.nix
@@ -494,7 +494,9 @@ rec {
      tarball must contain an RPM specfile. */
 
   buildRPM = attrs: runInLinuxImage (stdenv.mkDerivation ({
-    phases = "prepareImagePhase sysInfoPhase buildPhase installPhase";
+    prePhases = [ prepareImagePhase sysInfoPhase ];
+    dontUnpack = true;
+    dontConfigure = true;
 
     outDir = "rpms/${attrs.diskImage.name}";
 
diff --git a/nixpkgs/pkgs/build-support/vm/test.nix b/nixpkgs/pkgs/build-support/vm/test.nix
index 698503032671..1a29554ee80c 100644
--- a/nixpkgs/pkgs/build-support/vm/test.nix
+++ b/nixpkgs/pkgs/build-support/vm/test.nix
@@ -1,4 +1,4 @@
-with import ../../.. {};
+with import ../../.. { };
 with vmTools;
 
 {
@@ -30,7 +30,7 @@ with vmTools;
     src = patchelf.src;
     diskImage = diskImages.ubuntu1204i386;
     memSize = 512;
-    phases = "sysInfoPhase unpackPhase patchPhase configurePhase buildPhase checkPhase installPhase fixupPhase distPhase";
+    prePhases = [ sysInfoPhase ];
     sysInfoPhase = ''
       dpkg-query --list
     '';
diff --git a/nixpkgs/pkgs/build-support/writers/default.nix b/nixpkgs/pkgs/build-support/writers/default.nix
index 47919c251af1..111ec68a6021 100644
--- a/nixpkgs/pkgs/build-support/writers/default.nix
+++ b/nixpkgs/pkgs/build-support/writers/default.nix
@@ -99,51 +99,6 @@ rec {
   writeBashBin = name:
     writeBash "/bin/${name}";
 
-  # writeC writes an executable c package called `name` to `destination` using `libraries`.
-  #
-  #  Examples:
-  #    writeC "hello-world-ncurses" { libraries = [ pkgs.ncurses ]; } ''
-  #      #include <ncurses.h>
-  #      int main() {
-  #        initscr();
-  #        printw("Hello World !!!");
-  #        refresh(); endwin();
-  #        return 0;
-  #      }
-  #    ''
-  writeC = name: {
-    libraries ? [],
-    strip ? true
-  }:
-    makeBinWriter {
-      compileScript = ''
-        PATH=${makeBinPath [
-          pkgs.binutils-unwrapped
-          pkgs.coreutils
-          pkgs.findutils
-          pkgs.gcc
-          pkgs.pkg-config
-        ]}
-        export PKG_CONFIG_PATH=${concatMapStringsSep ":" (pkg: "${pkg}/lib/pkgconfig") libraries}
-        gcc \
-            ${optionalString (libraries != [])
-              "$(pkg-config --cflags --libs ${
-                concatMapStringsSep " " (pkg: "$(find ${escapeShellArg pkg}/lib/pkgconfig -name \\*.pc)") libraries
-              })"
-            } \
-            -O \
-            -o "$out" \
-            -Wall \
-            -x c \
-            "$contentPath"
-      '';
-      inherit strip;
-    } name;
-
-  # writeCBin takes the same arguments as writeC but outputs a directory (like writeScriptBin)
-  writeCBin = name:
-    writeC "/bin/${name}";
-
   # Like writeScript but the first line is a shebang to dash
   #
   # Example:
diff --git a/nixpkgs/pkgs/build-support/writers/test.nix b/nixpkgs/pkgs/build-support/writers/test.nix
index 00cad9a96b53..69bc7dd2c61a 100644
--- a/nixpkgs/pkgs/build-support/writers/test.nix
+++ b/nixpkgs/pkgs/build-support/writers/test.nix
@@ -17,14 +17,6 @@ let
      if [[ "test" == "test" ]]; then echo "success"; fi
     '';
 
-    c = writeCBin "test-writers-c" { libraries = [ ]; } ''
-      #include <stdio.h>
-      int main() {
-        printf("success\n");
-        return 0;
-      }
-    '';
-
     dash = writeDashBin "test-writers-dash-bin" ''
      test '~' = '~' && echo 'success'
     '';
@@ -88,24 +80,6 @@ let
      if [[ "test" == "test" ]]; then echo "success"; fi
     '';
 
-    c = writeC "test-writers-c" { libraries = [ glib.dev ]; } ''
-      #include <gio/gio.h>
-      #include <stdio.h>
-      int main() {
-        GApplication *application = g_application_new ("hello.world", G_APPLICATION_FLAGS_NONE);
-        g_application_register (application, NULL, NULL);
-        GNotification *notification = g_notification_new ("Hello world!");
-        g_notification_set_body (notification, "This is an example notification.");
-        GIcon *icon = g_themed_icon_new ("dialog-information");
-        g_notification_set_icon (notification, icon);
-        g_object_unref (icon);
-        g_object_unref (notification);
-        g_object_unref (application);
-        printf("success\n");
-        return 0;
-      }
-    '';
-
     dash = writeDash "test-writers-dash" ''
      test '~' = '~' && echo 'success'
     '';