author     Frederik Rietdijk <fridh@fridh.nl>  2017-07-23 11:23:43 +0200
committer  Frederik Rietdijk <fridh@fridh.nl>  2017-07-23 11:23:43 +0200
commit     29f91c107f5f605dac63552909cbe3c8f0c2e303 (patch)
tree       dae9c9d722e9bfd3ddf13b3b3b997a6cfcc178c8 /nixos
parent     9af77826a97feb9798f700d64cd467864945875c (diff)
parent     239b694579ca8a9a64e86609d4e9f49d0b01be2b (diff)
Merge remote-tracking branch 'upstream/master' into HEAD
Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/manual.xml                             2
-rw-r--r--  nixos/doc/manual/release-notes/rl-1509.xml              2
-rw-r--r--  nixos/doc/manual/release-notes/rl-1609.xml              4
-rw-r--r--  nixos/modules/config/ldap.nix                           1
-rw-r--r--  nixos/modules/module-list.nix                           2
-rw-r--r--  nixos/modules/programs/zsh/oh-my-zsh.nix               16
-rw-r--r--  nixos/modules/services/games/factorio.nix               2
-rw-r--r--  nixos/modules/services/misc/exhibitor.nix             415
-rw-r--r--  nixos/modules/services/misc/gitlab.nix                  2
-rw-r--r--  nixos/modules/services/printing/cupsd.nix               2
-rw-r--r--  nixos/modules/services/security/oauth2_proxy.nix        9
-rw-r--r--  nixos/modules/services/system/saslauthd.nix            63
-rw-r--r--  nixos/modules/virtualisation/docker.nix                48
-rw-r--r--  nixos/modules/virtualisation/google-compute-image.nix 149
-rw-r--r--  nixos/tests/postgresql.nix                             18
15 files changed, 710 insertions, 25 deletions
diff --git a/nixos/doc/manual/manual.xml b/nixos/doc/manual/manual.xml
index 2c28dd448016..9aa332f026da 100644
--- a/nixos/doc/manual/manual.xml
+++ b/nixos/doc/manual/manual.xml
@@ -18,7 +18,7 @@
 
     <para>If you encounter problems, please report them on the
     <literal
-    xlink:href="http://lists.science.uu.nl/mailman/listinfo/nix-dev">nix-dev@lists.science.uu.nl</literal>
+    xlink:href="https://groups.google.com/forum/#!forum/nix-devel">nix-devel</literal>
     mailing list or on the <link
     xlink:href="irc://irc.freenode.net/#nixos">
     <literal>#nixos</literal> channel on Freenode</link>.  Bugs should
diff --git a/nixos/doc/manual/release-notes/rl-1509.xml b/nixos/doc/manual/release-notes/rl-1509.xml
index 967fbcf869db..6c1c46844ccb 100644
--- a/nixos/doc/manual/release-notes/rl-1509.xml
+++ b/nixos/doc/manual/release-notes/rl-1509.xml
@@ -28,7 +28,7 @@ has the following highlights:</para>
     since version 0.0 as well as the most recent <link
     xlink:href="http://www.stackage.org/">Stackage Nightly</link>
     snapshot. The announcement <link
-    xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2015-September/018138.html">&quot;Full
+    xlink:href="https://nixos.org/nix-dev/2015-September/018138.html">&quot;Full
     Stackage Support in Nixpkgs&quot;</link> gives additional
     details.</para>
   </listitem>
diff --git a/nixos/doc/manual/release-notes/rl-1609.xml b/nixos/doc/manual/release-notes/rl-1609.xml
index ade7d5581ced..3abafac97378 100644
--- a/nixos/doc/manual/release-notes/rl-1609.xml
+++ b/nixos/doc/manual/release-notes/rl-1609.xml
@@ -78,13 +78,13 @@ following incompatible changes:</para>
    our package set is loosely based on the latest available LTS release, i.e.
     LTS 7.x at the time of this writing. New releases of NixOS and Nixpkgs will
     drop those old names entirely. <link
-    xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2016-June/020585.html">The
+    xlink:href="https://nixos.org/nix-dev/2016-June/020585.html">The
     motivation for this change</link> has been discussed at length on the
     <literal>nix-dev</literal> mailing list and in <link
     xlink:href="https://github.com/NixOS/nixpkgs/issues/14897">Github issue
     #14897</link>. Development strategies for Haskell hackers who want to rely
     on Nix and NixOS have been described in <link
-    xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2016-June/020642.html">another
+    xlink:href="https://nixos.org/nix-dev/2016-June/020642.html">another
     nix-dev article</link>.</para>
   </listitem>
 
diff --git a/nixos/modules/config/ldap.nix b/nixos/modules/config/ldap.nix
index da875d6e4821..710dfdd01af5 100644
--- a/nixos/modules/config/ldap.nix
+++ b/nixos/modules/config/ldap.nix
@@ -19,7 +19,6 @@ let
       bind_policy ${config.users.ldap.bind.policy}
       ${optionalString config.users.ldap.useTLS ''
         ssl start_tls
-        tls_checkpeer no
       ''}
       ${optionalString (config.users.ldap.bind.distinguishedName != "") ''
         binddn ${config.users.ldap.bind.distinguishedName}
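
The removal above drops the insecure "tls_checkpeer no" default, so peer certificates are verified whenever users.ldap.useTLS is set. A deployment that genuinely needs the old behaviour can opt back in explicitly; a minimal sketch, assuming the module's users.ldap.extraConfig option for appending raw ldap.conf lines:

    # configuration.nix (sketch) -- only if unverified TLS is genuinely required
    users.ldap = {
      enable = true;
      useTLS = true;
      # appended verbatim to ldap.conf; disables peer certificate verification
      extraConfig = ''
        tls_checkpeer no
      '';
    };
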
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 726c55539190..b97c3b0d816b 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -287,6 +287,7 @@
   ./services/misc/emby.nix
   ./services/misc/errbot.nix
   ./services/misc/etcd.nix
+  ./services/misc/exhibitor.nix
   ./services/misc/felix.nix
   ./services/misc/folding-at-home.nix
   ./services/misc/fstrim.nix
@@ -567,6 +568,7 @@
   ./services/system/earlyoom.nix
   ./services/system/kerberos.nix
   ./services/system/nscd.nix
+  ./services/system/saslauthd.nix
   ./services/system/uptimed.nix
   ./services/torrent/deluge.nix
   ./services/torrent/flexget.nix
diff --git a/nixos/modules/programs/zsh/oh-my-zsh.nix b/nixos/modules/programs/zsh/oh-my-zsh.nix
index aa5844cdc4d4..9077643c4440 100644
--- a/nixos/modules/programs/zsh/oh-my-zsh.nix
+++ b/nixos/modules/programs/zsh/oh-my-zsh.nix
@@ -15,6 +15,16 @@ in
           '';
         };
 
+        package = mkOption {
+          default = pkgs.oh-my-zsh;
+          defaultText = "pkgs.oh-my-zsh";
+          description = ''
+            Package to install for `oh-my-zsh` usage.
+          '';
+
+          type = types.package;
+        };
+
         plugins = mkOption {
           default = [];
           type = types.listOf(types.str);
@@ -46,11 +56,11 @@ in
       # Prevent zsh from overwriting oh-my-zsh's prompt
       programs.zsh.promptInit = mkDefault "";
 
-      environment.systemPackages = with pkgs; [ oh-my-zsh ];
+      environment.systemPackages = [ cfg.package ];
 
-      programs.zsh.interactiveShellInit = with pkgs; with builtins; ''
+      programs.zsh.interactiveShellInit = with builtins; ''
         # oh-my-zsh configuration generated by NixOS
-        export ZSH=${oh-my-zsh}/share/oh-my-zsh
+        export ZSH=${cfg.package}/share/oh-my-zsh
 
         ${optionalString (length(cfg.plugins) > 0)
           "plugins=(${concatStringsSep " " cfg.plugins})"
diff --git a/nixos/modules/services/games/factorio.nix b/nixos/modules/services/games/factorio.nix
index e7f070d08773..1dc8ce93a0e5 100644
--- a/nixos/modules/services/games/factorio.nix
+++ b/nixos/modules/services/games/factorio.nix
@@ -39,7 +39,7 @@ let
     admins = [];
   };
   serverSettingsFile = pkgs.writeText "server-settings.json" (builtins.toJSON (filterAttrsRecursive (n: v: v != null) serverSettings));
-  modDir = pkgs.factorio-mkModDirDrv cfg.mods;
+  modDir = pkgs.factorio-utils.mkModDirDrv cfg.mods;
 in
 {
   options = {
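
mkModDirDrv now lives in the pkgs.factorio-utils attribute set instead of the old top-level pkgs.factorio-mkModDirDrv alias. The module feeds it from services.factorio.mods; a minimal sketch, assuming mods takes a list of mod derivations:

    # configuration.nix (sketch)
    services.factorio = {
      enable = true;
      # each entry is a derivation containing a Factorio mod; the module
      # links them into one directory via factorio-utils.mkModDirDrv
      mods = [ ];
    };
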
diff --git a/nixos/modules/services/misc/exhibitor.nix b/nixos/modules/services/misc/exhibitor.nix
new file mode 100644
index 000000000000..1db422758671
--- /dev/null
+++ b/nixos/modules/services/misc/exhibitor.nix
@@ -0,0 +1,415 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.exhibitor;
+  exhibitor = cfg.package;
+  exhibitorConfig = ''
+    zookeeper-install-directory=${cfg.baseDir}/zookeeper
+    zookeeper-data-directory=${cfg.zkDataDir}
+    zookeeper-log-directory=${cfg.zkLogDir}
+    zoo-cfg-extra=${cfg.zkExtraCfg}
+    client-port=${toString cfg.zkClientPort}
+    connect-port=${toString cfg.zkConnectPort}
+    election-port=${toString cfg.zkElectionPort}
+    cleanup-period-ms=${toString cfg.zkCleanupPeriod}
+    servers-spec=${concatStringsSep "," cfg.zkServersSpec}
+    auto-manage-instances=${lib.boolToString cfg.autoManageInstances}
+    ${cfg.extraConf}
+  '';
+  configDir = pkgs.writeTextDir "exhibitor.properties" exhibitorConfig;
+  cliOptionsCommon = {
+    configtype = cfg.configType;
+    defaultconfig = "${configDir}/exhibitor.properties";
+    port = toString cfg.port;
+    hostname = cfg.hostname;
+    headingtext = if (cfg.headingText != null) then (lib.escapeShellArg cfg.headingText) else null;
+    nodemodification = lib.boolToString cfg.nodeModification;
+    configcheckms = toString cfg.configCheckMs;
+    jquerystyle = cfg.jqueryStyle;
+    loglines = toString cfg.logLines;
+    servo = lib.boolToString cfg.servo;
+    timeout = toString cfg.timeout;
+  };
+  s3CommonOptions = { s3region = cfg.s3Region; s3credentials = cfg.s3Credentials; };
+  cliOptionsPerConfig = {
+    s3 = {
+      s3config = "${cfg.s3Config.bucketName}:${cfg.s3Config.objectKey}";
+      s3configprefix = cfg.s3Config.configPrefix;
+    };
+    zookeeper = {
+      zkconfigconnect = concatStringsSep "," cfg.zkConfigConnect;
+      zkconfigexhibitorpath = cfg.zkConfigExhibitorPath;
+      zkconfigpollms = toString cfg.zkConfigPollMs;
+      zkconfigretry = "${toString cfg.zkConfigRetry.sleepMs}:${toString cfg.zkConfigRetry.retryQuantity}";
+      zkconfigzpath = cfg.zkConfigZPath;
+      zkconfigexhibitorport = toString cfg.zkConfigExhibitorPort; # NB: This might be null
+    };
+    file = {
+      fsconfigdir = cfg.fsConfigDir;
+      fsconfiglockprefix = cfg.fsConfigLockPrefix;
+      fsconfigname = cfg.fsConfigName;
+    };
+    none = {
+      noneconfigdir = configDir;
+    };
+  };
+  cliOptions = concatStringsSep " " (mapAttrsToList (k: v: "--${k} ${v}") (filterAttrs (k: v: v != null && v != "") (cliOptionsCommon //
+               cliOptionsPerConfig."${cfg.configType}" //
+               s3CommonOptions //
+               optionalAttrs cfg.s3Backup { s3backup = "true"; } //
+               optionalAttrs cfg.fileSystemBackup { filesystembackup = "true"; }
+               )));
+in
+{
+  options = {
+    services.exhibitor = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "
+          Whether to enable the exhibitor server.
+        ";
+      };
+      # See https://github.com/soabase/exhibitor/wiki/Running-Exhibitor for what these mean
+      # General options for any type of config
+      port = mkOption {
+        type = types.int;
+        default = 8080;
+        description = ''
+          The port for exhibitor to listen on and communicate with other exhibitors.
+        '';
+      };
+      baseDir = mkOption {
+        type = types.str;
+        default = "/var/exhibitor";
+        description = ''
+          Baseline directory for exhibitor runtime config.
+        '';
+      };
+      configType = mkOption {
+        type = types.enum [ "file" "s3" "zookeeper" "none" ];
+        description = ''
+          Which configuration type you want to use. Additional config will be
+          required depending on which type you are using.
+        '';
+      };
+      hostname = mkOption {
+        type = types.nullOr types.str;
+        description = ''
+          Hostname to use and advertise
+        '';
+        default = null;
+      };
+      nodeModification = mkOption {
+        type = types.bool;
+        description = ''
+          Whether the Explorer UI will allow nodes to be modified (use with caution).
+        '';
+        default = true;
+      };
+      configCheckMs = mkOption {
+        type = types.int;
+        description = ''
+          Period (ms) to check for shared config updates.
+        '';
+        default = 30000;
+      };
+      headingText = mkOption {
+        type = types.nullOr types.str;
+        description = ''
+          Extra text to display in UI header
+        '';
+        default = null;
+      };
+      jqueryStyle = mkOption {
+        type = types.enum [ "red" "black" "custom" ];
+        description = ''
+          Styling used for the JQuery-based UI.
+        '';
+        default = "red";
+      };
+      logLines = mkOption {
+        type = types.int;
+        description = ''
+          Max lines of logging to keep in memory for display.
+        '';
+        default = 1000;
+      };
+      servo = mkOption {
+        type = types.bool;
+        description = ''
+          ZooKeeper will be queried once a minute for its state via the 'mntr' four
+          letter word (this requires ZooKeeper 3.4.x+). Servo will be used to publish
+          this data via JMX.
+        '';
+        default = false;
+      };
+      timeout = mkOption {
+        type = types.int;
+        description = ''
+          Connection timeout (ms) for ZK connections.
+        '';
+        default = 30000;
+      };
+      autoManageInstances = mkOption {
+        type = types.bool;
+        description = ''
+          Automatically manage ZooKeeper instances in the ensemble
+        '';
+        default = false;
+      };
+      zkDataDir = mkOption {
+        type = types.str;
+        default = "${cfg.baseDir}/zkData";
+        description = ''
+          The Zookeeper data directory
+        '';
+      };
+      zkLogDir = mkOption {
+        type = types.path;
+        default = "${cfg.baseDir}/zkLogs";
+        description = ''
+          The Zookeeper logs directory
+        '';
+      };
+      extraConf = mkOption {
+        type = types.str;
+        default = "";
+        description = ''
+          Extra Exhibitor configuration to put in the ZooKeeper config file.
+        '';
+      };
+      zkExtraCfg = mkOption {
+        type = types.str;
+        default = ''initLimit=5&syncLimit=2&tickTime=2000'';
+        description = ''
+          Extra options to pass into Zookeeper
+        '';
+      };
+      zkClientPort = mkOption {
+        type = types.int;
+        default = 2181;
+        description = ''
+          Zookeeper client port
+        '';
+      };
+      zkConnectPort = mkOption {
+        type = types.int;
+        default = 2888;
+        description = ''
+          The port to use for followers to talk to each other.
+        '';
+      };
+      zkElectionPort = mkOption {
+        type = types.int;
+        default = 3888;
+        description = ''
+          The port for Zookeepers to use for leader election.
+        '';
+      };
+      zkCleanupPeriod = mkOption {
+        type = types.int;
+        default = 0;
+        description = ''
+          How often (in milliseconds) to run the Zookeeper log cleanup task.
+        '';
+      };
+      zkServersSpec = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Zookeeper server spec for all servers in the ensemble.
+        '';
+        example = [ "S:1:zk1.example.com" "S:2:zk2.example.com" "S:3:zk3.example.com" "O:4:zk-observer.example.com" ];
+      };
+
+      # Backup options
+      s3Backup = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable backups to S3
+        '';
+      };
+      fileSystemBackup = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enables file system backup of ZooKeeper log files
+        '';
+      };
+
+      # Options for using zookeeper configType
+      zkConfigConnect = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          The initial connection string for ZooKeeper shared config storage
+        '';
+        example = ["host1:2181" "host2:2181"];
+      };
+      zkConfigExhibitorPath = mkOption {
+        type = types.str;
+        description = ''
+          If the ZooKeeper shared config is also running Exhibitor, the URI path for the REST call
+        '';
+        default = "/";
+      };
+      zkConfigExhibitorPort = mkOption {
+        type = types.nullOr types.int;
+        description = ''
+          If the ZooKeeper shared config is also running Exhibitor, the port that
+          Exhibitor is listening on. IMPORTANT: if this value is not set it implies
+          that Exhibitor is not being used on the ZooKeeper shared config.
+        '';
+        default = null;
+      };
+      zkConfigPollMs = mkOption {
+        type = types.int;
+        description = ''
+          The period in ms to check for changes in the config ensemble
+        '';
+        default = 10000;
+      };
+      zkConfigRetry = {
+        sleepMs = mkOption {
+          type = types.int;
+          default = 1000;
+          description = ''
+            Retry sleep time connecting to the ZooKeeper config
+          '';
+        };
+        retryQuantity = mkOption {
+          type = types.int;
+          default = 3;
+          description = ''
+            Retries connecting to the ZooKeeper config
+          '';
+        };
+      };
+      zkConfigZPath = mkOption {
+        type = types.str;
+        description = ''
+          The base ZPath that Exhibitor should use
+        '';
+        example = "/exhibitor/config";
+      };
+
+      # Config options for s3 configType
+      s3Config = {
+        bucketName = mkOption {
+          type = types.str;
+          description = ''
+            Bucket name to store config
+          '';
+        };
+        objectKey = mkOption {
+          type = types.str;
+          description = ''
+            S3 key name to store the config
+          '';
+        };
+        configPrefix = mkOption {
+          type = types.str;
+          description = ''
+            When using AWS S3 shared config files, the prefix to use for values such as locks
+          '';
+          default = "exhibitor-";
+        };
+      };
+
+      # The next two are used for either s3backup or s3 configType
+      s3Credentials = mkOption {
+        type = types.nullOr types.path;
+        description = ''
+          Optional credentials to use for s3backup or s3config. Argument is the path
+          to an AWS credential properties file with two properties:
+          com.netflix.exhibitor.s3.access-key-id and com.netflix.exhibitor.s3.access-secret-key
+        '';
+        default = null;
+      };
+      s3Region = mkOption {
+        type = types.nullOr types.str;
+        description = ''
+          Optional region for S3 calls
+        '';
+        default = null;
+      };
+
+      # Config options for file config type
+      fsConfigDir = mkOption {
+        type = types.path;
+        description = ''
+          Directory to store Exhibitor properties (cannot be used with s3config).
+          Exhibitor uses file system locks so you can specify a shared location
+          so as to enable complete ensemble management.
+        '';
+      };
+      fsConfigLockPrefix = mkOption {
+        type = types.str;
+        description = ''
+          A prefix for a locking mechanism used in conjunction with fsconfigdir
+        '';
+        default = "exhibitor-lock-";
+      };
+      fsConfigName = mkOption {
+        type = types.str;
+        description = ''
+          The name of the file to store config in
+        '';
+        default = "exhibitor.properties";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.exhibitor = {
+      description = "Exhibitor Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment = {
+        ZOO_LOG_DIR = cfg.baseDir;
+      };
+      serviceConfig = {
+        /***
+          Exhibitor is a bit un-nixy. It wants to present to you a user interface in order to
+          mutate the configuration of both itself and ZooKeeper, and to coordinate changes
+          among the members of the Zookeeper ensemble. I'm going for a different approach here,
+          which is to manage all the configuration via nix and have it write out the configuration
+          files that exhibitor will use, and to reduce the amount of inter-exhibitor orchestration.
+        ***/
+        ExecStart = ''
+          ${pkgs.exhibitor}/bin/startExhibitor.sh ${cliOptions}
+        '';
+        User = "zookeeper";
+        PermissionsStartOnly = true;
+      };
+      # This is a bit wonky, but the reason for this is that Exhibitor tries to write to
+      # ${cfg.baseDir}/zookeeper/bin/../conf/zoo.cfg
+      # I want everything but the conf directory to be in the immutable nix store, and I want defaults
+      # from the nix store
+      # If I symlink the bin directory in, then bin/../ will resolve to the parent of the symlink in the
+      # immutable nix store. Bind mounting a writable conf over the existing conf might work, but it gets very
+      # messy with trying to copy the existing out into a mutable store.
+      # Another option is to try to patch upstream exhibitor, but the current package just pulls down the
+      # prebuilt JARs off of Maven, rather than building them ourselves, as Maven support in Nix isn't
+      # very mature. So, it seems like a reasonable compromise is to just copy out of the immutable store
+      # just before starting the service, so we're running binaries from the immutable store, but we work around
+      # Exhibitor's desire to mutate its current installation.
+      preStart = ''
+        mkdir -m 0700 -p ${cfg.baseDir}/zookeeper
+        # Not doing a chown -R to keep the base ZK files owned by root
+        chown zookeeper ${cfg.baseDir} ${cfg.baseDir}/zookeeper
+        cp -Rf ${pkgs.zookeeper}/* ${cfg.baseDir}/zookeeper
+        chown -R zookeeper ${cfg.baseDir}/zookeeper/conf
+        chmod -R u+w ${cfg.baseDir}/zookeeper/conf
+      '';
+    };
+    users.extraUsers = singleton {
+      name = "zookeeper";
+      uid = config.ids.uids.zookeeper;
+      description = "Zookeeper daemon user";
+      home = cfg.baseDir;
+    };
+  };
+}
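
A minimal sketch of enabling the new module with ZooKeeper-backed shared configuration; hostnames and the ZPath are placeholders. configType has no default, so it must always be set:

    # configuration.nix (sketch)
    services.exhibitor = {
      enable = true;
      configType = "zookeeper";
      zkConfigConnect = [ "zk1.example.com:2181" "zk2.example.com:2181" ];
      zkConfigZPath = "/exhibitor/config";
      # one entry per ensemble member: <type>:<id>:<hostname>
      zkServersSpec = [ "S:1:zk1.example.com" "S:2:zk2.example.com" ];
    };
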
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index 0ed5a539e7dd..ccc1854d2548 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -439,6 +439,8 @@ in {
       environment.GITLAB_SHELL_CONFIG_PATH = gitlabEnv.GITLAB_SHELL_CONFIG_PATH;
       path = with pkgs; [
         gitAndTools.git
+        gnutar
+        gzip
         openssh
         gitlab-workhorse
       ];
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index ba9f99e6a8fb..855c89303847 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -37,7 +37,7 @@ let
       [ cups.out additionalBackends cups-filters pkgs.ghostscript ]
       ++ optional cfg.gutenprint gutenprint
       ++ cfg.drivers;
-    pathsToLink = [ "/lib/cups" "/share/cups" "/bin" ];
+    pathsToLink = [ "/lib" "/share/cups" "/bin" ];
     postBuild = cfg.bindirCmds;
     ignoreCollisions = true;
   };
diff --git a/nixos/modules/services/security/oauth2_proxy.nix b/nixos/modules/services/security/oauth2_proxy.nix
index e292fd9851e3..3e5087766b1c 100644
--- a/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixos/modules/services/security/oauth2_proxy.nix
@@ -21,21 +21,20 @@ let
     '';
 
     github = cfg: ''
-      $(optionalString (!isNull cfg.github.org) "--github-org=${cfg.github.org}") \
-      $(optionalString (!isNull cfg.github.team) "--github-org=${cfg.github.team}") \
+      ${optionalString (!isNull cfg.github.org) "--github-org=${cfg.github.org}"} \
+      ${optionalString (!isNull cfg.github.team) "--github-team=${cfg.github.team}"} \
     '';
 
     google = cfg: ''
       --google-admin-email=${cfg.google.adminEmail} \
       --google-service-account=${cfg.google.serviceAccountJSON} \
-      $(repeatedArgs (group: "--google-group=${group}") cfg.google.groups) \
+      ${repeatedArgs (group: "--google-group=${group}") cfg.google.groups} \
     '';
   };
 
   authenticatedEmailsFile = pkgs.writeText "authenticated-emails" cfg.email.addresses;
 
-  getProviderOptions = cfg: provider:
-    if providerSpecificOptions ? provider then providerSpecificOptions.provider cfg else "";
+  getProviderOptions = cfg: provider: providerSpecificOptions.${provider} or (_: "") cfg;
 
   mkCommandLine = cfg: ''
     --provider='${cfg.provider}' \
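
The github/google fragments used shell command substitution $(...) where Nix interpolation ${...} was intended, and getProviderOptions tested for the literal key "provider" (providerSpecificOptions ? provider) rather than the provider's value. The rewrite relies on Nix's select-with-fallback operator; a standalone sketch of the pattern, with hypothetical names:

    # nix repl sketch: attrset.${key} or fallback, then apply to cfg
    let
      handlers = {
        github = cfg: "--github-org=${cfg.org}";
      };
      # parses as (handlers.${provider} or (_: "")) cfg -- `or` binds tighter than application
      getOptions = cfg: provider: handlers.${provider} or (_: "") cfg;
    in {
      hit  = getOptions { org = "NixOS"; } "github";  # "--github-org=NixOS"
      miss = getOptions { org = "NixOS"; } "azure";   # ""
    }
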
diff --git a/nixos/modules/services/system/saslauthd.nix b/nixos/modules/services/system/saslauthd.nix
new file mode 100644
index 000000000000..281716cf1860
--- /dev/null
+++ b/nixos/modules/services/system/saslauthd.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  nssModulesPath = config.system.nssModules.path;
+  cfg = config.services.saslauthd;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.saslauthd = {
+
+      enable = mkEnableOption "the Cyrus SASL authentication daemon";
+
+      package = mkOption {
+        default = pkgs.cyrus_sasl.bin;
+        defaultText = "pkgs.cyrus_sasl.bin";
+        type = types.package;
+        description = "Cyrus SASL package to use.";
+      };
+
+      mechanism = mkOption {
+        type = types.str;
+        default = "pam";
+        description = "Auth mechanism to use";
+      };
+
+      config = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Configuration to use for Cyrus SASL authentication daemon.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.saslauthd = {
+      description = "Cyrus SASL authentication daemon";
+
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/sbin/saslauthd saslauthd -a ${cfg.mechanism} -O ${pkgs.writeText "saslauthd.conf" cfg.config}";
+        Type = "forking";
+        PIDFile = "/run/saslauthd/saslauthd.pid";
+        Restart = "always";
+      };
+    };
+  };
+}
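
A minimal sketch of the new service; pam is already the default mechanism and is shown only for clarity:

    # configuration.nix (sketch)
    services.saslauthd = {
      enable = true;
      mechanism = "pam";  # default; see saslauthd(8) for alternatives
      # written to a saslauthd.conf file and passed via -O; contents are mechanism-specific
      config = "";
    };
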
diff --git a/nixos/modules/virtualisation/docker.nix b/nixos/modules/virtualisation/docker.nix
index c26cae06cd1d..5a8a0e27436f 100644
--- a/nixos/modules/virtualisation/docker.nix
+++ b/nixos/modules/virtualisation/docker.nix
@@ -94,6 +94,38 @@ in
             <command>docker</command> daemon.
           '';
       };
+
+    autoPrune = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to periodically prune Docker resources. If enabled, a
+          systemd timer will run <literal>docker system prune -f</literal>
+          as specified by the <literal>dates</literal> option.
+        '';
+      };
+
+      flags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--all" ];
+        description = ''
+          Any additional flags passed to <command>docker system prune</command>.
+        '';
+      };
+
+      dates = mkOption {
+        default = "weekly";
+        type = types.str;
+        description = ''
+          Specification (in the format described by
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>7</manvolnum></citerefentry>) of the time at
+          which the prune will occur.
+        '';
+      };
+    };
   };
 
   ###### implementation
@@ -137,6 +169,22 @@ in
           SocketGroup = "docker";
         };
       };
+
+
+      systemd.services.docker-prune = {
+        description = "Prune docker resources";
+
+        restartIfChanged = false;
+        unitConfig.X-StopOnRemoval = false;
+
+        serviceConfig.Type = "oneshot";
+
+        script = ''
+          ${pkgs.docker}/bin/docker system prune -f ${toString cfg.autoPrune.flags}
+        '';
+
+        startAt = optional cfg.autoPrune.enable cfg.autoPrune.dates;
+      };
     }
   ]);
 
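
A minimal sketch of the new pruning options; with dates = "weekly" the oneshot unit runs on the weekly systemd.time(7) calendar schedule:

    # configuration.nix (sketch)
    virtualisation.docker = {
      enable = true;
      autoPrune = {
        enable = true;
        dates = "weekly";     # any systemd.time(7) calendar expression
        flags = [ "--all" ];  # also prune unused (not merely dangling) images
      };
    };
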
diff --git a/nixos/modules/virtualisation/google-compute-image.nix b/nixos/modules/virtualisation/google-compute-image.nix
index 3943a62f8a45..4a8dadaa281d 100644
--- a/nixos/modules/virtualisation/google-compute-image.nix
+++ b/nixos/modules/virtualisation/google-compute-image.nix
@@ -3,13 +3,11 @@
 with lib;
 let
   diskSize = 1024; # MB
+  gce = pkgs.google-compute-engine;
 in
 {
   imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ./grow-partition.nix ];
 
-  # https://cloud.google.com/compute/docs/tutorials/building-images
-  networking.firewall.enable = mkDefault false;
-
   system.build.googleComputeImage = import ../../lib/make-disk-image.nix {
     name = "google-compute-image";
     postVM = ''
@@ -49,12 +47,18 @@ in
   services.openssh.permitRootLogin = "prohibit-password";
   services.openssh.passwordAuthentication = mkDefault false;
 
+  # Use GCE udev rules for dynamic disk volumes
+  services.udev.packages = [ gce ];
+
   # Force getting the hostname from Google Compute.
   networking.hostName = mkDefault "";
 
   # Always include cryptsetup so that NixOps can use it.
   environment.systemPackages = [ pkgs.cryptsetup ];
 
+  # Rely on GCP's firewall instead
+  networking.firewall.enable = mkDefault false;
+
   # Configure default metadata hostnames
   networking.extraHosts = ''
     169.254.169.254 metadata.google.internal metadata
@@ -64,6 +68,132 @@ in
 
   networking.usePredictableInterfaceNames = false;
 
+  # allow the google-accounts-daemon to manage users
+  users.mutableUsers = true;
+  # and allow users to sudo without password
+  security.sudo.enable = true;
+  security.sudo.extraConfig = ''
+  %google-sudoers ALL=(ALL:ALL) NOPASSWD:ALL
+  '';
+
+  # NOTE: google-accounts tries to write to /etc/sudoers.d but the folder doesn't exist
+  # FIXME: no such file or directory on dynamic SSH provisioning
+  systemd.services.google-accounts-daemon = {
+    description = "Google Compute Engine Accounts Daemon";
+    # This daemon creates dynamic users
+    enable = config.users.mutableUsers;
+    after = [
+      "network.target"
+      "google-instance-setup.service"
+      "google-network-setup.service"
+    ];
+    wantedBy = [ "multi-user.target" ];
+    requires = ["network.target"];
+    path = with pkgs; [ shadow ];
+    serviceConfig = {
+      Type = "simple";
+      ExecStart = "${gce}/bin/google_accounts_daemon --debug";
+    };
+  };
+
+  systemd.services.google-clock-skew-daemon = {
+    description = "Google Compute Engine Clock Skew Daemon";
+    after = [
+      "network.target"
+      "google-instance-setup.service"
+      "google-network-setup.service"
+    ];
+    requires = [ "network.target" ];
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      Type = "simple";
+      ExecStart = "${gce}/bin/google_clock_skew_daemon --debug";
+    };
+  };
+
+  systemd.services.google-instance-setup = {
+    description = "Google Compute Engine Instance Setup";
+    after = ["fs.target" "network-online.target" "network.target" "rsyslog.service"];
+    before = ["sshd.service"];
+    wants = ["local-fs.target" "network-online.target" "network.target"];
+    wantedBy = [ "sshd.service" "multi-user.target" ];
+    path = with pkgs; [ ethtool ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_instance_setup --debug";
+      Type = "oneshot";
+    };
+  };
+
+  systemd.services.google-ip-forwarding-daemon = {
+    description = "Google Compute Engine IP Forwarding Daemon";
+    after = ["network.target" "google-instance-setup.service" "google-network-setup.service"];
+    requires = ["network.target"];
+    wantedBy = [ "multi-user.target" ];
+    path = with pkgs; [ iproute ];
+    serviceConfig = {
+      Type = "simple";
+      ExecStart = "${gce}/bin/google_ip_forwarding_daemon --debug";
+    };
+  };
+
+  systemd.services.google-shutdown-scripts = {
+    description = "Google Compute Engine Shutdown Scripts";
+    after = [
+      "local-fs.target"
+      "network-online.target"
+      "network.target"
+      "rsyslog.service"
+      "google-instance-setup.service"
+      "google-network-setup.service"
+    ];
+    wants = [ "local-fs.target" "network-online.target" "network.target"];
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      ExecStart = "${pkgs.coreutils}/bin/true";
+      ExecStop = "${gce}/bin/google_metadata_script_runner --debug --script-type shutdown";
+      Type = "oneshot";
+      RemainAfterExit = true;
+      TimeoutStopSec = 0;
+    };
+  };
+
+  systemd.services.google-network-setup = {
+    description = "Google Compute Engine Network Setup";
+    after = [
+      "local-fs.target"
+      "network-online.target"
+      "network.target"
+      "rsyslog.service"
+    ];
+    wants = [ "local-fs.target" "network-online.target" "network.target"];
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_network_setup --debug";
+      KillMode = "process";
+      Type = "oneshot";
+    };
+  };
+
+  systemd.services.google-startup-scripts = {
+    description = "Google Compute Engine Startup Scripts";
+    after = [
+      "local-fs.target"
+      "network-online.target"
+      "network.target"
+      "rsyslog.service"
+      "google-instance-setup.service"
+      "google-network-setup.service"
+    ];
+    wants = [ "local-fs.target" "network-online.target" "network.target"];
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      ExecStart = "${gce}/bin/google_metadata_script_runner --debug --script-type startup";
+      KillMode = "process";
+      Type = "oneshot";
+    };
+  };
+
+  # TODO: remove this
   systemd.services.fetch-ssh-keys =
     { description = "Fetch host keys and authorized_keys for root user";
 
@@ -113,9 +243,13 @@ in
       serviceConfig.StandardOutput = "journal+console";
     };
 
-  # Setings taken from https://cloud.google.com/compute/docs/tutorials/building-images#providedkernel
+  # Settings taken from https://github.com/GoogleCloudPlatform/compute-image-packages/blob/master/google_config/sysctl/11-gce-network-security.conf
   boot.kernel.sysctl = {
-    # enables syn flood protection
+    # Turn on SYN-flood protections.  Starting with 2.6.26, there is no loss
+    # of TCP functionality/features under normal conditions.  When flood
+    # protections kick in under high unanswered-SYN load, the system
+    # should remain more stable, with a trade off of some loss of TCP
+    # functionality/features (e.g. TCP Window scaling).
     "net.ipv4.tcp_syncookies" = mkDefault "1";
 
     # ignores source-routed packets
@@ -169,6 +303,11 @@ in
     # randomizes addresses of mmap base, heap, stack and VDSO page
     "kernel.randomize_va_space" = mkDefault "2";
 
+    # Reboot the machine soon after a kernel panic.
+    "kernel.panic" = mkDefault "10";
+
+    ## Not part of the original config
+
     # provides protection from ToCToU races
     "fs.protected_hardlinks" = mkDefault "1";
 
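
The finished image is exposed as system.build.googleComputeImage; a sketch of building it from a configuration that imports this module:

    # gce.nix (sketch); build with:
    #   nix-build '<nixpkgs/nixos>' -A config.system.build.googleComputeImage -I nixos-config=gce.nix
    {
      imports = [ <nixpkgs/nixos/modules/virtualisation/google-compute-image.nix> ];
    }
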
diff --git a/nixos/tests/postgresql.nix b/nixos/tests/postgresql.nix
index 1f4f43a26669..0ce37b55bb7b 100644
--- a/nixos/tests/postgresql.nix
+++ b/nixos/tests/postgresql.nix
@@ -13,8 +13,10 @@ let
     INSERT INTO sth (id) VALUES (1);
     INSERT INTO sth (id) VALUES (1);
     INSERT INTO sth (id) VALUES (1);
+    CREATE TABLE xmltest ( doc xml );
+    INSERT INTO xmltest (doc) VALUES ('<test>ok</test>'); -- check that libxml2 support is enabled
   '';
-  make-postgresql-test = postgresql-name: postgresql-package: {
+  make-postgresql-test = postgresql-name: postgresql-package: makeTest {
     name = postgresql-name;
     meta = with pkgs.stdenv.lib.maintainers; {
       maintainers = [ zagy ];
@@ -27,17 +29,23 @@ let
       };
 
     testScript = ''
+      sub check_count {
+        my ($select, $nlines) = @_;
+        return 'test $(sudo -u postgres psql postgres -tAc "' . $select . '"|wc -l) -eq ' . $nlines;
+      }
+
       $machine->start;
       $machine->waitForUnit("postgresql");
       # postgresql should be available just after unit start
-      $machine->succeed("cat ${test-sql} | psql postgres");
+      $machine->succeed("cat ${test-sql} | sudo -u postgres psql");
       $machine->shutdown; # make sure that postgresql survive restart (bug #1735)
       sleep(2);
       $machine->start;
       $machine->waitForUnit("postgresql");
-      $machine->fail('test $(psql postgres -tAc "SELECT * FROM sth;"|wc -l) -eq 3');
-      $machine->succeed('test $(psql postgres -tAc "SELECT * FROM sth;"|wc -l) -eq 5');
-      $machine->fail('test $(psql postgres -tAc "SELECT * FROM sth;"|wc -l) -eq 4');
+      $machine->fail(check_count("SELECT * FROM sth;", 3));
+      $machine->succeed(check_count("SELECT * FROM sth;", 5));
+      $machine->fail(check_count("SELECT * FROM sth;", 4));
+      $machine->succeed(check_count("SELECT xpath(\'/test/text()\', doc) FROM xmltest;", 1));
       $machine->shutdown;
     '';