author    Eelco Dolstra <eelco.dolstra@logicblox.com>  2014-10-16 15:16:50 +0200
committer Eelco Dolstra <eelco.dolstra@logicblox.com>  2014-10-16 15:16:50 +0200
commit    09dc132e04c8a69581becb56a1c5e1bfee3aef7e (patch)
tree      186745725ff42ea953572f38eaca71161fee9406 /nixos/modules
parent    3c6efec2c09e9389eb973ec5e45cb8e04b35f8aa (diff)
parent    e39bf7a7043a466761754c423b3d5783a320a6ce (diff)
Merge remote-tracking branch 'origin/master' into staging
Conflicts:
	pkgs/development/libraries/poppler/default.nix
Diffstat (limited to 'nixos/modules')
-rw-r--r--  nixos/modules/config/pulseaudio.nix                     |   5
-rw-r--r--  nixos/modules/misc/ids.nix                              |   8
-rwxr-xr-x  nixos/modules/module-list.nix                           |   3
-rw-r--r--  nixos/modules/rename.nix                                |   1
-rw-r--r--  nixos/modules/services/logging/logrotate.nix            |   1
-rw-r--r--  nixos/modules/services/logging/logstash.nix             |  69
-rw-r--r--  nixos/modules/services/logging/syslog-ng.nix            |  38
-rw-r--r--  nixos/modules/services/misc/redmine.nix                 | 222
-rw-r--r--  nixos/modules/services/network-filesystems/nfsd.nix     |  10
-rw-r--r--  nixos/modules/services/networking/prosody.nix           | 280
-rw-r--r--  nixos/modules/services/networking/seeks.nix             |  75
-rw-r--r--  nixos/modules/services/scheduling/cron.nix              |  26
-rw-r--r--  nixos/modules/services/x11/window-managers/default.nix  |   3
-rw-r--r--  nixos/modules/services/x11/window-managers/stumpwm.nix  |  30
-rw-r--r--  nixos/modules/system/boot/stage-1-init.sh               |   8
-rw-r--r--  nixos/modules/system/boot/stage-1.nix                   |   3
-rw-r--r--  nixos/modules/tasks/filesystems/zfs.nix                 |   1
-rw-r--r--  nixos/modules/tasks/network-interfaces.nix              |  23
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix           |   4
-rw-r--r--  nixos/modules/virtualisation/qemu-vm.nix                |   2
20 files changed, 761 insertions(+), 51 deletions(-)
diff --git a/nixos/modules/config/pulseaudio.nix b/nixos/modules/config/pulseaudio.nix
index 737f0abc52f0..8b38489a8c19 100644
--- a/nixos/modules/config/pulseaudio.nix
+++ b/nixos/modules/config/pulseaudio.nix
@@ -12,7 +12,7 @@ let
 
   # Forces 32bit pulseaudio and alsaPlugins to be built/supported for apps
   # using 32bit alsa on 64bit linux.
-  enable32BitAlsaPlugins = stdenv.isx86_64 && (pkgs_i686.alsaLib != null);
+  enable32BitAlsaPlugins = stdenv.isx86_64 && (pkgs_i686.alsaLib != null && pkgs_i686.pulseaudio != null);
 
   ids = config.ids;
 
@@ -126,8 +126,7 @@ in {
     (mkIf cfg.enable {
       environment.systemPackages = [
         cfg.package
-        (lib.optional enable32BitAlsaPlugins pkgs_i686.pulseaudio)
-      ];
+      ] ++ lib.optionals enable32BitAlsaPlugins [ pkgs_i686.pulseaudio ];
 
       environment.etc = singleton {
         target = "asound.conf";
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index fa51f831481a..d28624c4326f 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -154,6 +154,10 @@
       collectd = 144;
       consul = 145;
       mailpile = 146;
+      redmine = 147;
+      seeks = 148;
+
+      prosody = 148;
 
       # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
 
@@ -275,6 +279,10 @@
       riemanndash = 138;
       uhub = 142;
       mailpile = 146;
+      redmine = 147;
+      seeks = 148;
+
+      prosody = 148;
 
       # When adding a gid, make sure it doesn't match an existing uid. And don't use gids above 399!
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 3db44a4d8de5..17d3140b087f 100755
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -174,6 +174,7 @@
   ./services/misc/nixos-manual.nix
   ./services/misc/nix-ssh-serve.nix
   ./services/misc/phd.nix
+  ./services/misc/redmine.nix
   ./services/misc/rippled.nix
   ./services/misc/rogue.nix
   ./services/misc/siproxd.nix
@@ -250,6 +251,7 @@
   ./services/networking/polipo.nix
   ./services/networking/prayer.nix
   ./services/networking/privoxy.nix
+  ./services/networking/prosody.nix
   ./services/networking/quassel.nix
   ./services/networking/radicale.nix
   ./services/networking/radvd.nix
@@ -257,6 +259,7 @@
   ./services/networking/rpcbind.nix
   ./services/networking/sabnzbd.nix
   ./services/networking/searx.nix
+  ./services/networking/seeks.nix
   ./services/networking/spiped.nix
   ./services/networking/ssh/lshd.nix
   ./services/networking/ssh/sshd.nix
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 019fbc721b17..073a22207652 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -130,5 +130,6 @@ in zipModules ([]
 ++ obsolete' [ "boot" "initrd" "luks" "enable" ]
 ++ obsolete' [ "programs" "bash" "enable" ]
 ++ obsolete' [ "services" "samba" "defaultShare" ]
+++ obsolete' [ "services" "syslog-ng" "serviceName" ]
 
 )
diff --git a/nixos/modules/services/logging/logrotate.nix b/nixos/modules/services/logging/logrotate.nix
index 6887ab1e8052..0186452de95e 100644
--- a/nixos/modules/services/logging/logrotate.nix
+++ b/nixos/modules/services/logging/logrotate.nix
@@ -21,6 +21,7 @@ in
 
       config = mkOption {
         default = "";
+        type = types.lines;
         description = ''
           The contents of the logrotate config file
         '';
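
Usage sketch: the config option typed above holds the raw logrotate configuration, so a setup could look roughly like this, assuming the usual services.logrotate option path used by this module (the rule body is illustrative only):

    services.logrotate = {
      enable = true;
      config = ''
        /var/log/myapp/*.log {
          weekly
          rotate 4
          compress
          missingok
        }
      '';
    };
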
diff --git a/nixos/modules/services/logging/logstash.nix b/nixos/modules/services/logging/logstash.nix
index df81ac142dc3..41f71be2365c 100644
--- a/nixos/modules/services/logging/logstash.nix
+++ b/nixos/modules/services/logging/logstash.nix
@@ -4,6 +4,9 @@ with lib;
 
 let
   cfg = config.services.logstash;
+  pluginPath = lib.concatStringsSep ":" cfg.plugins;
+  havePluginPath = lib.length cfg.plugins > 0;
+  ops = lib.optionalString;
 
 in
 
@@ -20,12 +23,50 @@ in
         description = "Enable logstash.";
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.logstash;
+        example = literalExample "pkgs.logstash";
+        description = "Logstash package to use.";
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        example = literalExample "[ pkgs.logstash-contrib ]";
+        description = "The paths to find other logstash plugins in.";
+      };
+
+      watchdogTimeout = mkOption {
+        type = types.int;
+        default = 10;
+        description = "Set watchdog timeout value in seconds.";
+      };
+
+      filterWorkers = mkOption {
+        type = types.int;
+        default = 1;
+        description = "The quantity of filter workers to run.";
+      };
+
       enableWeb = mkOption {
         type = types.bool;
         default = false;
         description = "Enable the logstash web interface.";
       };
 
+      address = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = "Address on which to start webserver.";
+      };
+
+      port = mkOption {
+        type = types.str;
+        default = "9292";
+        description = "Port on which to start webserver.";
+      };
+
       inputConfig = mkOption {
         type = types.lines;
         default = ''stdin { type => "example" }'';
@@ -79,19 +120,25 @@ in
       wantedBy = [ "multi-user.target" ];
       environment = { JAVA_HOME = jre; };
       serviceConfig = {
-        ExecStart = "${logstash}/bin/logstash agent -f ${writeText "logstash.conf" ''
-          input {
-            ${cfg.inputConfig}
-          }
+        ExecStart =
+          "${cfg.package}/bin/logstash agent " +
+          "-w ${toString cfg.filterWorkers} " +
+          ops havePluginPath "--pluginpath ${pluginPath} " +
+          "--watchdog-timeout ${toString cfg.watchdogTimeout} " +
+          "-f ${writeText "logstash.conf" ''
+            input {
+              ${cfg.inputConfig}
+            }
 
-          filter {
-            ${cfg.filterConfig}
-          }
+            filter {
+              ${cfg.filterConfig}
+            }
 
-          output {
-            ${cfg.outputConfig}
-          }
-        ''} ${optionalString cfg.enableWeb "-- web"}";
+            output {
+              ${cfg.outputConfig}
+            }
+          ''} " +
+          ops cfg.enableWeb "-- web -a ${cfg.address} -p ${cfg.port}";
       };
     };
   };
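
Usage sketch for the options added above (all option names come from the module; the concrete values and the stdout output are illustrative only):

    services.logstash = {
      enable = true;
      filterWorkers = 2;                       # passed as -w
      watchdogTimeout = 30;                    # passed as --watchdog-timeout
      # plugins = [ pkgs.logstash-contrib ];   # joined into --pluginpath
      enableWeb = true;
      address = "127.0.0.1";                   # web UI bind address (-a)
      port = "9292";                           # web UI port (-p)
      inputConfig = ''stdin { type => "example" }'';
      outputConfig = ''stdout { }'';
    };
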
diff --git a/nixos/modules/services/logging/syslog-ng.nix b/nixos/modules/services/logging/syslog-ng.nix
index 4a16b19134a0..f3991a411ec4 100644
--- a/nixos/modules/services/logging/syslog-ng.nix
+++ b/nixos/modules/services/logging/syslog-ng.nix
@@ -7,8 +7,7 @@ let
   cfg = config.services.syslog-ng;
 
   syslogngConfig = pkgs.writeText "syslog-ng.conf" ''
-    @version: 3.5
-    @include "scl.conf"
+    ${cfg.configHeader}
     ${cfg.extraConfig}
   '';
 
@@ -44,13 +43,13 @@ in {
           The package providing syslog-ng binaries.
         '';
       };
-      serviceName = mkOption {
-        type = types.str;
-        default = "syslog-ng";
+      listenToJournal = mkOption {
+        type = types.bool;
+        default = true;
         description = ''
-          The name of the systemd service that runs syslog-ng. Set this to
-          <literal>syslog</literal> if you want journald to automatically
-          forward all logs to syslog-ng.
+          Whether syslog-ng should listen to the syslog socket used
+          by journald, and therefore receive all logs that journald
+          produces.
         '';
       };
       extraModulePaths = mkOption {
@@ -72,16 +71,33 @@ in {
           Configuration added to the end of <literal>syslog-ng.conf</literal>.
         '';
       };
+      configHeader = mkOption {
+        type = types.lines;
+        default = ''
+          @version: 3.5
+          @include "scl.conf"
+        '';
+        description = ''
+          The very first lines of the configuration file. Should usually contain
+          the syslog-ng version header.
+        '';
+      };
     };
   };
 
   config = mkIf cfg.enable {
-    systemd.services."${cfg.serviceName}" = {
-      wantedBy = [ "multi-user.target" ];
+    systemd.sockets.syslog = mkIf cfg.listenToJournal {
+      wantedBy = [ "sockets.target" ];
+      socketConfig.Service = "syslog-ng.service";
+    };
+    systemd.services.syslog-ng = {
+      description = "syslog-ng daemon";
       preStart = "mkdir -p /{var,run}/syslog-ng";
+      wantedBy = optional (!cfg.listenToJournal) "multi-user.target";
+      after = [ "multi-user.target" ]; # makes sure hostname etc is set
       serviceConfig = {
         Type = "notify";
-        Sockets = "syslog.socket";
+        Sockets = if cfg.listenToJournal then "syslog.socket" else null;
         StandardOutput = "null";
         Restart = "on-failure";
         ExecStart = "${cfg.package}/sbin/syslog-ng ${concatStringsSep " " syslogngOptions}";
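
Usage sketch for the reworked interface (the configHeader value shown simply restates the module's default; extraConfig is omitted):

    services.syslog-ng = {
      enable = true;
      # With listenToJournal = true (the default) the unit is socket-activated
      # via syslog.socket and receives everything journald forwards.
      listenToJournal = true;
      configHeader = ''
        @version: 3.5
        @include "scl.conf"
      '';
    };
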
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
new file mode 100644
index 000000000000..d7e64590f503
--- /dev/null
+++ b/nixos/modules/services/misc/redmine.nix
@@ -0,0 +1,222 @@
+{ config, lib, pkgs, ... }:
+
+# TODO: support non-postgresql
+
+with lib;
+
+let
+  cfg = config.services.redmine;
+
+  ruby = pkgs.ruby;
+  rubyLibs = pkgs.rubyLibs;
+
+  databaseYml = ''
+    production:
+      adapter: postgresql
+      database: ${cfg.databaseName}
+      host: ${cfg.databaseHost}
+      password: ${cfg.databasePassword}
+      username: ${cfg.databaseUsername}
+      encoding: utf8
+  '';
+
+  configurationYml = ''
+    default:
+      # Absolute path to the directory where attachments are stored.
+      # The default is the 'files' directory in your Redmine instance.
+      # Your Redmine instance needs to have write permission on this
+      # directory.
+      # Examples:
+      # attachments_storage_path: /var/redmine/files
+      # attachments_storage_path: D:/redmine/files
+      attachments_storage_path: ${cfg.stateDir}/files
+
+      # Absolute path to the SCM commands errors (stderr) log file.
+      # The default is to log in the 'log' directory of your Redmine instance.
+      # Example:
+      # scm_stderr_log_file: /var/log/redmine_scm_stderr.log
+      scm_stderr_log_file: ${cfg.stateDir}/redmine_scm_stderr.log
+
+      ${cfg.extraConfig}
+  '';
+
+  unpackTheme = unpack "theme";
+  unpackPlugin = unpack "plugin";
+  unpack = id: (name: source:
+    pkgs.stdenv.mkDerivation {
+      name = "redmine-${id}-${name}";
+      buildInputs = [ pkgs.unzip ];
+      buildCommand = ''
+        mkdir -p $out
+        cd $out
+        unpackFile ${source}
+      '';
+    });
+
+in {
+
+  options = {
+    services.redmine = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable the redmine service.
+        '';
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "/var/redmine";
+        description = "The state directory, logs and plugins are stored here";
+      };
+
+      extraConfig = mkOption {
+        type = types.str;
+        default = "";
+        description = "Extra configuration in configuration.yml";
+      };
+
+      themes = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = "Set of themes";
+      };
+
+      plugins = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = "Set of plugins";
+      };
+
+      #databaseType = mkOption {
+      #  type = types.str;
+      #  default = "postgresql";
+      #  description = "Type of database";
+      #};
+
+      databaseHost = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = "Database hostname";
+      };
+
+      databasePassword = mkOption {
+        type = types.str;
+        default = "";
+        description = "Database user password";
+      };
+
+      databaseName = mkOption {
+        type = types.str;
+        default = "redmine";
+        description = "Database name";
+      };
+
+      databaseUsername = mkOption {
+        type = types.str;
+        default = "redmine";
+        description = "Database user";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.databasePassword != "";
+        message = "databasePassword must be set";
+      }
+    ];
+
+    users.extraUsers = [
+      { name = "redmine";
+        group = "redmine";
+        uid = config.ids.uids.redmine;
+      } ];
+
+    users.extraGroups = [
+      { name = "redmine";
+        gid = config.ids.gids.redmine;
+      } ];
+
+    systemd.services.redmine = {
+      after = [ "network.target" "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+      environment.RAILS_ENV = "production";
+      environment.RAILS_ETC = "${cfg.stateDir}/config";
+      environment.RAILS_LOG = "${cfg.stateDir}/log";
+      environment.RAILS_VAR = "${cfg.stateDir}/var";
+      environment.RAILS_CACHE = "${cfg.stateDir}/cache";
+      environment.RAILS_PLUGINS = "${cfg.stateDir}/plugins";
+      environment.RAILS_PUBLIC = "${cfg.stateDir}/public";
+      environment.RAILS_TMP = "${cfg.stateDir}/tmp";
+      environment.SCHEMA = "${cfg.stateDir}/cache/schema.db";
+      environment.HOME = "${pkgs.redmine}/share/redmine";
+      environment.REDMINE_LANG = "en";
+      environment.GEM_HOME = "${pkgs.redmine}/share/redmine/vendor/bundle/ruby/1.9.1";
+      environment.GEM_PATH = "${rubyLibs.bundler}/lib/ruby/gems/1.9";
+      path = with pkgs; [
+        imagemagickBig
+        subversion
+        mercurial
+        cvs
+        config.services.postgresql.package
+        bazaar
+        gitAndTools.git
+        # once we build binaries for darcs, enable it
+        #darcs
+      ];
+      preStart = ''
+        # TODO: use env vars
+        for i in plugins public/plugin_assets db files log config cache var/files tmp; do
+          mkdir -p ${cfg.stateDir}/$i
+        done
+
+        chown -R redmine:redmine ${cfg.stateDir}
+        chmod -R 755 ${cfg.stateDir}
+
+        rm -rf ${cfg.stateDir}/public/*
+        cp -R ${pkgs.redmine}/share/redmine/public/* ${cfg.stateDir}/public/
+        for theme in ${concatStringsSep " " (mapAttrsToList unpackTheme cfg.themes)}; do
+          ln -fs $theme/* ${cfg.stateDir}/public/themes/
+        done
+
+        rm -rf ${cfg.stateDir}/plugins/*
+        for plugin in ${concatStringsSep " " (mapAttrsToList unpackPlugin cfg.plugins)}; do
+          ln -fs $plugin/* ${cfg.stateDir}/plugins/''${plugin##*-redmine-plugin-}
+        done
+
+        ln -fs ${pkgs.writeText "database.yml" databaseYml} ${cfg.stateDir}/config/database.yml
+        ln -fs ${pkgs.writeText "configuration.yml" configurationYml} ${cfg.stateDir}/config/configuration.yml
+
+        if [ "${cfg.databaseHost}" = "127.0.0.1" ]; then
+          if ! test -e "${cfg.stateDir}/db-created"; then
+            psql postgres -c "CREATE ROLE redmine WITH LOGIN NOCREATEDB NOCREATEROLE NOCREATEUSER ENCRYPTED PASSWORD '${cfg.databasePassword}'"
+            ${config.services.postgresql.package}/bin/createdb --owner redmine redmine || true
+            touch "${cfg.stateDir}/db-created"
+          fi
+        fi
+
+        cd ${pkgs.redmine}/share/redmine/
+        ${ruby}/bin/rake db:migrate
+        ${ruby}/bin/rake redmine:plugins:migrate
+        ${ruby}/bin/rake redmine:load_default_data
+        ${ruby}/bin/rake generate_secret_token
+      '';
+
+      serviceConfig = {
+        PermissionsStartOnly = true; # preStart must be run as root
+        Type = "simple";
+        User = "redmine";
+        Group = "redmine";
+        TimeoutSec = "300";
+        WorkingDirectory = "${pkgs.redmine}/share/redmine";
+        ExecStart="${ruby}/bin/ruby ${pkgs.redmine}/share/redmine/script/rails server webrick -e production -P ${cfg.stateDir}/redmine.pid";
+      };
+
+    };
+
+  };
+
+}
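
Usage sketch for the new module (option names as defined above; the password is a placeholder, and the module asserts that databasePassword is non-empty):

    services.redmine = {
      enable = true;
      databaseHost = "127.0.0.1";     # local PostgreSQL; the role and db are created on first start
      databaseName = "redmine";
      databaseUsername = "redmine";
      databasePassword = "changeme";
      stateDir = "/var/redmine";
      # Hypothetical local archives, unpacked and linked into the state dir on start:
      # themes.mytheme   = ./redmine-theme.zip;
      # plugins.myplugin = ./redmine-plugin.tar.gz;
    };
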
diff --git a/nixos/modules/services/network-filesystems/nfsd.nix b/nixos/modules/services/network-filesystems/nfsd.nix
index 57d56cd72877..893df51fc1f6 100644
--- a/nixos/modules/services/network-filesystems/nfsd.nix
+++ b/nixos/modules/services/network-filesystems/nfsd.nix
@@ -64,6 +64,13 @@ in
            Use a fixed port for rpc.mountd, useful if the server is behind a firewall.
           '';
         };
+
+        lockdPort = mkOption {
+          default = 0;
+          description = ''
+            Fix the lockd port number. This is useful when setting firewall rules for NFS.
+          '';
+        };
       };
 
     };
@@ -104,6 +111,9 @@ in
             # Create a state directory required by NFSv4.
             mkdir -p /var/lib/nfs/v4recovery
 
+            ${pkgs.procps}/sbin/sysctl -w fs.nfs.nlm_tcpport=${builtins.toString cfg.lockdPort}
+            ${pkgs.procps}/sbin/sysctl -w fs.nfs.nlm_udpport=${builtins.toString cfg.lockdPort}
+
             rpc.nfsd \
               ${if cfg.hostName != null then "-H ${cfg.hostName}" else ""} \
               ${builtins.toString cfg.nproc}
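
Usage sketch, assuming these options live under services.nfs.server as in the rest of this module (the mountdPort name is taken from the surrounding context; port numbers are illustrative):

    services.nfs.server = {
      enable = true;
      mountdPort = 4002;   # existing option: fixed rpc.mountd port
      lockdPort  = 4001;   # new option: written to fs.nfs.nlm_{tcp,udp}port via sysctl
    };
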
diff --git a/nixos/modules/services/networking/prosody.nix b/nixos/modules/services/networking/prosody.nix
new file mode 100644
index 000000000000..f82f8bfddbb7
--- /dev/null
+++ b/nixos/modules/services/networking/prosody.nix
@@ -0,0 +1,280 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.prosody;
+
+  sslOpts = { ... }: {
+
+    options = {
+
+      # TODO: require attribute
+      key = mkOption {
+        type = types.str;
+        description = "Path to the key file";
+      };
+
+      # TODO: require attribute
+      cert = mkOption {
+        type = types.str;
+        description = "Path to the certificate file";
+      };
+    };
+  };
+
+  moduleOpts = {
+
+    roster = mkOption {
+      default = true;
+      description = "Allow users to have a roster";
+    };
+
+    saslauth = mkOption {
+      default = true;
+      description = "Authentication for clients and servers. Recommended if you want to log in.";
+    };
+
+    tls = mkOption {
+      default = true;
+      description = "Add support for secure TLS on c2s/s2s connections";
+    };
+
+    dialback = mkOption {
+      default = true;
+      description = "s2s dialback support";
+    };
+
+    disco = mkOption {
+      default = true;
+      description = "Service discovery";
+    };
+
+    legacyauth = mkOption {
+      default = true;
+      description = "Legacy authentication. Only used by some old clients and bots";
+    };
+
+    version = mkOption {
+      default = true;
+      description = "Replies to server version requests";
+    };
+
+    uptime = mkOption {
+      default = true;
+      description = "Report how long server has been running";
+    };
+
+    time = mkOption {
+      default = true;
+      description = "Let others know the time here on this server";
+    };
+
+    ping = mkOption {
+      default = true;
+      description = "Replies to XMPP pings with pongs";
+    };
+
+    console = mkOption {
+      default = false;
+      description = "telnet to port 5582";
+    };
+
+    bosh = mkOption {
+      default = false;
+      description = "Enable BOSH clients, aka 'Jabber over HTTP'";
+    };
+
+    httpserver = mkOption {
+      default = false;
+      description = "Serve static files from a directory over HTTP";
+    };
+
+    websocket = mkOption {
+      default = false;
+      description = "Enable WebSocket support";
+    };
+
+  };
+
+  createSSLOptsStr = o:
+    if o ? key && o ? cert then
+      ''ssl = { key = "${o.key}"; certificate = "${o.cert}"; };''
+    else "";
+
+  vHostOpts = { ... }: {
+
+    options = {
+
+      # TODO: require attribute
+      domain = mkOption {
+        type = types.str;
+        description = "Domain name";
+      };
+
+      enabled = mkOption {
+        default = false;
+        description = "Whether to enable the virtual host";
+      };
+
+      ssl = mkOption {
+        description = "Paths to SSL files";
+        default = null;
+        options = [ sslOpts ];
+      };
+
+      extraConfig = mkOption {
+        default = '''';
+        description = "Additional virtual host specific configuration";
+      };
+
+    };
+
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.prosody = {
+
+      enable = mkOption {
+        default = false;
+        description = "Whether to enable the prosody server";
+      };
+
+      allowRegistration = mkOption {
+        default = false;
+        description = "Allow account creation";
+      };
+
+      modules = moduleOpts;
+
+      extraModules = mkOption {
+        description = "Enable custom modules";
+        default = [];
+      };
+
+      virtualHosts = mkOption {
+
+        description = "Define the virtual hosts";
+
+        type = types.loaOf types.optionSet;
+
+        example = {
+          myhost = {
+            domain = "my-xmpp-example-host.org";
+            enabled = true;
+          };
+        };
+
+        default = {
+          localhost = {
+            domain = "localhost";
+            enabled = true;
+          };
+        };
+
+        options = [ vHostOpts ];
+      };
+
+      ssl = mkOption {
+        description = "Paths to SSL files";
+        default = null;
+        options = [ sslOpts ];
+      };
+
+      admins = mkOption {
+        description = "List of administrators of the current host";
+        example = [ "admin1@example.com" "admin2@example.com" ];
+        default = [];
+      };
+
+      extraConfig = mkOption {
+        default = '''';
+        description = "Additional prosody configuration";
+      };
+
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.prosody ];
+
+    environment.etc."prosody/prosody.cfg.lua".text = ''
+
+      pidfile = "/var/lib/prosody/prosody.pid"
+
+
+      log = "*syslog"
+
+      data_path = "/var/lib/prosody"
+
+      allow_registration = ${ if cfg.allowRegistration then "true" else "false" };
+
+      ${ optionalString cfg.modules.console "console_enabled = true;" }
+
+      ${ optionalString  (cfg.ssl != null) (createSSLOptsStr cfg.ssl) }
+
+      admins = { ${lib.concatStringsSep ", " (map (n: "\"${n}\"") cfg.admins) } };
+
+      modules_enabled = {
+
+        ${ lib.concatStringsSep "\n\ \ " (lib.mapAttrsToList
+          (name: val: optionalString val ''"${name}";'')
+        cfg.modules) }
+
+        ${ optionalString cfg.allowRegistration "\"register\"\;" }
+
+        ${ lib.concatStringsSep "\n" (map (x: "\"${x}\";") cfg.extraModules)}
+
+        "posix";
+      };
+
+      ${ cfg.extraConfig }
+
+      ${ lib.concatStringsSep "\n" (lib.mapAttrsToList (n: v: ''
+        VirtualHost "${v.domain}"
+          enabled = ${if v.enabled then "true" else "false"};
+          ${ optionalString (v.ssl != null) (createSSLOptsStr v.ssl) }
+          ${ v.extraConfig }
+        '') cfg.virtualHosts) }
+    '';
+
+    users.extraUsers.prosody = {
+      uid = config.ids.uids.prosody;
+      description = "Prosody user";
+      createHome = true;
+      group = "prosody";
+      home = "/var/lib/prosody";
+    };
+
+    users.extraGroups.prosody = {
+      gid = config.ids.gids.prosody;
+    };
+
+    systemd.services.prosody = {
+
+      description = "Prosody XMPP server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "prosody";
+        PIDFile = "/var/lib/prosody/prosody.pid";
+        ExecStart = "${pkgs.prosody}/bin/prosodyctl start";
+      };
+
+    };
+
+  };
+
+}
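
Usage sketch for the new prosody module (all option names appear above; the domain and certificate paths are placeholders):

    services.prosody = {
      enable = true;
      admins = [ "admin@example.org" ];
      allowRegistration = false;
      modules.websocket = true;
      ssl = {
        cert = "/etc/ssl/prosody/example.org.crt";
        key  = "/etc/ssl/prosody/example.org.key";
      };
      virtualHosts.example = {
        domain  = "example.org";
        enabled = true;
      };
    };
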
diff --git a/nixos/modules/services/networking/seeks.nix b/nixos/modules/services/networking/seeks.nix
new file mode 100644
index 000000000000..155ecbb98ef3
--- /dev/null
+++ b/nixos/modules/services/networking/seeks.nix
@@ -0,0 +1,75 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.seeks;
+
+  confDir = cfg.confDir;
+
+  seeks = pkgs.seeks.override { seeks_confDir = confDir; };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.seeks = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = "
+          Whether to enable the Seeks server.
+        ";
+      };
+
+      confDir = mkOption {
+        default = "";
+        type = types.str;
+        description = "
+          The Seeks server configuration. If it is not specified,
+          a default configuration is used (${seeks}/etc/seeks).
+        ";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.seeks.enable {
+
+    users.extraUsers.seeks =
+      { uid = config.ids.uids.seeks;
+        description = "Seeks user";
+        createHome = true;
+        home = "/var/lib/seeks";
+      };
+
+    users.extraGroups.seeks =
+      { gid = config.ids.gids.seeks;
+      };
+
+    systemd.services.seeks =
+      {
+        description = "Seeks server, the p2p search engine.";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = "seeks";
+          ExecStart = "${seeks}/bin/seeks";
+        };
+      };
+
+    environment.systemPackages = [ seeks ];
+
+  };
+
+}
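
Usage sketch for the seeks module (confDir is optional; when left empty the configuration shipped in the package is used):

    services.seeks = {
      enable = true;
      # confDir = "/var/lib/seeks/etc";   # hypothetical path to a custom config tree
    };
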
diff --git a/nixos/modules/services/scheduling/cron.nix b/nixos/modules/services/scheduling/cron.nix
index 9ce0bcbec7eb..ded3010ec5ae 100644
--- a/nixos/modules/services/scheduling/cron.nix
+++ b/nixos/modules/services/scheduling/cron.nix
@@ -25,6 +25,10 @@ let
     sendmailPath = "/var/setuid-wrappers/sendmail";
   };
 
+  allFiles = map (f: "\"${f}\"") (
+    [ "${systemCronJobsFile}" ] ++ config.services.cron.cronFiles
+  );
+
 in
 
 {
@@ -71,6 +75,15 @@ in
         '';
       };
 
+      cronFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = ''
+          A list of extra crontab files that will be read and appended to the main
+          crontab file when the cron service starts.
+        '';
+      };
+
     };
 
   };
@@ -78,14 +91,7 @@ in
 
   ###### implementation
 
-  config = mkIf config.services.cron.enable {
-
-    environment.etc = singleton
-      # The system-wide crontab.
-      { source = systemCronJobsFile;
-        target = "crontab";
-        mode = "0600"; # Cron requires this.
-      };
+  config = mkIf (config.services.cron.enable && allFiles != []) {
 
     security.setuidPrograms = [ "crontab" ];
 
@@ -100,6 +106,10 @@ in
 
         preStart =
           ''
+            rm -f /etc/crontab
+            cat ${toString allFiles} > /etc/crontab
+            chmod 0600 /etc/crontab
+
             mkdir -m 710 -p /var/cron
 
             # By default, allow all users to create a crontab.  This
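
Usage sketch for the new cronFiles option; the file names are hypothetical, and their contents are simply concatenated with the system crontab into /etc/crontab by the preStart script above:

    services.cron = {
      enable = true;
      cronFiles = [ ./backup.crontab ./cleanup.crontab ];
    };
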
diff --git a/nixos/modules/services/x11/window-managers/default.nix b/nixos/modules/services/x11/window-managers/default.nix
index 45a4e947e0aa..4f2a2309b60c 100644
--- a/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixos/modules/services/x11/window-managers/default.nix
@@ -18,6 +18,7 @@ in
       ./i3.nix
       ./herbstluftwm.nix
       ./bspwm.nix
+      ./stumpwm.nix
     ];
 
   options = {
@@ -60,4 +61,4 @@ in
   config = {
     services.xserver.displayManager.session = cfg.session;
   };
-}
+}
\ No newline at end of file
diff --git a/nixos/modules/services/x11/window-managers/stumpwm.nix b/nixos/modules/services/x11/window-managers/stumpwm.nix
new file mode 100644
index 000000000000..a876f13fd214
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/stumpwm.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.stumpwm;
+in
+
+{
+  options = {
+    services.xserver.windowManager.stumpwm = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description = "Enable the stumpwm tiling window manager.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "stumpwm";
+      start = "
+        ${pkgs.stumpwm}/bin/stumpwm
+      ";
+    };
+    environment.systemPackages = [ pkgs.stumpwm ];
+  };
+}
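
Usage sketch for the new window-manager module:

    services.xserver.enable = true;
    services.xserver.windowManager.stumpwm.enable = true;
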
diff --git a/nixos/modules/system/boot/stage-1-init.sh b/nixos/modules/system/boot/stage-1-init.sh
index f14f105ef239..5a9beeeafa1d 100644
--- a/nixos/modules/system/boot/stage-1-init.sh
+++ b/nixos/modules/system/boot/stage-1-init.sh
@@ -368,6 +368,14 @@ exec 3>&-
 @postMountCommands@
 
 
+# Emit a udev rule for /dev/root to prevent systemd from complaining.
+eval $(udevadm info --export --export-prefix=ROOT_ --device-id-of-file=$targetRoot || true)
+if [ "$ROOT_MAJOR" -a "$ROOT_MINOR" -a "$ROOT_MAJOR" != 0 ]; then
+    mkdir -p /run/udev/rules.d
+    echo 'ACTION=="add|change", SUBSYSTEM=="block", ENV{MAJOR}=="'$ROOT_MAJOR'", ENV{MINOR}=="'$ROOT_MINOR'", SYMLINK+="root"' > /run/udev/rules.d/61-dev-root-link.rules
+fi
+
+
 # Stop udevd.
 udevadm control --exit || true
 
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index 1ec11e70e845..6b09559876ca 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -346,9 +346,6 @@ in
       (isYes "BLK_DEV_INITRD")
     ];
 
-    # Prevent systemd from waiting for the /dev/root symlink.
-    systemd.units."dev-root.device".text = "";
-
     boot.initrd.supportedFilesystems = map (fs: fs.fsType) fileSystems;
 
   };
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index 1c4bbc16b499..eb72bfba33c0 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -155,6 +155,7 @@ in
       systemd.services."zpool-import" = {
         description = "Import zpools";
         after = [ "systemd-udev-settle.service" ];
+        wantedBy = [ "local-fs.target" ];
         serviceConfig = {
           Type = "oneshot";
           RemainAfterExit = true;
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index 6f6000cf3397..22b52f77b145 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -773,29 +773,28 @@ in
             wantedBy = [ "network.target" (subsystemDevice n) ];
             bindsTo = deps;
             after = deps;
+            before = [ "${n}-cfg.service" ];
             serviceConfig.Type = "oneshot";
             serviceConfig.RemainAfterExit = true;
             path = [ pkgs.ifenslave pkgs.iproute ];
             script = ''
-              # Remove Dead Interfaces
-              ip link set "${n}" down >/dev/null 2>&1 || true
-              ifenslave -d "${n}" >/dev/null 2>&1 || true
-              ip link del "${n}" >/dev/null 2>&1 || true
-
               ip link add name "${n}" type bond
 
               # !!! There must be a better way to wait for the interface
               while [ ! -d /sys/class/net/${n} ]; do sleep 0.1; done;
 
+              # Ensure the link is down so that we can set options
+              ip link set "${n}" down
+
               # Set the miimon and mode options
               ${optionalString (v.miimon != null)
-                "echo ${toString v.miimon} > /sys/class/net/${n}/bonding/miimon"}
+                "echo \"${toString v.miimon}\" >/sys/class/net/${n}/bonding/miimon"}
               ${optionalString (v.mode != null)
-                "echo \"${v.mode}\" > /sys/class/net/${n}/bonding/mode"}
+                "echo \"${v.mode}\" >/sys/class/net/${n}/bonding/mode"}
               ${optionalString (v.lacp_rate != null)
-                "echo \"${v.lacp_rate}\" > /sys/class/net/${n}/bonding/lacp_rate"}
+                "echo \"${v.lacp_rate}\" >/sys/class/net/${n}/bonding/lacp_rate"}
               ${optionalString (v.xmit_hash_policy != null)
-                "echo \"${v.xmit_hash_policy}\" > /sys/class/net/${n}/bonding/xmit_hash_policy"}
+                "echo \"${v.xmit_hash_policy}\" >/sys/class/net/${n}/bonding/xmit_hash_policy"}
 
               # Bring up the bond and enslave the specified interfaces
               ip link set "${n}" up
@@ -804,8 +803,10 @@ in
               '')}
             '';
             postStop = ''
-              ip link set "${n}" down >dev/null 2>&1 || true
-              ifenslave -d "${n}" >/dev/null 2>&1 || true
+              ${flip concatMapStrings v.interfaces (i: ''
+                ifenslave -d "${n}" "${i}" >/dev/null 2>&1 || true
+              '')}
+              ip link set "${n}" down >/dev/null 2>&1 || true
               ip link del "${n}" >/dev/null 2>&1 || true
             '';
           });
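
For reference, a sketch of a bond definition that exercises the sysfs writes above, assuming the attribute set is declared under networking.bonds as elsewhere in this file (interface names and values are illustrative):

    networking.bonds.bond0 = {
      interfaces = [ "enp3s0" "enp4s0" ];
      miimon = 100;                    # link-monitoring interval in ms
      mode = "802.3ad";                # written to /sys/class/net/bond0/bonding/mode
      lacp_rate = "fast";
      xmit_hash_policy = "layer2+3";
    };
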
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index e129e496fe36..552d787b4478 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -70,10 +70,10 @@ in
 
             # Register the paths in the Nix database.
             printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
-                chroot /mnt ${config.nix.package}/bin/nix-store --load-db
+                chroot /mnt ${config.nix.package}/bin/nix-store --load-db --option build-users-group ""
 
             # Create the system profile to allow nixos-rebuild to work.
-            chroot /mnt ${config.nix.package}/bin/nix-env \
+            chroot /mnt ${config.nix.package}/bin/nix-env --option build-users-group "" \
                 -p /nix/var/nix/profiles/system --set ${config.system.build.toplevel}
 
             # `nixos-rebuild' requires an /etc/NIXOS.
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index 33f48d65d43e..a7610b3e11a0 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -383,7 +383,7 @@ in
 
     # When building a regular system configuration, override whatever
     # video driver the host uses.
-    services.xserver.videoDrivers = mkVMOverride [ "vesa" ];
+    services.xserver.videoDrivers = mkVMOverride [ "modesetting" ];
     services.xserver.defaultDepth = mkVMOverride 0;
     services.xserver.resolutions = mkVMOverride [ { x = 1024; y = 768; } ];
     services.xserver.monitorSection =