Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/release-notes/rl-2009.xml              |  20
-rw-r--r--  nixos/modules/hardware/video/nvidia.nix                 |  64
-rw-r--r--  nixos/modules/module-list.nix                           |   1
-rw-r--r--  nixos/modules/security/doas.nix                         |   2
-rw-r--r--  nixos/modules/security/pam.nix                          |   2
-rw-r--r--  nixos/modules/services/backup/znapzend.nix              |  11
-rw-r--r--  nixos/modules/services/databases/postgresql.nix         |  12
-rw-r--r--  nixos/modules/services/network-filesystems/ipfs.nix     |   3
-rw-r--r--  nixos/modules/services/networking/pixiecore.nix         |   2
-rw-r--r--  nixos/modules/services/security/privacyidea.nix         | 279
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix    |  29
-rw-r--r--  nixos/modules/services/x11/window-managers/default.nix  |   1
-rw-r--r--  nixos/modules/services/x11/window-managers/tinywm.nix   |  25
-rw-r--r--  nixos/modules/system/boot/systemd.nix                   |  55
-rw-r--r--  nixos/modules/virtualisation/libvirtd.nix               |  15
-rw-r--r--  nixos/tests/all-tests.nix                               |   2
-rw-r--r--  nixos/tests/installer.nix                               |  28
-rw-r--r--  nixos/tests/nginx-pubhtml.nix                           |   1
-rw-r--r--  nixos/tests/nginx-sandbox.nix                           |  66
-rw-r--r--  nixos/tests/privacyidea.nix                             |  36
20 files changed, 604 insertions, 50 deletions
diff --git a/nixos/doc/manual/release-notes/rl-2009.xml b/nixos/doc/manual/release-notes/rl-2009.xml
index 3673f6e0d9c9..5b1d04e4bc16 100644
--- a/nixos/doc/manual/release-notes/rl-2009.xml
+++ b/nixos/doc/manual/release-notes/rl-2009.xml
@@ -235,7 +235,16 @@ php.override {
        Be aware that backwards state migrations are not supported by Deluge.
      </para>
    </listitem>
-
+   <listitem>
+     <para>
+       The new option <literal>services.nginx.enableSandbox</literal> starts the Nginx web server with additional sandbox/hardening options.
+       By default, write access to <literal>services.nginx.stateDir</literal> is allowed. To allow writing to other folders, use
+       <literal>systemd.services.nginx.serviceConfig.ReadWritePaths</literal>:
+       <programlisting>
+systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
+       </programlisting>
+     </para>
+   </listitem>
    <listitem>
     <para>
       The NixOS options <literal>nesting.clone</literal> and
@@ -335,6 +344,15 @@ php.override {
       your configuration simply remove the quotes around the numbers.
     </para>
    </listitem>
+   <listitem>
+    <para>
+      When using <literal>buildBazelPackage</literal> from Nixpkgs,
+      <literal>flat</literal> hash mode is now used for dependencies
+      instead of <literal>recursive</literal>. This is to better allow
+      using hashed mirrors where needed. As a result, these hashes
+      will have changed.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index 8c3d64fceb9c..6328971492c5 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -52,6 +52,15 @@ in
     ];
 
   options = {
+    hardware.nvidia.powerManagement.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Experimental power management through systemd. For more information, see
+        the NVIDIA docs, chapter 21, Configuring Power Management Support.
+      '';
+    };
+
     hardware.nvidia.modesetting.enable = mkOption {
       type = types.bool;
       default = false;
@@ -226,23 +235,51 @@ in
     environment.systemPackages = [ nvidia_x11.bin nvidia_x11.settings ]
       ++ filter (p: p != null) [ nvidia_x11.persistenced ];
 
+    systemd.packages = optional cfg.powerManagement.enable nvidia_x11.out;
+
+    systemd.services = let
+      baseNvidiaService = state: {
+        description = "NVIDIA system ${state} actions";
+
+        path = with pkgs; [ kbd ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
+        };
+      };
+
+      nvidiaService = sleepState: (baseNvidiaService sleepState) // {
+        before = [ "systemd-${sleepState}.service" ];
+        requiredBy = [ "systemd-${sleepState}.service" ];
+      };
+
+      services = (builtins.listToAttrs (map (t: nameValuePair "nvidia-${t}" (nvidiaService t)) ["hibernate" "suspend"]))
+        // {
+          nvidia-resume = (baseNvidiaService "resume") // {
+            after = [ "systemd-suspend.service" "systemd-hibernate.service" ];
+            requiredBy = [ "systemd-suspend.service" "systemd-hibernate.service" ];
+          };
+        };
+    in optionalAttrs cfg.powerManagement.enable services
+      // optionalAttrs nvidiaPersistencedEnabled {
+        "nvidia-persistenced" = mkIf nvidiaPersistencedEnabled {
+          description = "NVIDIA Persistence Daemon";
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            Type = "forking";
+            Restart = "always";
+            PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+            ExecStart = "${nvidia_x11.persistenced}/bin/nvidia-persistenced --verbose";
+            ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+          };
+        };
+      };
+
     systemd.tmpfiles.rules = optional config.virtualisation.docker.enableNvidia
         "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
       ++ optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
         "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
 
-    systemd.services."nvidia-persistenced" = mkIf nvidiaPersistencedEnabled {
-      description = "NVIDIA Persistence Daemon";
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        Type = "forking";
-        Restart = "always";
-        PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
-        ExecStart = "${nvidia_x11.persistenced}/bin/nvidia-persistenced --verbose";
-        ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
-      };
-    };
-
     boot.extraModulePackages = [ nvidia_x11.bin ];
 
     # nvidia-uvm is required by CUDA applications.
@@ -250,7 +287,8 @@ in
       optionals config.services.xserver.enable [ "nvidia" "nvidia_modeset" "nvidia_drm" ];
 
     # If requested enable modesetting via kernel parameter.
-    boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1";
+    boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
+      ++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1";
 
     # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
     services.udev.extraRules =
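
A minimal sketch of how the new power-management option might be enabled alongside a typical proprietary-driver setup (the surrounding driver configuration is an assumption; only hardware.nvidia.powerManagement.enable comes from this change):

  {
    services.xserver.videoDrivers = [ "nvidia" ];
    # Pulls in the nvidia-suspend/nvidia-hibernate/nvidia-resume units defined above
    # and adds nvidia.NVreg_PreserveVideoMemoryAllocations=1 to the kernel command line.
    hardware.nvidia.powerManagement.enable = true;
  }
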
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 40904ef0c175..5adbc26522cf 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -792,6 +792,7 @@
   ./services/security/nginx-sso.nix
   ./services/security/oauth2_proxy.nix
   ./services/security/oauth2_proxy_nginx.nix
+  ./services/security/privacyidea.nix
   ./services/security/physlock.nix
   ./services/security/shibboleth-sp.nix
   ./services/security/sks.nix
diff --git a/nixos/modules/security/doas.nix b/nixos/modules/security/doas.nix
index 1991a58db60d..b81f2d0c2d52 100644
--- a/nixos/modules/security/doas.nix
+++ b/nixos/modules/security/doas.nix
@@ -223,7 +223,7 @@ in
 
   config = mkIf cfg.enable {
 
-    security.doas.extraRules = [
+    security.doas.extraRules = mkOrder 600 [
       {
         groups = [ "wheel" ];
         noPass = !cfg.wheelNeedsPassword;
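
The point of mkOrder 600 is that the built-in wheel rule is now emitted before rules contributed through security.doas.extraRules from user configuration (which keep the default order of 1000); since doas uses the last matching rule, user rules can override it. A sketch with a hypothetical user alice:

  {
    security.doas.enable = true;
    # Rendered after the wheel rule above, so it takes precedence for alice.
    security.doas.extraRules = [
      { users = [ "alice" ]; noPass = true; cmd = "systemctl"; }
    ];
  }
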
diff --git a/nixos/modules/security/pam.nix b/nixos/modules/security/pam.nix
index b99316803f35..e1a94b0121ac 100644
--- a/nixos/modules/security/pam.nix
+++ b/nixos/modules/security/pam.nix
@@ -54,7 +54,7 @@ let
         description = ''
           If set, users listed in
           <filename>~/.yubico/authorized_yubikeys</filename>
-          are able to log in with the asociated Yubikey tokens.
+          are able to log in with the associated Yubikey tokens.
         '';
       };
 
diff --git a/nixos/modules/services/backup/znapzend.nix b/nixos/modules/services/backup/znapzend.nix
index 98cd647f61ac..8098617d11f3 100644
--- a/nixos/modules/services/backup/znapzend.nix
+++ b/nixos/modules/services/backup/znapzend.nix
@@ -268,7 +268,8 @@ let
 
   mkSrcAttrs = srcCfg: with srcCfg; {
     enabled = onOff enable;
-    mbuffer = with mbuffer; if enable then "${pkgs.mbuffer}/bin/mbuffer"
+    # mbuffer is not referenced by its full path to accommodate non-NixOS systems and differing mbuffer versions between source and target
+    mbuffer = with mbuffer; if enable then "mbuffer"
         + optionalString (port != null) ":${toString port}" else "off";
     mbuffer_size = mbuffer.size;
     post_znap_cmd = nullOff postsnap;
@@ -357,6 +358,12 @@ in
         default = false;
       };
 
+      features.oracleMode = mkEnableOption ''
+        Destroy snapshots one by one instead of using one long argument list.
+        If source and destination are out of sync for a long time, you may have
+        so many snapshots to destroy that the argument list gets too long and the
+        command fails.
+      '';
       features.recvu = mkEnableOption ''
         recvu feature which uses <literal>-u</literal> on the receiving end to keep the destination
         filesystem unmounted.
@@ -458,5 +465,5 @@ in
     };
   };
 
-  meta.maintainers = with maintainers; [ infinisil ];
+  meta.maintainers = with maintainers; [ infinisil SlothOfAnarchy ];
 }
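
For reference, the new flag is used like the other znapzend feature toggles; a sketch (the backup plans themselves are assumed to be configured elsewhere, e.g. via services.znapzend.zetup):

  {
    services.znapzend = {
      enable = true;
      features.oracleMode = true; # destroy snapshots one by one
    };
  }
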
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 93f5c1ca5f55..982480fbd99c 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -17,6 +17,7 @@ let
       hba_file = '${pkgs.writeText "pg_hba.conf" cfg.authentication}'
       ident_file = '${pkgs.writeText "pg_ident.conf" cfg.identMap}'
       log_destination = 'stderr'
+      log_line_prefix = '${cfg.logLinePrefix}'
       listen_addresses = '${if cfg.enableTCPIP then "*" else "localhost"}'
       port = ${toString cfg.port}
       ${cfg.extraConfig}
@@ -186,6 +187,17 @@ in
         '';
       };
 
+      logLinePrefix = mkOption {
+        type = types.str;
+        default = "[%p] ";
+        example = "%m [%p] ";
+        description = ''
+          A printf-style string that is output at the beginning of each log line.
+          The upstream default is <literal>'%m [%p] '</literal>, which includes the timestamp. We do
+          not include the timestamp, because the journal records it anyway.
+        '';
+      };
+
       extraPlugins = mkOption {
         type = types.listOf types.path;
         default = [];
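
A configuration that prefers the upstream prefix (timestamp included) can simply set the new option back; a minimal sketch:

  {
    services.postgresql = {
      enable = true;
      logLinePrefix = "%m [%p] "; # upstream default, includes the timestamp
    };
  }
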
diff --git a/nixos/modules/services/network-filesystems/ipfs.nix b/nixos/modules/services/network-filesystems/ipfs.nix
index 880f70ae1410..1f5c14d777d7 100644
--- a/nixos/modules/services/network-filesystems/ipfs.nix
+++ b/nixos/modules/services/network-filesystems/ipfs.nix
@@ -217,6 +217,9 @@ in {
         createHome = false;
         uid = config.ids.uids.ipfs;
         description = "IPFS daemon user";
+        packages = [
+          pkgs.ipfs-migrator
+        ];
       };
     };
 
diff --git a/nixos/modules/services/networking/pixiecore.nix b/nixos/modules/services/networking/pixiecore.nix
index 0e32f182e2a1..85aa40784af8 100644
--- a/nixos/modules/services/networking/pixiecore.nix
+++ b/nixos/modules/services/networking/pixiecore.nix
@@ -115,7 +115,7 @@ in
               if cfg.mode == "boot"
               then [ "boot" cfg.kernel ]
                    ++ optional (cfg.initrd != "") cfg.initrd
-                   ++ optional (cfg.cmdLine != "") "--cmdline=${lib.escapeShellArg cfg.cmdLine}"
+                   ++ optionals (cfg.cmdLine != "") [ "--cmdline" cfg.cmdLine ]
               else [ "api" cfg.apiServer ];
           in
             ''
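
With the cmdline now passed as a separate argv entry, no manual shell quoting is needed in the option value. A sketch of a boot-mode configuration (paths and kernel parameters are hypothetical):

  {
    services.pixiecore = {
      enable = true;
      mode = "boot";
      kernel = "/path/to/bzImage";
      initrd = "/path/to/initrd";
      cmdLine = "console=ttyS0 loglevel=4"; # passed verbatim as one argument
    };
  }
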
diff --git a/nixos/modules/services/security/privacyidea.nix b/nixos/modules/services/security/privacyidea.nix
new file mode 100644
index 000000000000..d6abfd0e2718
--- /dev/null
+++ b/nixos/modules/services/security/privacyidea.nix
@@ -0,0 +1,279 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.privacyidea;
+
+  uwsgi = pkgs.uwsgi.override { plugins = [ "python3" ]; };
+  python = uwsgi.python3;
+  penv = python.withPackages (ps: [ ps.privacyidea ]);
+  logCfg = pkgs.writeText "privacyidea-log.cfg" ''
+    [formatters]
+    keys=detail
+
+    [handlers]
+    keys=stream
+
+    [formatter_detail]
+    class=privacyidea.lib.log.SecureFormatter
+    format=[%(asctime)s][%(process)d][%(thread)d][%(levelname)s][%(name)s:%(lineno)d] %(message)s
+
+    [handler_stream]
+    class=StreamHandler
+    level=NOTSET
+    formatter=detail
+    args=(sys.stdout,)
+
+    [loggers]
+    keys=root,privacyidea
+
+    [logger_privacyidea]
+    handlers=stream
+    qualname=privacyidea
+    level=INFO
+
+    [logger_root]
+    handlers=stream
+    level=ERROR
+  '';
+
+  piCfgFile = pkgs.writeText "privacyidea.cfg" ''
+    SUPERUSER_REALM = [ '${concatStringsSep "', '" cfg.superuserRealm}' ]
+    SQLALCHEMY_DATABASE_URI = 'postgresql:///privacyidea'
+    SECRET_KEY = '${cfg.secretKey}'
+    PI_PEPPER = '${cfg.pepper}'
+    PI_ENCFILE = '${cfg.encFile}'
+    PI_AUDIT_KEY_PRIVATE = '${cfg.auditKeyPrivate}'
+    PI_AUDIT_KEY_PUBLIC = '${cfg.auditKeyPublic}'
+    PI_LOGCONFIG = '${logCfg}'
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+  options = {
+    services.privacyidea = {
+      enable = mkEnableOption "PrivacyIDEA";
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "/var/lib/privacyidea";
+        description = ''
+          Directory where all PrivacyIDEA files will be placed by default.
+        '';
+      };
+
+      superuserRealm = mkOption {
+        type = types.listOf types.str;
+        default = [ "super" "administrators" ];
+        description = ''
+          The realm where users are allowed to log in as administrators.
+        '';
+      };
+
+      secretKey = mkOption {
+        type = types.str;
+        example = "t0p s3cr3t";
+        description = ''
+          This is used to encrypt the auth_token.
+        '';
+      };
+
+      pepper = mkOption {
+        type = types.str;
+        example = "Never know...";
+        description = ''
+          This is used to encrypt the admin passwords.
+        '';
+      };
+
+      encFile = mkOption {
+        type = types.str;
+        default = "${cfg.stateDir}/enckey";
+        description = ''
+          This is used to encrypt the token data and token passwords.
+        '';
+      };
+
+      auditKeyPrivate = mkOption {
+        type = types.str;
+        default = "${cfg.stateDir}/private.pem";
+        description = ''
+          Private key for signing the audit log.
+        '';
+      };
+
+      auditKeyPublic = mkOption {
+        type = types.str;
+        default = "${cfg.stateDir}/public.pem";
+        description = ''
+          Public key for checking signatures of the audit log.
+        '';
+      };
+
+      adminPasswordFile = mkOption {
+        type = types.path;
+        description = "File containing password for the admin user";
+      };
+
+      adminEmail = mkOption {
+        type = types.str;
+        example = "admin@example.com";
+        description = "Mail address for the admin user";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra configuration options for pi.cfg.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "privacyidea";
+        description = "User account under which PrivacyIDEA runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "privacyidea";
+        description = "Group account under which PrivacyIDEA runs.";
+      };
+
+      ldap-proxy = {
+        enable = mkEnableOption "PrivacyIDEA LDAP Proxy";
+
+        configFile = mkOption {
+          type = types.path;
+          default = "";
+          description = ''
+            Path to PrivacyIDEA LDAP Proxy configuration (proxy.ini).
+          '';
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "pi-ldap-proxy";
+          description = "User account under which PrivacyIDEA LDAP proxy runs.";
+        };
+
+        group = mkOption {
+          type = types.str;
+          default = "pi-ldap-proxy";
+          description = "Group account under which PrivacyIDEA LDAP proxy runs.";
+        };
+      };
+    };
+  };
+
+  config = mkMerge [
+
+    (mkIf cfg.enable {
+
+      environment.systemPackages = [ python.pkgs.privacyidea ];
+
+      services.postgresql.enable = mkDefault true;
+
+      systemd.services.privacyidea = let
+        piuwsgi = pkgs.writeText "uwsgi.json" (builtins.toJSON {
+          uwsgi = {
+            plugins = [ "python3" ];
+            pythonpath = "${penv}/${uwsgi.python3.sitePackages}";
+            socket = "/run/privacyidea/socket";
+            uid = cfg.user;
+            gid = cfg.group;
+            chmod-socket = 770;
+            chown-socket = "${cfg.user}:nginx";
+            chdir = cfg.stateDir;
+            wsgi-file = "${penv}/etc/privacyidea/privacyideaapp.wsgi";
+            processes = 4;
+            harakiri = 60;
+            reload-mercy = 8;
+            stats = "/run/privacyidea/stats.socket";
+            max-requests = 2000;
+            limit-as = 1024;
+            reload-on-as = 512;
+            reload-on-rss = 256;
+            no-orphans = true;
+            vacuum = true;
+          };
+        });
+      in {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "postgresql.service" ];
+        path = with pkgs; [ openssl ];
+        environment.PRIVACYIDEA_CONFIGFILE = piCfgFile;
+        preStart = let
+          pi-manage = "${pkgs.sudo}/bin/sudo -u ${cfg.user} -HE ${penv}/bin/pi-manage";
+          pgsu = config.services.postgresql.superUser;
+          psql = config.services.postgresql.package;
+        in ''
+          mkdir -p ${cfg.stateDir} /run/privacyidea
+          chown ${cfg.user}:${cfg.group} -R ${cfg.stateDir} /run/privacyidea
+          if ! test -e "${cfg.stateDir}/db-created"; then
+            ${pkgs.sudo}/bin/sudo -u ${pgsu} ${psql}/bin/createuser --no-superuser --no-createdb --no-createrole ${cfg.user}
+            ${pkgs.sudo}/bin/sudo -u ${pgsu} ${psql}/bin/createdb --owner ${cfg.user} privacyidea
+            ${pi-manage} create_enckey
+            ${pi-manage} create_audit_keys
+            ${pi-manage} createdb
+            ${pi-manage} admin add admin -e ${cfg.adminEmail} -p "$(cat ${cfg.adminPasswordFile})"
+            ${pi-manage} db stamp head -d ${penv}/lib/privacyidea/migrations
+            touch "${cfg.stateDir}/db-created"
+            chmod g+r "${cfg.stateDir}/enckey" "${cfg.stateDir}/private.pem"
+          fi
+          ${pi-manage} db upgrade -d ${penv}/lib/privacyidea/migrations
+        '';
+        serviceConfig = {
+          Type = "notify";
+          ExecStart = "${uwsgi}/bin/uwsgi --json ${piuwsgi}";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          ExecStop = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
+          NotifyAccess = "main";
+          KillSignal = "SIGQUIT";
+          StandardError = "syslog";
+        };
+      };
+
+      users.users.privacyidea = mkIf (cfg.user == "privacyidea") {
+        group = cfg.group;
+      };
+
+      users.groups.privacyidea = mkIf (cfg.group == "privacyidea") {};
+    })
+
+    (mkIf cfg.ldap-proxy.enable {
+
+      systemd.services.privacyidea-ldap-proxy = let
+        ldap-proxy-env = pkgs.python2.withPackages (ps: [ ps.privacyidea-ldap-proxy ]);
+      in {
+        description = "privacyIDEA LDAP proxy";
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = cfg.ldap-proxy.user;
+          Group = cfg.ldap-proxy.group;
+          ExecStart = ''
+            ${ldap-proxy-env}/bin/twistd \
+              --nodaemon \
+              --pidfile= \
+              -u ${cfg.ldap-proxy.user} \
+              -g ${cfg.ldap-proxy.group} \
+              ldap-proxy \
+              -c ${cfg.ldap-proxy.configFile}
+          '';
+          Restart = "always";
+        };
+      };
+
+      users.users.pi-ldap-proxy = mkIf (cfg.ldap-proxy.user == "pi-ldap-proxy") {
+        group = cfg.ldap-proxy.group;
+      };
+
+      users.groups.pi-ldap-proxy = mkIf (cfg.ldap-proxy.group == "pi-ldap-proxy") {};
+    })
+  ];
+
+}
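
A minimal sketch of using the new module (values are placeholders; adminPasswordFile would normally point at a real secret kept outside the Nix store):

  {
    services.privacyidea = {
      enable = true;
      secretKey = "change-me";
      pepper = "change-me-too";
      adminEmail = "admin@example.com";
      adminPasswordFile = "/run/keys/privacyidea-admin"; # hypothetical path
    };
    # The module enables PostgreSQL by default and serves the application over a
    # uwsgi socket at /run/privacyidea/socket, to be fronted by e.g. nginx.
  }
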
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 1e9cda7e4785..312d2b0a21a7 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -463,6 +463,14 @@ in
         '';
       };
 
+      enableSandbox = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Whether to start the Nginx web server with additional sandbox/hardening options.
+        '';
+      };
+
       user = mkOption {
         type = types.str;
         default = "nginx";
@@ -710,6 +718,27 @@ in
         LogsDirectoryMode = "0750";
         # Capabilities
         AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" "CAP_SYS_RESOURCE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" "CAP_SYS_RESOURCE" ];
+        # Security
+        NoNewPrivileges = true;
+      } // optionalAttrs cfg.enableSandbox {
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = mkDefault true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = !(builtins.any (mod: (mod.allowMemoryWriteExecute or false)) cfg.package.modules);
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
       };
     };
 
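
Combined with the release-notes entry above, opting into the sandbox and whitelisting an additional writable path would look roughly like this (/var/www is an arbitrary example):

  {
    services.nginx.enable = true;
    services.nginx.enableSandbox = true;
    # ProtectSystem = "strict" makes the filesystem read-only for nginx,
    # so extra writable locations must be listed explicitly.
    systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
  }
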
diff --git a/nixos/modules/services/x11/window-managers/default.nix b/nixos/modules/services/x11/window-managers/default.nix
index 04a9fc46628c..b815c5f16a1e 100644
--- a/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixos/modules/services/x11/window-managers/default.nix
@@ -30,6 +30,7 @@ in
     ./sawfish.nix
     ./stumpwm.nix
     ./spectrwm.nix
+    ./tinywm.nix
     ./twm.nix
     ./windowmaker.nix
     ./wmii.nix
diff --git a/nixos/modules/services/x11/window-managers/tinywm.nix b/nixos/modules/services/x11/window-managers/tinywm.nix
new file mode 100644
index 000000000000..8e5d9b9170ca
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/tinywm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.tinywm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.tinywm.enable = mkEnableOption "tinywm";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "tinywm";
+      start = ''
+        ${pkgs.tinywm}/bin/tinywm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.tinywm ];
+  };
+}
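
Usage follows the usual window-manager pattern; a sketch:

  {
    services.xserver.enable = true;
    services.xserver.windowManager.tinywm.enable = true;
  }
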
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index ffc5387e8102..36a25c4e6c3a 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -201,8 +201,23 @@ let
     ];
 
   makeJobScript = name: text:
-    let mkScriptName =  s: "unit-script-" + (replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape s) );
-    in  pkgs.writeTextFile { name = mkScriptName name; executable = true; inherit text; };
+    let
+      scriptName = replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
+      out = pkgs.writeTextFile {
+        # The derivation name is different from the script file name
+        # to keep the script file name short to avoid cluttering logs.
+        name = "unit-script-${scriptName}";
+        executable = true;
+        destination = "/bin/${scriptName}";
+        text = ''
+          #!${pkgs.runtimeShell} -e
+          ${text}
+        '';
+        checkPhase = ''
+          ${pkgs.stdenv.shell} -n "$out/bin/${scriptName}"
+        '';
+      };
+    in "${out}/bin/${scriptName}";
 
   unitConfig = { config, options, ... }: {
     config = {
@@ -250,40 +265,28 @@ let
           environment.PATH = config.path;
         }
         (mkIf (config.preStart != "")
-          { serviceConfig.ExecStartPre = makeJobScript "${name}-pre-start" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.preStart}
-            '';
+          { serviceConfig.ExecStartPre =
+              makeJobScript "${name}-pre-start" config.preStart;
           })
         (mkIf (config.script != "")
-          { serviceConfig.ExecStart = makeJobScript "${name}-start" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.script}
-            '' + " " + config.scriptArgs;
+          { serviceConfig.ExecStart =
+              makeJobScript "${name}-start" config.script + " " + config.scriptArgs;
           })
         (mkIf (config.postStart != "")
-          { serviceConfig.ExecStartPost = makeJobScript "${name}-post-start" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.postStart}
-            '';
+          { serviceConfig.ExecStartPost =
+              makeJobScript "${name}-post-start" config.postStart;
           })
         (mkIf (config.reload != "")
-          { serviceConfig.ExecReload = makeJobScript "${name}-reload" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.reload}
-            '';
+          { serviceConfig.ExecReload =
+              makeJobScript "${name}-reload" config.reload;
           })
         (mkIf (config.preStop != "")
-          { serviceConfig.ExecStop = makeJobScript "${name}-pre-stop" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.preStop}
-            '';
+          { serviceConfig.ExecStop =
+              makeJobScript "${name}-pre-stop" config.preStop;
           })
         (mkIf (config.postStop != "")
-          { serviceConfig.ExecStopPost = makeJobScript "${name}-post-stop" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.postStop}
-            '';
+          { serviceConfig.ExecStopPost =
+              makeJobScript "${name}-post-stop" config.postStop;
           })
       ];
   };
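
The visible effect of this refactor is that the generated Exec* entries now reference a script under bin/ in its own store path (keeping the script file name short) and are syntax-checked at build time. A sketch with a hypothetical service my-app:

  {
    systemd.services.my-app = {
      preStart = ''
        mkdir -p /var/lib/my-app
      '';
      script = "exec my-app-daemon"; # hypothetical binary
    };
    # ExecStartPre previously pointed at a bare store text file; it now points at
    # /nix/store/<hash>-unit-script-my-app-pre-start/bin/my-app-pre-start,
    # which carries the runtimeShell shebang and has passed the `sh -n` check.
  }
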
diff --git a/nixos/modules/virtualisation/libvirtd.nix b/nixos/modules/virtualisation/libvirtd.nix
index 4f22099443f4..f89e5d544b22 100644
--- a/nixos/modules/virtualisation/libvirtd.nix
+++ b/nixos/modules/virtualisation/libvirtd.nix
@@ -7,10 +7,8 @@ let
   cfg = config.virtualisation.libvirtd;
   vswitch = config.virtualisation.vswitch;
   configFile = pkgs.writeText "libvirtd.conf" ''
-    unix_sock_group = "libvirtd"
-    unix_sock_rw_perms = "0770"
-    auth_unix_ro = "none"
-    auth_unix_rw = "none"
+    auth_unix_ro = "polkit"
+    auth_unix_rw = "polkit"
     ${cfg.extraConfig}
   '';
   qemuConfigFile = pkgs.writeText "qemu.conf" ''
@@ -269,5 +267,14 @@ in {
 
     systemd.sockets.libvirtd    .wantedBy = [ "sockets.target" ];
     systemd.sockets.libvirtd-tcp.wantedBy = [ "sockets.target" ];
+
+    security.polkit.extraConfig = ''
+      polkit.addRule(function(action, subject) {
+        if (action.id == "org.libvirt.unix.manage" &&
+          subject.isInGroup("libvirtd")) {
+          return polkit.Result.YES;
+        }
+      });
+    '';
   };
 }
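
Since socket access is now gated by polkit rather than unix_sock_group/unix_sock_rw_perms, membership in the libvirtd group remains the way to grant management access; a sketch with a hypothetical user alice:

  {
    virtualisation.libvirtd.enable = true;
    users.users.alice = {
      isNormalUser = true;
      extraGroups = [ "libvirtd" ]; # matched by the polkit rule above
    };
  }
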
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 5a0c9d1afae1..f3e90f9bfa70 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -225,6 +225,7 @@ in
   nginx = handleTest ./nginx.nix {};
   nginx-etag = handleTest ./nginx-etag.nix {};
   nginx-pubhtml = handleTest ./nginx-pubhtml.nix {};
+  nginx-sandbox = handleTestOn ["x86_64-linux"] ./nginx-sandbox.nix {};
   nginx-sso = handleTest ./nginx-sso.nix {};
   nix-ssh-serve = handleTest ./nix-ssh-serve.nix {};
   nixos-generate-config = handleTest ./nixos-generate-config.nix {};
@@ -260,6 +261,7 @@ in
   pppd = handleTest ./pppd.nix {};
   predictable-interface-names = handleTest ./predictable-interface-names.nix {};
   printing = handleTest ./printing.nix {};
+  privacyidea = handleTest ./privacyidea.nix {};
   prometheus = handleTest ./prometheus.nix {};
   prometheus-exporters = handleTest ./prometheus-exporters.nix {};
   prosody = handleTest ./xmpp/prosody.nix {};
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index 8d1bfa96d03d..eef9abebf9f2 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -97,7 +97,7 @@ let
 
 
       def create_machine_named(name):
-          return create_machine({**default_flags, "name": "boot-after-install"})
+          return create_machine({**default_flags, "name": name})
 
 
       machine.start()
@@ -650,6 +650,32 @@ in {
     '';
   };
 
+  bcache = makeInstallerTest "bcache" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda --"
+          + " mklabel msdos"
+          + " mkpart primary ext2 1M 50MB"  # /boot
+          + " mkpart primary 50MB 512MB  "  # swap
+          + " mkpart primary 512MB 1024MB"  # Cache (typically SSD)
+          + " mkpart primary 1024MB -1s ",  # Backing device (typically HDD)
+          "modprobe bcache",
+          "udevadm settle",
+          "make-bcache -B /dev/vda4 -C /dev/vda3",
+          "echo /dev/vda3 > /sys/fs/bcache/register",
+          "echo /dev/vda4 > /sys/fs/bcache/register",
+          "udevadm settle",
+          "mkfs.ext3 -L nixos /dev/bcache0",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+          "mkswap -f /dev/vda2 -L swap",
+          "swapon -L swap",
+      )
+    '';
+  };
+
   # Test a basic install using GRUB 1.
   grub1 = makeInstallerTest "grub1" {
     createPartitions = ''
diff --git a/nixos/tests/nginx-pubhtml.nix b/nixos/tests/nginx-pubhtml.nix
index 432913cb42d2..6e1e605628e9 100644
--- a/nixos/tests/nginx-pubhtml.nix
+++ b/nixos/tests/nginx-pubhtml.nix
@@ -2,6 +2,7 @@ import ./make-test-python.nix {
   name = "nginx-pubhtml";
 
   machine = { pkgs, ... }: {
+    systemd.services.nginx.serviceConfig.ProtectHome = "read-only";
     services.nginx.enable = true;
     services.nginx.virtualHosts.localhost = {
       locations."~ ^/\\~([a-z0-9_]+)(/.*)?$".alias = "/home/$1/public_html$2";
diff --git a/nixos/tests/nginx-sandbox.nix b/nixos/tests/nginx-sandbox.nix
new file mode 100644
index 000000000000..bc9d3ba8add7
--- /dev/null
+++ b/nixos/tests/nginx-sandbox.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx-sandbox";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ izorkin ];
+  };
+
+  # This test checks creating and reading a file in sandbox mode, using a simple Lua script.
+
+  machine = { pkgs, ... }: {
+    nixpkgs.overlays = [
+      (self: super: {
+        nginx-lua = super.nginx.override {
+          modules = [
+            pkgs.nginxModules.lua
+          ];
+        };
+      })
+    ];
+    services.nginx.enable = true;
+    services.nginx.package = pkgs.nginx-lua;
+    services.nginx.enableSandbox = true;
+    services.nginx.virtualHosts.localhost = {
+      extraConfig = ''
+        location /test1-write {
+          content_by_lua_block {
+            local create = os.execute('${pkgs.coreutils}/bin/mkdir /tmp/test1-read')
+            local create = os.execute('${pkgs.coreutils}/bin/touch /tmp/test1-read/foo.txt')
+            local echo = os.execute('${pkgs.coreutils}/bin/echo worked > /tmp/test1-read/foo.txt')
+          }
+        }
+        location /test1-read {
+          root /tmp;
+        }
+        location /test2-write {
+          content_by_lua_block {
+            local create = os.execute('${pkgs.coreutils}/bin/mkdir /var/web/test2-read')
+            local create = os.execute('${pkgs.coreutils}/bin/touch /var/web/test2-read/bar.txt')
+            local echo = os.execute('${pkgs.coreutils}/bin/echo error-worked > /var/web/test2-read/bar.txt')
+          }
+        }
+        location /test2-read {
+          root /var/web;
+        }
+      '';
+    };
+    users.users.foo.isNormalUser = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nginx")
+    machine.wait_for_open_port(80)
+
+    # Check writing to a temporary folder
+    machine.succeed("$(curl -vvv http://localhost/test1-write)")
+    machine.succeed('test "$(curl -fvvv http://localhost/test1-read/foo.txt)" = worked')
+
+    # Check writing to a protected folder. In sandbox mode, the folder /var/web is mounted
+    # read-only for the nginx service.
+    machine.succeed("mkdir -p /var/web")
+    machine.succeed("chown nginx:nginx /var/web")
+    machine.succeed("$(curl -vvv http://localhost/test2-write)")
+    assert "404 Not Found" in machine.succeed(
+        "curl -vvv -s http://localhost/test2-read/bar.txt"
+    )
+  '';
+})
diff --git a/nixos/tests/privacyidea.nix b/nixos/tests/privacyidea.nix
new file mode 100644
index 000000000000..45c7cd37c241
--- /dev/null
+++ b/nixos/tests/privacyidea.nix
@@ -0,0 +1,36 @@
+# Basic test of the PrivacyIDEA service.
+
+import ./make-test-python.nix ({ pkgs, ...} : rec {
+  name = "privacyidea";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ fpletz ];
+  };
+
+  machine = { ... }: {
+    virtualisation.cores = 2;
+    virtualisation.memorySize = 512;
+
+    services.privacyidea = {
+      enable = true;
+      secretKey = "testing";
+      pepper = "testing";
+      adminPasswordFile = pkgs.writeText "admin-password" "testing";
+      adminEmail = "root@localhost";
+    };
+    services.nginx = {
+      enable = true;
+      virtualHosts."_".locations."/".extraConfig = ''
+        uwsgi_pass unix:/run/privacyidea/socket;
+      '';
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("curl --fail http://localhost | grep privacyIDEA")
+    machine.succeed(
+        "curl --fail http://localhost/auth -F username=admin -F password=testing | grep token"
+    )
+  '';
+})