Diffstat (limited to 'nixos/modules/services')
-rw-r--r--  nixos/modules/services/admin/oxidized.nix | 116
-rw-r--r--  nixos/modules/services/amqp/activemq/default.nix | 4
-rw-r--r--  nixos/modules/services/amqp/rabbitmq.nix | 4
-rw-r--r--  nixos/modules/services/audio/liquidsoap.nix | 4
-rw-r--r--  nixos/modules/services/audio/mopidy.nix | 4
-rw-r--r--  nixos/modules/services/audio/mpd.nix | 13
-rw-r--r--  nixos/modules/services/audio/slimserver.nix | 3
-rw-r--r--  nixos/modules/services/audio/squeezelite.nix | 2
-rw-r--r--  nixos/modules/services/backup/bacula.nix | 15
-rw-r--r--  nixos/modules/services/backup/borgbackup.nix | 50
-rw-r--r--  nixos/modules/services/backup/crashplan-small-business.nix | 1
-rw-r--r--  nixos/modules/services/backup/crashplan.nix | 1
-rw-r--r--  nixos/modules/services/backup/duplicati.nix | 23
-rw-r--r--  nixos/modules/services/backup/mysql-backup.nix | 2
-rw-r--r--  nixos/modules/services/backup/postgresql-backup.nix | 62
-rw-r--r--  nixos/modules/services/backup/restic-rest-server.nix | 4
-rw-r--r--  nixos/modules/services/backup/restic.nix | 11
-rw-r--r--  nixos/modules/services/backup/znapzend.nix | 7
-rw-r--r--  nixos/modules/services/cluster/hadoop/conf.nix | 31
-rw-r--r--  nixos/modules/services/cluster/hadoop/default.nix | 60
-rw-r--r--  nixos/modules/services/cluster/hadoop/hdfs.nix | 73
-rw-r--r--  nixos/modules/services/cluster/hadoop/yarn.nix | 74
-rw-r--r--  nixos/modules/services/cluster/kubernetes/dashboard.nix | 239
-rw-r--r--  nixos/modules/services/cluster/kubernetes/default.nix | 133
-rw-r--r--  nixos/modules/services/cluster/kubernetes/dns.nix | 165
-rw-r--r--  nixos/modules/services/computing/slurm/slurm.nix | 92
-rw-r--r--  nixos/modules/services/continuous-integration/buildbot/master.nix | 4
-rw-r--r--  nixos/modules/services/continuous-integration/buildbot/worker.nix | 4
-rw-r--r--  nixos/modules/services/continuous-integration/buildkite-agent.nix | 2
-rw-r--r--  nixos/modules/services/continuous-integration/gitlab-runner.nix | 4
-rw-r--r--  nixos/modules/services/continuous-integration/gocd-agent/default.nix | 4
-rw-r--r--  nixos/modules/services/continuous-integration/gocd-server/default.nix | 8
-rw-r--r--  nixos/modules/services/continuous-integration/hydra/default.nix | 8
-rw-r--r--  nixos/modules/services/continuous-integration/jenkins/default.nix | 4
-rw-r--r--  nixos/modules/services/continuous-integration/jenkins/slave.nix | 4
-rw-r--r--  nixos/modules/services/databases/4store-endpoint.nix | 2
-rw-r--r--  nixos/modules/services/databases/4store.nix | 2
-rw-r--r--  nixos/modules/services/databases/aerospike.nix | 155
-rw-r--r--  nixos/modules/services/databases/cassandra.nix | 2
-rw-r--r--  nixos/modules/services/databases/clickhouse.nix | 4
-rw-r--r--  nixos/modules/services/databases/couchdb.nix | 4
-rw-r--r--  nixos/modules/services/databases/firebird.nix | 4
-rw-r--r--  nixos/modules/services/databases/foundationdb.nix | 4
-rw-r--r--  nixos/modules/services/databases/hbase.nix | 4
-rw-r--r--  nixos/modules/services/databases/influxdb.nix | 4
-rw-r--r--  nixos/modules/services/databases/memcached.nix | 2
-rw-r--r--  nixos/modules/services/databases/mongodb.nix | 2
-rw-r--r--  nixos/modules/services/databases/mysql.nix | 26
-rw-r--r--  nixos/modules/services/databases/neo4j.nix | 677
-rw-r--r--  nixos/modules/services/databases/openldap.nix | 4
-rw-r--r--  nixos/modules/services/databases/opentsdb.nix | 4
-rw-r--r--  nixos/modules/services/databases/pgmanage.nix | 68
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 9
-rw-r--r--  nixos/modules/services/databases/redis.nix | 2
-rw-r--r--  nixos/modules/services/databases/rethinkdb.nix | 4
-rw-r--r--  nixos/modules/services/databases/riak-cs.nix | 2
-rw-r--r--  nixos/modules/services/databases/riak.nix | 4
-rw-r--r--  nixos/modules/services/databases/stanchion.nix | 4
-rw-r--r--  nixos/modules/services/databases/virtuoso.nix | 2
-rw-r--r--  nixos/modules/services/desktops/bamf.nix | 23
-rw-r--r--  nixos/modules/services/desktops/flatpak.nix | 10
-rw-r--r--  nixos/modules/services/desktops/pipewire.nix | 22
-rw-r--r--  nixos/modules/services/development/bloop.nix | 37
-rw-r--r--  nixos/modules/services/editors/infinoted.nix | 4
-rw-r--r--  nixos/modules/services/games/minecraft-server.nix | 2
-rw-r--r--  nixos/modules/services/games/minetest-server.nix | 2
-rw-r--r--  nixos/modules/services/games/terraria.nix | 4
-rw-r--r--  nixos/modules/services/hardware/fwupd.nix | 2
-rw-r--r--  nixos/modules/services/hardware/pcscd.nix | 4
-rw-r--r--  nixos/modules/services/hardware/sane.nix | 4
-rw-r--r--  nixos/modules/services/hardware/tcsd.nix | 4
-rw-r--r--  nixos/modules/services/hardware/udisks2.nix | 11
-rw-r--r--  nixos/modules/services/hardware/usbmuxd.nix | 6
-rw-r--r--  nixos/modules/services/logging/graylog.nix | 2
-rw-r--r--  nixos/modules/services/logging/journaldriver.nix | 112
-rw-r--r--  nixos/modules/services/logging/journalwatch.nix | 4
-rw-r--r--  nixos/modules/services/logging/logcheck.nix | 2
-rw-r--r--  nixos/modules/services/mail/dovecot.nix | 6
-rw-r--r--  nixos/modules/services/mail/dspam.nix | 4
-rw-r--r--  nixos/modules/services/mail/exim.nix | 5
-rw-r--r--  nixos/modules/services/mail/mailhog.nix | 2
-rw-r--r--  nixos/modules/services/mail/mlmmj.nix | 4
-rw-r--r--  nixos/modules/services/mail/nullmailer.nix | 4
-rw-r--r--  nixos/modules/services/mail/opendkim.nix | 4
-rw-r--r--  nixos/modules/services/mail/opensmtpd.nix | 17
-rw-r--r--  nixos/modules/services/mail/postfix.nix | 4
-rw-r--r--  nixos/modules/services/mail/postgrey.nix | 4
-rw-r--r--  nixos/modules/services/mail/postsrsd.nix | 4
-rw-r--r--  nixos/modules/services/mail/rmilter.nix | 8
-rw-r--r--  nixos/modules/services/mail/rspamd.nix | 4
-rw-r--r--  nixos/modules/services/mail/spamassassin.nix | 4
-rw-r--r--  nixos/modules/services/misc/airsonic.nix | 2
-rw-r--r--  nixos/modules/services/misc/apache-kafka.nix | 2
-rw-r--r--  nixos/modules/services/misc/autorandr.nix | 24
-rw-r--r--  nixos/modules/services/misc/bepasty.nix | 4
-rw-r--r--  nixos/modules/services/misc/calibre-server.nix | 4
-rw-r--r--  nixos/modules/services/misc/cfdyndns.nix | 4
-rw-r--r--  nixos/modules/services/misc/cgminer.nix | 2
-rw-r--r--  nixos/modules/services/misc/clipmenu.nix | 31
-rw-r--r--  nixos/modules/services/misc/couchpotato.nix | 4
-rw-r--r--  nixos/modules/services/misc/dictd.nix | 4
-rw-r--r--  nixos/modules/services/misc/disnix.nix | 12
-rw-r--r--  nixos/modules/services/misc/docker-registry.nix | 44
-rw-r--r--  nixos/modules/services/misc/dysnomia.nix | 3
-rw-r--r--  nixos/modules/services/misc/emby.nix | 5
-rw-r--r--  nixos/modules/services/misc/errbot.nix | 4
-rw-r--r--  nixos/modules/services/misc/etcd.nix | 2
-rw-r--r--  nixos/modules/services/misc/exhibitor.nix | 3
-rw-r--r--  nixos/modules/services/misc/felix.nix | 4
-rw-r--r--  nixos/modules/services/misc/folding-at-home.nix | 2
-rw-r--r--  nixos/modules/services/misc/gammu-smsd.nix | 2
-rw-r--r--  nixos/modules/services/misc/geoip-updater.nix | 2
-rw-r--r--  nixos/modules/services/misc/gitea.nix | 78
-rw-r--r--  nixos/modules/services/misc/gitit.nix | 10
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 17
-rw-r--r--  nixos/modules/services/misc/gitolite.nix | 4
-rw-r--r--  nixos/modules/services/misc/gogs.nix | 4
-rw-r--r--  nixos/modules/services/misc/gollum.nix | 4
-rw-r--r--  nixos/modules/services/misc/gpsd.nix | 15
-rw-r--r--  nixos/modules/services/misc/home-assistant.nix | 18
-rw-r--r--  nixos/modules/services/misc/ihaskell.nix | 10
-rw-r--r--  nixos/modules/services/misc/jackett.nix | 4
-rw-r--r--  nixos/modules/services/misc/mathics.nix | 10
-rw-r--r--  nixos/modules/services/misc/matrix-synapse.nix | 4
-rw-r--r--  nixos/modules/services/misc/mediatomb.nix | 5
-rw-r--r--  nixos/modules/services/misc/nix-daemon.nix | 31
-rw-r--r--  nixos/modules/services/misc/nix-ssh-serve.nix | 4
-rw-r--r--  nixos/modules/services/misc/nixos-manual.nix | 2
-rw-r--r--  nixos/modules/services/misc/nzbget.nix | 6
-rw-r--r--  nixos/modules/services/misc/octoprint.nix | 4
-rw-r--r--  nixos/modules/services/misc/osrm.nix | 2
-rw-r--r--  nixos/modules/services/misc/plex.nix | 5
-rw-r--r--  nixos/modules/services/misc/plexpy.nix | 2
-rw-r--r--  nixos/modules/services/misc/pykms.nix | 4
-rw-r--r--  nixos/modules/services/misc/radarr.nix | 4
-rw-r--r--  nixos/modules/services/misc/redmine.nix | 4
-rw-r--r--  nixos/modules/services/misc/ripple-data-api.nix | 2
-rw-r--r--  nixos/modules/services/misc/rippled.nix | 2
-rw-r--r--  nixos/modules/services/misc/serviio.nix | 4
-rw-r--r--  nixos/modules/services/misc/siproxd.nix | 2
-rw-r--r--  nixos/modules/services/misc/sonarr.nix | 4
-rw-r--r--  nixos/modules/services/misc/subsonic.nix | 6
-rw-r--r--  nixos/modules/services/misc/taskserver/default.nix | 10
-rw-r--r--  nixos/modules/services/misc/uhub.nix | 4
-rw-r--r--  nixos/modules/services/misc/xmr-stak.nix | 60
-rw-r--r--  nixos/modules/services/misc/zookeeper.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/bosun.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/collectd.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/dd-agent/dd-agent.nix | 16
-rw-r--r--  nixos/modules/services/monitoring/fusion-inventory.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/grafana.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/graphite.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/heapster.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/munin.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/nagios.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/netdata.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/prometheus/default.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix | 39
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/node.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/riemann-dash.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/riemann-tools.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/riemann.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/scollector.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/statsd.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/telegraf.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/ups.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/vnstat.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/zabbix-agent.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/zabbix-server.nix | 2
-rw-r--r--  nixos/modules/services/network-filesystems/beegfs.nix | 2
-rw-r--r--  nixos/modules/services/network-filesystems/ceph.nix | 4
-rw-r--r--  nixos/modules/services/network-filesystems/davfs2.nix | 4
-rw-r--r--  nixos/modules/services/network-filesystems/ipfs.nix | 20
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/client.nix | 26
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/lib.nix | 11
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/server.nix | 34
-rw-r--r--  nixos/modules/services/network-filesystems/tahoe.nix | 4
-rw-r--r--  nixos/modules/services/network-filesystems/xtreemfs.nix | 4
-rw-r--r--  nixos/modules/services/network-filesystems/yandex-disk.nix | 2
-rw-r--r--  nixos/modules/services/networking/amuled.nix | 4
-rw-r--r--  nixos/modules/services/networking/aria2.nix | 4
-rw-r--r--  nixos/modules/services/networking/asterisk.nix | 4
-rw-r--r--  nixos/modules/services/networking/avahi-daemon.nix | 4
-rw-r--r--  nixos/modules/services/networking/bind.nix | 12
-rw-r--r--  nixos/modules/services/networking/bird.nix | 4
-rw-r--r--  nixos/modules/services/networking/bitlbee.nix | 4
-rw-r--r--  nixos/modules/services/networking/btsync.nix | 4
-rw-r--r--  nixos/modules/services/networking/charybdis.nix | 4
-rw-r--r--  nixos/modules/services/networking/chrony.nix | 6
-rw-r--r--  nixos/modules/services/networking/cjdns.nix | 3
-rw-r--r--  nixos/modules/services/networking/cntlm.nix | 2
-rw-r--r--  nixos/modules/services/networking/consul.nix | 2
-rw-r--r--  nixos/modules/services/networking/coturn.nix | 4
-rw-r--r--  nixos/modules/services/networking/dhcpd.nix | 2
-rw-r--r--  nixos/modules/services/networking/dnscache.nix | 2
-rw-r--r--  nixos/modules/services/networking/dnschain.nix | 2
-rw-r--r--  nixos/modules/services/networking/dnscrypt-proxy.nix | 7
-rw-r--r--  nixos/modules/services/networking/dnsdist.nix | 61
-rw-r--r--  nixos/modules/services/networking/dnsmasq.nix | 2
-rw-r--r--  nixos/modules/services/networking/ejabberd.nix | 4
-rw-r--r--  nixos/modules/services/networking/firewall.nix | 439
-rw-r--r--  nixos/modules/services/networking/freenet.nix | 4
-rw-r--r--  nixos/modules/services/networking/freeradius.nix | 2
-rw-r--r--  nixos/modules/services/networking/gale.nix | 4
-rw-r--r--  nixos/modules/services/networking/gateone.nix | 4
-rw-r--r--  nixos/modules/services/networking/gdomap.nix | 3
-rw-r--r--  nixos/modules/services/networking/git-daemon.nix | 4
-rw-r--r--  nixos/modules/services/networking/gnunet.nix | 4
-rw-r--r--  nixos/modules/services/networking/hans.nix | 2
-rw-r--r--  nixos/modules/services/networking/haproxy.nix | 4
-rw-r--r--  nixos/modules/services/networking/hostapd.nix | 2
-rw-r--r--  nixos/modules/services/networking/i2p.nix | 4
-rw-r--r--  nixos/modules/services/networking/i2pd.nix | 8
-rw-r--r--  nixos/modules/services/networking/iodine.nix | 4
-rw-r--r--  nixos/modules/services/networking/ircd-hybrid/default.nix | 4
-rw-r--r--  nixos/modules/services/networking/iwd.nix | 4
-rw-r--r--  nixos/modules/services/networking/kippo.nix | 4
-rw-r--r--  nixos/modules/services/networking/kresd.nix | 4
-rw-r--r--  nixos/modules/services/networking/lambdabot.nix | 4
-rw-r--r--  nixos/modules/services/networking/lldpd.nix | 4
-rw-r--r--  nixos/modules/services/networking/mailpile.nix | 4
-rw-r--r--  nixos/modules/services/networking/matterbridge.nix | 4
-rw-r--r--  nixos/modules/services/networking/minidlna.nix | 4
-rw-r--r--  nixos/modules/services/networking/mjpg-streamer.nix | 2
-rw-r--r--  nixos/modules/services/networking/monero.nix | 4
-rw-r--r--  nixos/modules/services/networking/morty.nix | 96
-rw-r--r--  nixos/modules/services/networking/mosquitto.nix | 4
-rw-r--r--  nixos/modules/services/networking/murmur.nix | 4
-rw-r--r--  nixos/modules/services/networking/namecoind.nix | 4
-rw-r--r--  nixos/modules/services/networking/nat.nix | 20
-rw-r--r--  nixos/modules/services/networking/networkmanager.nix | 186
-rw-r--r--  nixos/modules/services/networking/ngircd.nix | 2
-rw-r--r--  nixos/modules/services/networking/nix-serve.nix | 2
-rw-r--r--  nixos/modules/services/networking/nntp-proxy.nix | 2
-rw-r--r--  nixos/modules/services/networking/nsd.nix | 4
-rw-r--r--  nixos/modules/services/networking/ntpd.nix | 2
-rw-r--r--  nixos/modules/services/networking/nylon.nix | 4
-rw-r--r--  nixos/modules/services/networking/oidentd.nix | 4
-rw-r--r--  nixos/modules/services/networking/openntpd.nix | 12
-rw-r--r--  nixos/modules/services/networking/openvpn.nix | 3
-rw-r--r--  nixos/modules/services/networking/owamp.nix | 47
-rw-r--r--  nixos/modules/services/networking/pdns-recursor.nix | 2
-rw-r--r--  nixos/modules/services/networking/pdnsd.nix | 4
-rw-r--r--  nixos/modules/services/networking/polipo.nix | 4
-rw-r--r--  nixos/modules/services/networking/prayer.nix | 4
-rw-r--r--  nixos/modules/services/networking/prosody.nix | 4
-rw-r--r--  nixos/modules/services/networking/quagga.nix | 33
-rw-r--r--  nixos/modules/services/networking/quassel.nix | 4
-rw-r--r--  nixos/modules/services/networking/radicale.nix | 4
-rw-r--r--  nixos/modules/services/networking/radvd.nix | 2
-rw-r--r--  nixos/modules/services/networking/rdnssd.nix | 2
-rw-r--r--  nixos/modules/services/networking/resilio.nix | 4
-rw-r--r--  nixos/modules/services/networking/rpcbind.nix | 2
-rw-r--r--  nixos/modules/services/networking/sabnzbd.nix | 4
-rw-r--r--  nixos/modules/services/networking/searx.nix | 4
-rw-r--r--  nixos/modules/services/networking/seeks.nix | 4
-rw-r--r--  nixos/modules/services/networking/shairport-sync.nix | 2
-rw-r--r--  nixos/modules/services/networking/shout.nix | 2
-rw-r--r--  nixos/modules/services/networking/smokeping.nix | 2
-rw-r--r--  nixos/modules/services/networking/sniproxy.nix | 4
-rw-r--r--  nixos/modules/services/networking/spiped.nix | 4
-rw-r--r--  nixos/modules/services/networking/ssh/sshd.nix | 62
-rw-r--r--  nixos/modules/services/networking/sslh.nix | 114
-rw-r--r--  nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix | 7
-rw-r--r--  nixos/modules/services/networking/supplicant.nix | 2
-rw-r--r--  nixos/modules/services/networking/supybot.nix | 4
-rw-r--r--  nixos/modules/services/networking/syncthing.nix | 4
-rw-r--r--  nixos/modules/services/networking/tcpcrypt.nix | 2
-rw-r--r--  nixos/modules/services/networking/tinc.nix | 12
-rw-r--r--  nixos/modules/services/networking/tinydns.nix | 2
-rw-r--r--  nixos/modules/services/networking/tox-bootstrapd.nix | 2
-rw-r--r--  nixos/modules/services/networking/toxvpn.nix | 2
-rw-r--r--  nixos/modules/services/networking/tvheadend.nix | 4
-rw-r--r--  nixos/modules/services/networking/unbound.nix | 11
-rw-r--r--  nixos/modules/services/networking/unifi.nix | 2
-rw-r--r--  nixos/modules/services/networking/vsftpd.nix | 4
-rw-r--r--  nixos/modules/services/networking/wireguard.nix | 4
-rw-r--r--  nixos/modules/services/networking/xrdp.nix | 1
-rw-r--r--  nixos/modules/services/networking/zerotierone.nix | 2
-rw-r--r--  nixos/modules/services/networking/znc.nix | 16
-rw-r--r--  nixos/modules/services/printing/cupsd.nix | 19
-rw-r--r--  nixos/modules/services/scheduling/atd.nix | 4
-rw-r--r--  nixos/modules/services/scheduling/chronos.nix | 2
-rw-r--r--  nixos/modules/services/scheduling/fcron.nix | 3
-rw-r--r--  nixos/modules/services/scheduling/marathon.nix | 2
-rw-r--r--  nixos/modules/services/search/hound.nix | 4
-rw-r--r--  nixos/modules/services/search/kibana.nix | 2
-rw-r--r--  nixos/modules/services/security/clamav.nix | 4
-rw-r--r--  nixos/modules/services/security/fprot.nix | 4
-rw-r--r--  nixos/modules/services/security/munge.nix | 16
-rw-r--r--  nixos/modules/services/security/oauth2_proxy.nix | 3
-rw-r--r--  nixos/modules/services/security/oauth2_proxy_nginx.nix | 64
-rw-r--r--  nixos/modules/services/security/tor.nix | 55
-rw-r--r--  nixos/modules/services/security/vault.nix | 4
-rw-r--r--  nixos/modules/services/system/dbus.nix | 5
-rw-r--r--  nixos/modules/services/system/kerberos.nix | 8
-rw-r--r--  nixos/modules/services/system/nscd.nix | 2
-rw-r--r--  nixos/modules/services/system/saslauthd.nix | 1
-rw-r--r--  nixos/modules/services/system/uptimed.nix | 2
-rw-r--r--  nixos/modules/services/torrent/deluge.nix | 4
-rw-r--r--  nixos/modules/services/torrent/peerflix.nix | 2
-rw-r--r--  nixos/modules/services/torrent/transmission.nix | 10
-rw-r--r--  nixos/modules/services/web-apps/atlassian/confluence.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/atlassian/crowd.nix | 9
-rw-r--r--  nixos/modules/services/web-apps/atlassian/jira.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/frab.nix | 5
-rw-r--r--  nixos/modules/services/web-apps/matomo.nix | 6
-rw-r--r--  nixos/modules/services/web-apps/mattermost.nix | 12
-rw-r--r--  nixos/modules/services/web-apps/nexus.nix | 22
-rw-r--r--  nixos/modules/services/web-apps/restya-board.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/tt-rss.nix | 44
-rw-r--r--  nixos/modules/services/web-apps/virtlyst.nix | 72
-rw-r--r--  nixos/modules/services/web-servers/apache-httpd/default.nix | 11
-rw-r--r--  nixos/modules/services/web-servers/apache-httpd/owncloud.nix | 11
-rw-r--r--  nixos/modules/services/web-servers/apache-httpd/trac.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/caddy.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/hitch/default.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/hydron.nix | 105
-rw-r--r--  nixos/modules/services/web-servers/lighttpd/cgit.nix | 28
-rw-r--r--  nixos/modules/services/web-servers/lighttpd/default.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/lighttpd/inginious.nix | 261
-rw-r--r--  nixos/modules/services/web-servers/meguca.nix | 159
-rw-r--r--  nixos/modules/services/web-servers/mighttpd2.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/minio.nix | 6
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/tomcat.nix | 6
-rw-r--r--  nixos/modules/services/web-servers/traefik.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/uwsgi.nix | 16
-rw-r--r--  nixos/modules/services/web-servers/varnish/default.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/zope2.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/gnome3.nix | 1
-rw-r--r--  nixos/modules/services/x11/desktop-managers/plasma5.nix | 5
-rw-r--r--  nixos/modules/services/x11/desktop-managers/xfce.nix | 8
-rw-r--r--  nixos/modules/services/x11/display-managers/gdm.nix | 4
-rw-r--r--  nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix | 100
-rw-r--r--  nixos/modules/services/x11/display-managers/lightdm.nix | 7
-rw-r--r--  nixos/modules/services/x11/display-managers/sddm.nix | 45
-rw-r--r--  nixos/modules/services/x11/hardware/libinput.nix | 2
-rw-r--r--  nixos/modules/services/x11/window-managers/awesome.nix | 7
-rw-r--r--  nixos/modules/services/x11/xserver.nix | 7
341 files changed, 4277 insertions, 1663 deletions
diff --git a/nixos/modules/services/admin/oxidized.nix b/nixos/modules/services/admin/oxidized.nix
new file mode 100644
index 000000000000..70f7dd9e3647
--- /dev/null
+++ b/nixos/modules/services/admin/oxidized.nix
@@ -0,0 +1,116 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.oxidized;
+in
+{
+  options.services.oxidized = {
+    enable = mkEnableOption "the oxidized configuation backup service.";
+
+    user = mkOption {
+      type = types.str;
+      default = "oxidized";
+      description = ''
+        User under which the oxidized service runs.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "oxidized";
+      description = ''
+        Group under which the oxidized service runs.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/oxidized";
+      description = "State directory for the oxidized service.";
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      example = literalExample ''
+        pkgs.writeText "oxidized-config.yml" '''
+          ---
+          debug: true
+          use_syslog: true
+          input:
+            default: ssh
+            ssh:
+              secure: true
+          interval: 3600
+          model_map:
+            dell: powerconnect
+            hp: procurve
+          source:
+            default: csv
+            csv:
+              delimiter: !ruby/regexp /:/
+              file: "/var/lib/oxidized/.config/oxidized/router.db"
+              map:
+                name: 0
+                model: 1
+                username: 2
+                password: 3
+          pid: "/var/lib/oxidized/.config/oxidized/pid"
+          rest: 127.0.0.1:8888
+          retries: 3
+          # ... additional config
+        ''';
+      '';
+      description = ''
+        Path to the oxidized configuration file.
+      '';
+    };
+
+    routerDB = mkOption {
+      type = types.path;
+      example = literalExample ''
+        pkgs.writeText "oxidized-router.db" '''
+          hostname-sw1:powerconnect:username1:password2
+          hostname-sw2:procurve:username2:password2
+          # ... additional hosts
+        '''
+      '';
+      description = ''
+        Path to the file/database which contains the targets for oxidized.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.${cfg.group} = { };
+    users.users.${cfg.user} = {
+      description = "Oxidized service user";
+      group = cfg.group;
+      home = cfg.dataDir;
+      createHome = true;
+    };
+
+    systemd.services.oxidized = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        mkdir -p ${cfg.dataDir}/.config/oxidized
+        cp -v ${cfg.routerDB} ${cfg.dataDir}/.config/oxidized/router.db
+        cp -v ${cfg.configFile} ${cfg.dataDir}/.config/oxidized/config
+      '';
+
+      serviceConfig = {
+        ExecStart = "${pkgs.oxidized}/bin/oxidized";
+        User = cfg.user;
+        Group = cfg.group;
+        UMask = "0077";
+        NoNewPrivileges = true;
+        Restart  = "always";
+        WorkingDirectory = cfg.dataDir;
+        KillSignal = "SIGKILL";
+      };
+    };
+  };
+}
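
The new module can then be enabled with something along these lines (a
minimal sketch; the interval and the router credentials are hypothetical
example values, and both files are copied into the service's state
directory on start):

    services.oxidized = {
      enable = true;
      configFile = pkgs.writeText "oxidized-config.yml" ''
        ---
        interval: 3600
        # ... remaining oxidized settings
      '';
      routerDB = pkgs.writeText "oxidized-router.db" ''
        core-sw1:procurve:backup:secret
      '';
    };
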
diff --git a/nixos/modules/services/amqp/activemq/default.nix b/nixos/modules/services/amqp/activemq/default.nix
index 261f97617664..27bfd91cd2d5 100644
--- a/nixos/modules/services/amqp/activemq/default.nix
+++ b/nixos/modules/services/amqp/activemq/default.nix
@@ -93,13 +93,13 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers.activemq = {
+    users.users.activemq = {
       description = "ActiveMQ server user";
       group = "activemq";
       uid = config.ids.uids.activemq;
     };
 
-    users.extraGroups.activemq.gid = config.ids.gids.activemq;
+    users.groups.activemq.gid = config.ids.gids.activemq;
 
     systemd.services.activemq_init = {
       wantedBy = [ "activemq.service" ];
diff --git a/nixos/modules/services/amqp/rabbitmq.nix b/nixos/modules/services/amqp/rabbitmq.nix
index f536d56d7c63..bb6fc0a104df 100644
--- a/nixos/modules/services/amqp/rabbitmq.nix
+++ b/nixos/modules/services/amqp/rabbitmq.nix
@@ -83,7 +83,7 @@ in {
 
     environment.systemPackages = [ pkgs.rabbitmq_server ];
 
-    users.extraUsers.rabbitmq = {
+    users.users.rabbitmq = {
       description = "RabbitMQ server user";
       home = "${cfg.dataDir}";
       createHome = true;
@@ -91,7 +91,7 @@ in {
       uid = config.ids.uids.rabbitmq;
     };
 
-    users.extraGroups.rabbitmq.gid = config.ids.gids.rabbitmq;
+    users.groups.rabbitmq.gid = config.ids.gids.rabbitmq;
 
     systemd.services.rabbitmq = {
       description = "RabbitMQ Server";
diff --git a/nixos/modules/services/audio/liquidsoap.nix b/nixos/modules/services/audio/liquidsoap.nix
index 1c19ed36bdc7..66f84ef20762 100644
--- a/nixos/modules/services/audio/liquidsoap.nix
+++ b/nixos/modules/services/audio/liquidsoap.nix
@@ -57,7 +57,7 @@ in
 
   config = mkIf (builtins.length streams != 0) {
 
-    users.extraUsers.liquidsoap = {
+    users.users.liquidsoap = {
       uid = config.ids.uids.liquidsoap;
       group = "liquidsoap";
       extraGroups = [ "audio" ];
@@ -66,7 +66,7 @@ in
       createHome = true;
     };
 
-    users.extraGroups.liquidsoap.gid = config.ids.gids.liquidsoap;
+    users.groups.liquidsoap.gid = config.ids.gids.liquidsoap;
 
     systemd.services = builtins.listToAttrs ( map streamService streams );
   };
diff --git a/nixos/modules/services/audio/mopidy.nix b/nixos/modules/services/audio/mopidy.nix
index 52613d450b51..e2f4ec39f94c 100644
--- a/nixos/modules/services/audio/mopidy.nix
+++ b/nixos/modules/services/audio/mopidy.nix
@@ -93,7 +93,7 @@ in {
       };
     };
 
-    users.extraUsers.mopidy = {
+    users.users.mopidy = {
       inherit uid;
       group = "mopidy";
       extraGroups = [ "audio" ];
@@ -101,7 +101,7 @@ in {
       home = "${cfg.dataDir}";
     };
 
-    users.extraGroups.mopidy.gid = gid;
+    users.groups.mopidy.gid = gid;
 
   };
 
diff --git a/nixos/modules/services/audio/mpd.nix b/nixos/modules/services/audio/mpd.nix
index 5f379b392ea8..3add6556d0df 100644
--- a/nixos/modules/services/audio/mpd.nix
+++ b/nixos/modules/services/audio/mpd.nix
@@ -13,7 +13,9 @@ let
   mpdConf = pkgs.writeText "mpd.conf" ''
     music_directory     "${cfg.musicDirectory}"
     playlist_directory  "${cfg.playlistDirectory}"
-    db_file             "${cfg.dbFile}"
+    ${lib.optionalString (cfg.dbFile != null) ''
+      db_file             "${cfg.dbFile}"
+    ''}
     state_file          "${cfg.dataDir}/state"
     sticker_file        "${cfg.dataDir}/sticker.sql"
     log_file            "syslog"
@@ -126,11 +128,12 @@ in {
       };
 
       dbFile = mkOption {
-        type = types.str;
+        type = types.nullOr types.str;
         default = "${cfg.dataDir}/tag_cache";
         defaultText = ''''${dataDir}/tag_cache'';
         description = ''
-          The path to MPD's database.
+          The path to MPD's database. If set to <literal>null</literal> the
+          parameter is omitted from the configuration.
         '';
       };
     };
@@ -181,7 +184,7 @@ in {
       };
     };
 
-    users.extraUsers = optionalAttrs (cfg.user == name) (singleton {
+    users.users = optionalAttrs (cfg.user == name) (singleton {
       inherit uid;
       inherit name;
       group = cfg.group;
@@ -190,7 +193,7 @@ in {
       home = "${cfg.dataDir}";
     });
 
-    users.extraGroups = optionalAttrs (cfg.group == name) (singleton {
+    users.groups = optionalAttrs (cfg.group == name) (singleton {
       inherit name;
       gid = gid;
     });
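
With dbFile now nullable, the db_file line can be omitted from the
generated mpd.conf entirely, e.g. when a database section is supplied
through extraConfig instead. A minimal sketch (the music directory path is
a hypothetical example):

    services.mpd = {
      enable = true;
      musicDirectory = "/data/music";
      dbFile = null;  # omit the db_file line from mpd.conf
    };
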
diff --git a/nixos/modules/services/audio/slimserver.nix b/nixos/modules/services/audio/slimserver.nix
index 7d661dd60408..640403d2c97d 100644
--- a/nixos/modules/services/audio/slimserver.nix
+++ b/nixos/modules/services/audio/slimserver.nix
@@ -51,7 +51,8 @@ in {
       serviceConfig = {
         User = "slimserver";
         PermissionsStartOnly = true;
-        ExecStart = "${cfg.package}/slimserver.pl --logdir ${cfg.dataDir}/logs --prefsdir ${cfg.dataDir}/prefs --cachedir ${cfg.dataDir}/cache";
+        # Issue 40589: Disable broken image/video support (audio still works!)
+        ExecStart = "${cfg.package}/slimserver.pl --logdir ${cfg.dataDir}/logs --prefsdir ${cfg.dataDir}/prefs --cachedir ${cfg.dataDir}/cache --noimage --novideo";
       };
     };
 
diff --git a/nixos/modules/services/audio/squeezelite.nix b/nixos/modules/services/audio/squeezelite.nix
index f1a60be992d8..57ae38559939 100644
--- a/nixos/modules/services/audio/squeezelite.nix
+++ b/nixos/modules/services/audio/squeezelite.nix
@@ -54,7 +54,7 @@ in {
       };
     };
 
-    users.extraUsers.squeezelite= {
+    users.users.squeezelite = {
       inherit uid;
       group = "nogroup";
       extraGroups = [ "audio" ];
diff --git a/nixos/modules/services/backup/bacula.nix b/nixos/modules/services/backup/bacula.nix
index 340b0cf07234..be02ba567956 100644
--- a/nixos/modules/services/backup/bacula.nix
+++ b/nixos/modules/services/backup/bacula.nix
@@ -97,17 +97,6 @@ let
     ${dir_cfg.extraConfig}
     '';
 
-  # TODO: by default use this config
-  bconsole_conf = pkgs.writeText "bconsole.conf"
-    ''
-    Director {
-      Name = ${dir_cfg.name};
-      Address = "localhost";
-      DirPort = ${toString dir_cfg.port};
-      Password = "${dir_cfg.password}";
-    }
-    '';
-
   directorOptions = {name, config, ...}:
   {
     options = {
@@ -397,7 +386,7 @@ in {
 
     environment.systemPackages = [ pkgs.bacula ];
 
-    users.extraUsers.bacula = {
+    users.users.bacula = {
       group = "bacula";
       uid = config.ids.uids.bacula;
       home = "${libDir}";
@@ -406,6 +395,6 @@ in {
       shell = "${pkgs.bash}/bin/bash";
     };
 
-    users.extraGroups.bacula.gid = config.ids.gids.bacula;
+    users.groups.bacula.gid = config.ids.gids.bacula;
   };
 }
diff --git a/nixos/modules/services/backup/borgbackup.nix b/nixos/modules/services/backup/borgbackup.nix
index 1b730e0c2b76..0c3fc9af6f88 100644
--- a/nixos/modules/services/backup/borgbackup.nix
+++ b/nixos/modules/services/backup/borgbackup.nix
@@ -35,25 +35,26 @@ let
     ${cfg.preHook}
   '' + optionalString cfg.doInit ''
     # Run borg init if the repo doesn't exist yet
-    if ! borg list > /dev/null; then
-      borg init \
+    if ! borg list $extraArgs > /dev/null; then
+      borg init $extraArgs \
         --encryption ${cfg.encryption.mode} \
         $extraInitArgs
       ${cfg.postInit}
     fi
   '' + ''
-    borg create \
+    borg create $extraArgs \
       --compression ${cfg.compression} \
       --exclude-from ${mkExcludeFile cfg} \
       $extraCreateArgs \
       "::$archiveName$archiveSuffix" \
       ${escapeShellArgs cfg.paths}
   '' + optionalString cfg.appendFailedSuffix ''
-    borg rename "::$archiveName$archiveSuffix" "$archiveName"
+    borg rename $extraArgs \
+      "::$archiveName$archiveSuffix" "$archiveName"
   '' + ''
     ${cfg.postCreate}
   '' + optionalString (cfg.prune.keep != { }) ''
-    borg prune \
+    borg prune $extraArgs \
       ${mkKeepArgs cfg} \
       --prefix ${escapeShellArg cfg.prune.prefix} \
       $extraPruneArgs
@@ -85,13 +86,14 @@ let
         ProtectSystem = "strict";
         ReadWritePaths =
           [ "${userHome}/.config/borg" "${userHome}/.cache/borg" ]
+          ++ cfg.readWritePaths
           # Borg needs write access to repo if it is not remote
           ++ optional (isLocalPath cfg.repo) cfg.repo;
-        PrivateTmp = true;
+        PrivateTmp = cfg.privateTmp;
       };
       environment = {
         BORG_REPO = cfg.repo;
-        inherit (cfg) extraInitArgs extraCreateArgs extraPruneArgs;
+        inherit (cfg) extraArgs extraInitArgs extraCreateArgs extraPruneArgs;
       } // (mkPassEnv cfg) // cfg.environment;
       inherit (cfg) startAt;
     };
@@ -318,6 +320,30 @@ in {
             ];
           };
 
+          readWritePaths = mkOption {
+            type = with types; listOf path;
+            description = ''
+              By default, borg cannot write anywhere on the system but
+              <literal>$HOME/.config/borg</literal> and <literal>$HOME/.cache/borg</literal>.
+              If, for example, your preHook script needs to dump files
+              somewhere, put those directories here.
+            '';
+            default = [ ];
+            example = [
+              "/var/backup/mysqldump"
+            ];
+          };
+
+          privateTmp = mkOption {
+            type = types.bool;
+            description = ''
+              Set the <literal>PrivateTmp</literal> option for
+              the systemd service. Set this to false if you need access to
+              sockets or other files in the global /tmp.
+            '';
+            default = true;
+          };
+
           doInit = mkOption {
             type = types.bool;
             description = ''
@@ -430,6 +456,16 @@ in {
             default = "";
           };
 
+          extraArgs = mkOption {
+            type = types.str;
+            description = ''
+              Additional arguments for all <command>borg</command> calls the
+              service has. Handle with care.
+            '';
+            default = "";
+            example = "--remote-path=/path/to/borg";
+          };
+
           extraInitArgs = mkOption {
             type = types.str;
             description = ''
diff --git a/nixos/modules/services/backup/crashplan-small-business.nix b/nixos/modules/services/backup/crashplan-small-business.nix
index 9497d8c18bb7..790dafefe66f 100644
--- a/nixos/modules/services/backup/crashplan-small-business.nix
+++ b/nixos/modules/services/backup/crashplan-small-business.nix
@@ -3,7 +3,6 @@
 let
   cfg = config.services.crashplansb;
   crashplansb = pkgs.crashplansb.override { maxRam = cfg.maxRam; };
-  varDir = "/var/lib/crashplan";
 in
 
 with lib;
diff --git a/nixos/modules/services/backup/crashplan.nix b/nixos/modules/services/backup/crashplan.nix
index d0af2e416b63..c540cc6e2aee 100644
--- a/nixos/modules/services/backup/crashplan.nix
+++ b/nixos/modules/services/backup/crashplan.nix
@@ -3,7 +3,6 @@
 let
   cfg = config.services.crashplan;
   crashplan = pkgs.crashplan;
-  varDir = "/var/lib/crashplan";
 in
 
 with lib;
diff --git a/nixos/modules/services/backup/duplicati.nix b/nixos/modules/services/backup/duplicati.nix
index 9772ca4d20a7..80287f30b813 100644
--- a/nixos/modules/services/backup/duplicati.nix
+++ b/nixos/modules/services/backup/duplicati.nix
@@ -9,6 +9,23 @@ in
   options = {
     services.duplicati = {
       enable = mkEnableOption "Duplicati";
+
+      port = mkOption {
+        default = 8200;
+        type = types.int;
+        description = ''
+          Port serving the web interface
+        '';
+      };
+
+      interface = mkOption {
+        default = "lo";
+        type = types.str;
+        description = ''
+          Listening interface for the web UI.
+          Set it to "any" to listen on all available interfaces.
+        '';
+      };
     };
   };
 
@@ -22,18 +39,18 @@ in
       serviceConfig = {
         User = "duplicati";
         Group = "duplicati";
-        ExecStart = "${pkgs.duplicati}/bin/duplicati-server --webservice-interface=any --webservice-port=8200 --server-datafolder=/var/lib/duplicati";
+        ExecStart = "${pkgs.duplicati}/bin/duplicati-server --webservice-interface=${cfg.interface} --webservice-port=${toString cfg.port} --server-datafolder=/var/lib/duplicati";
         Restart = "on-failure";
       };
     };
 
-    users.extraUsers.duplicati = {
+    users.users.duplicati = {
       uid = config.ids.uids.duplicati;
       home = "/var/lib/duplicati";
       createHome = true;
       group = "duplicati";
     };
-    users.extraGroups.duplicati.gid = config.ids.gids.duplicati;
+    users.groups.duplicati.gid = config.ids.gids.duplicati;
 
   };
 }
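
With the new options the web UI can be exposed beyond localhost, e.g. (a
sketch; binding to "any" makes the UI reachable from other hosts, so keep
it behind a firewall or reverse proxy):

    services.duplicati = {
      enable = true;
      interface = "any";  # default is "lo"
      port = 8200;
    };
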
diff --git a/nixos/modules/services/backup/mysql-backup.nix b/nixos/modules/services/backup/mysql-backup.nix
index 3f533fa457dc..f0c273ffebf1 100644
--- a/nixos/modules/services/backup/mysql-backup.nix
+++ b/nixos/modules/services/backup/mysql-backup.nix
@@ -84,7 +84,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers = optionalAttrs (cfg.user == defaultUser) (singleton
+    users.users = optionalAttrs (cfg.user == defaultUser) (singleton
       { name = defaultUser;
         isSystemUser = true;
         createHome = false;
diff --git a/nixos/modules/services/backup/postgresql-backup.nix b/nixos/modules/services/backup/postgresql-backup.nix
index 4a5ebebc682e..2ec78ce6f2cf 100644
--- a/nixos/modules/services/backup/postgresql-backup.nix
+++ b/nixos/modules/services/backup/postgresql-backup.nix
@@ -3,18 +3,41 @@
 with lib;
 
 let
-  inherit (pkgs) gzip;
 
-  location = config.services.postgresqlBackup.location;
+  cfg = config.services.postgresqlBackup;
 
-  postgresqlBackupCron = db:
-    ''
-      ${config.services.postgresqlBackup.period} root ${config.services.postgresql.package}/bin/pg_dump ${db} | ${gzip}/bin/gzip -c > ${location}/${db}.gz
-    '';
+  postgresqlBackupService = db :
+    {
+      enable = true;
 
-in
+      description = "Backup of database ${db}";
 
-{
+      requires = [ "postgresql.service" ];
+
+      preStart = ''
+        mkdir -m 0700 -p ${cfg.location}
+        chown postgres ${cfg.location}
+      '';
+
+      script = ''
+        if [ -e ${cfg.location}/${db}.sql.gz ]; then
+          ${pkgs.coreutils}/bin/mv ${cfg.location}/${db}.sql.gz ${cfg.location}/${db}.prev.sql.gz
+        fi
+
+        ${config.services.postgresql.package}/bin/pg_dump ${cfg.pgdumpOptions} ${db} | \
+          ${pkgs.gzip}/bin/gzip -c > ${cfg.location}/${db}.sql.gz
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        PermissionsStartOnly = "true";
+        User = "postgres";
+      };
+
+      startAt = cfg.startAt;
+    };
+
+in {
 
   options = {
 
@@ -27,10 +50,10 @@ in
         '';
       };
 
-      period = mkOption {
-        default = "15 01 * * *";
+      startAt = mkOption {
+        default = "*-*-* 01:15:00";
         description = ''
-          This option defines (in the format used by cron) when the
+          This option defines (see <literal>systemd.time</literal> for format) when the
           databases should be dumped.
           The default is to update at 01:15 (at night) every day.
         '';
@@ -49,18 +72,23 @@ in
           Location to put the gzipped PostgreSQL database dumps.
         '';
       };
+
+      pgdumpOptions = mkOption {
+        type = types.str;
+        default = "-Cbo";
+        description = ''
+          Command line options for pg_dump.
+        '';
+      };
     };
 
   };
 
   config = mkIf config.services.postgresqlBackup.enable {
-    services.cron.systemCronJobs = map postgresqlBackupCron config.services.postgresqlBackup.databases;
 
-    system.activationScripts.postgresqlBackup = stringAfter [ "stdio" "users" ]
-      ''
-        mkdir -m 0700 -p ${config.services.postgresqlBackup.location}
-        chown root ${config.services.postgresqlBackup.location}
-      '';
+    systemd.services = listToAttrs (map (db : {
+          name = "postgresqlBackup-${db}";
+          value = postgresqlBackupService db; } ) cfg.databases);
   };
 
 }
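
Each configured database now gets its own oneshot unit,
postgresqlBackup-<db>.service, scheduled via startAt instead of a system
cron line. A sketch (the database name is a hypothetical example):

    services.postgresqlBackup = {
      enable = true;
      databases = [ "mydb" ];
      startAt = "*-*-* 01:15:00";  # systemd.time(7) calendar expression
      pgdumpOptions = "-Cbo";
    };
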
diff --git a/nixos/modules/services/backup/restic-rest-server.nix b/nixos/modules/services/backup/restic-rest-server.nix
index d4b47a099410..d1b775f150dc 100644
--- a/nixos/modules/services/backup/restic-rest-server.nix
+++ b/nixos/modules/services/backup/restic-rest-server.nix
@@ -95,13 +95,13 @@ in
       };
     };
 
-    users.extraUsers.restic = {
+    users.users.restic = {
       group = "restic";
       home = cfg.dataDir;
       createHome = true;
       uid = config.ids.uids.restic;
     };
 
-    users.extraGroups.restic.gid = config.ids.uids.restic;
+    users.groups.restic.gid = config.ids.uids.restic;
   };
 }
diff --git a/nixos/modules/services/backup/restic.nix b/nixos/modules/services/backup/restic.nix
index 21d82469c605..409c05221d03 100644
--- a/nixos/modules/services/backup/restic.nix
+++ b/nixos/modules/services/backup/restic.nix
@@ -14,7 +14,15 @@ with lib;
             Read the repository password from a file.
           '';
           example = "/etc/nixos/restic-password";
+        };
 
+        s3CredentialsFile = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = ''
+            File containing the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY for an
+            S3-hosted repository, in the format of an EnvironmentFile as described by systemd.exec(5).
+          '';
         };
 
         repository = mkOption {
@@ -119,7 +127,6 @@ with lib;
       mapAttrs' (name: backup:
         let
           extraOptions = concatMapStrings (arg: " -o ${arg}") backup.extraOptions;
-          connectTo = elemAt (splitString ":" backup.repository) 1;
           resticCmd = "${pkgs.restic}/bin/restic${extraOptions}";
         in nameValuePair "restic-backups-${name}" ({
           environment = {
@@ -134,6 +141,8 @@ with lib;
             Type = "oneshot";
             ExecStart = "${resticCmd} backup ${concatStringsSep " " backup.extraBackupArgs} ${concatStringsSep " " backup.paths}";
             User = backup.user;
+          } // optionalAttrs (backup.s3CredentialsFile != null) {
+            EnvironmentFile = backup.s3CredentialsFile;
           };
         } // optionalAttrs backup.initialize {
           preStart = ''
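
An S3-backed backup using the new option might look like this sketch (the
bucket and file paths are hypothetical examples; the credentials file
holds AWS_ACCESS_KEY_ID=... and AWS_SECRET_ACCESS_KEY=... lines in systemd
EnvironmentFile syntax):

    services.restic.backups.remote = {
      repository = "s3:https://s3.amazonaws.com/my-bucket";
      passwordFile = "/etc/nixos/restic-password";
      s3CredentialsFile = "/etc/nixos/restic-s3-credentials";
      paths = [ "/home" ];
    };
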
diff --git a/nixos/modules/services/backup/znapzend.nix b/nixos/modules/services/backup/znapzend.nix
index 3d133f82d204..fc8a424190f7 100644
--- a/nixos/modules/services/backup/znapzend.nix
+++ b/nixos/modules/services/backup/znapzend.nix
@@ -5,13 +5,6 @@ with types;
 
 let
 
-  # Converts a plan like
-  #   { "1d" = "1h"; "1w" = "1d"; }
-  # into
-  #   "1d=>1h,1w=>1d"
-  attrToPlan = attrs: concatStringsSep "," (builtins.attrValues (
-    mapAttrs (n: v: "${n}=>${v}") attrs));
-
   planDescription = ''
       The znapzend backup plan to use for the source.
     </para>
diff --git a/nixos/modules/services/cluster/hadoop/conf.nix b/nixos/modules/services/cluster/hadoop/conf.nix
new file mode 100644
index 000000000000..38db10406b9a
--- /dev/null
+++ b/nixos/modules/services/cluster/hadoop/conf.nix
@@ -0,0 +1,31 @@
+{ hadoop, pkgs }:
+let
+  propertyXml = name: value: ''
+    <property>
+      <name>${name}</name>
+      <value>${builtins.toString value}</value>
+    </property>
+  '';
+  siteXml = fileName: properties: pkgs.writeTextDir fileName ''
+    <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+    <!-- generated by NixOS -->
+    <configuration>
+      ${builtins.concatStringsSep "\n" (pkgs.lib.mapAttrsToList propertyXml properties)}
+    </configuration>
+  '';
+  userFunctions = ''
+    hadoop_verify_logdir() {
+      echo Skipping verification of log directory
+    }
+  '';
+in
+pkgs.buildEnv {
+  name = "hadoop-conf";
+  paths = [
+    (siteXml "core-site.xml" hadoop.coreSite)
+    (siteXml "hdfs-site.xml" hadoop.hdfsSite)
+    (siteXml "mapred-site.xml" hadoop.mapredSite)
+    (siteXml "yarn-site.xml" hadoop.yarnSite)
+    (pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions)
+  ];
+}
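
For illustration, a setting such as coreSite = { "fs.defaultFS" =
"hdfs://localhost"; } is rendered by siteXml into a core-site.xml roughly
like the following (exact whitespace may differ):

    <?xml version="1.0" encoding="UTF-8" standalone="no"?>
    <!-- generated by NixOS -->
    <configuration>
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost</value>
      </property>
    </configuration>
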
diff --git a/nixos/modules/services/cluster/hadoop/default.nix b/nixos/modules/services/cluster/hadoop/default.nix
new file mode 100644
index 000000000000..f0f5a6ecbfc5
--- /dev/null
+++ b/nixos/modules/services/cluster/hadoop/default.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ...}:
+
+with lib;
+{
+  imports = [ ./yarn.nix ./hdfs.nix ];
+
+  options.services.hadoop = {
+    coreSite = mkOption {
+      default = {};
+      example = {
+        "fs.defaultFS" = "hdfs://localhost";
+      };
+      description = "Hadoop core-site.xml definition";
+    };
+
+    hdfsSite = mkOption {
+      default = {};
+      example = {
+        "dfs.nameservices" = "namenode1";
+      };
+      description = "Hadoop hdfs-site.xml definition";
+    };
+
+    mapredSite = mkOption {
+      default = {};
+      example = {
+        "mapreduce.map.cpu.vcores" = "1";
+      };
+      description = "Hadoop mapred-site.xml definition";
+    };
+
+    yarnSite = mkOption {
+      default = {};
+      example = {
+        "yarn.resourcemanager.ha.id" = "resourcemanager1";
+      };
+      description = "Hadoop yarn-site.xml definition";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.hadoop;
+      defaultText = "pkgs.hadoop";
+      example = literalExample "pkgs.hadoop";
+      description =
+        "The Hadoop package to use.";
+    };
+  };
+
+
+  config = mkMerge [
+    (mkIf (builtins.hasAttr "yarn" config.users.users ||
+           builtins.hasAttr "hdfs" config.users.users) {
+      users.groups.hadoop = {
+        gid = config.ids.gids.hadoop;
+      };
+    })
+
+  ];
+}
diff --git a/nixos/modules/services/cluster/hadoop/hdfs.nix b/nixos/modules/services/cluster/hadoop/hdfs.nix
new file mode 100644
index 000000000000..a38b6a78d3a5
--- /dev/null
+++ b/nixos/modules/services/cluster/hadoop/hdfs.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, ...}:
+let
+  cfg = config.services.hadoop;
+  hadoopConf = import ./conf.nix { hadoop = cfg; pkgs = pkgs; };
+in
+with lib;
+{
+  options.services.hadoop.hdfs = {
+    namenode.enabled = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to run the Hadoop HDFS NameNode
+      '';
+    };
+    datanode.enabled = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to run the Hadoop HDFS DataNode
+      '';
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.hdfs.namenode.enabled {
+      systemd.services."hdfs-namenode" = {
+        description = "Hadoop HDFS NameNode";
+        wantedBy = [ "multi-user.target" ];
+
+        environment = {
+          HADOOP_HOME = "${cfg.package}";
+        };
+
+        preStart = ''
+          ${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
+        '';
+
+        serviceConfig = {
+          User = "hdfs";
+          SyslogIdentifier = "hdfs-namenode";
+          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode";
+        };
+      };
+    })
+    (mkIf cfg.hdfs.datanode.enabled {
+      systemd.services."hdfs-datanode" = {
+        description = "Hadoop HDFS DataNode";
+        wantedBy = [ "multi-user.target" ];
+
+        environment = {
+          HADOOP_HOME = "${cfg.package}";
+        };
+
+        serviceConfig = {
+          User = "hdfs";
+          SyslogIdentifier = "hdfs-datanode";
+          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} datanode";
+        };
+      };
+    })
+    (mkIf (
+        cfg.hdfs.namenode.enabled || cfg.hdfs.datanode.enabled
+    ) {
+      users.users.hdfs = {
+        description = "Hadoop HDFS user";
+        group = "hadoop";
+        uid = config.ids.uids.hdfs;
+      };
+    })
+
+  ];
+}
diff --git a/nixos/modules/services/cluster/hadoop/yarn.nix b/nixos/modules/services/cluster/hadoop/yarn.nix
new file mode 100644
index 000000000000..5345a2732d7e
--- /dev/null
+++ b/nixos/modules/services/cluster/hadoop/yarn.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ...}:
+let
+  cfg = config.services.hadoop;
+  hadoopConf = import ./conf.nix { hadoop = cfg; pkgs = pkgs; };
+in
+with lib;
+{
+  options.services.hadoop.yarn = {
+    resourcemanager.enabled = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to run the Hadoop YARN ResourceManager
+      '';
+    };
+    nodemanager.enabled = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to run the Hadoop YARN NodeManager
+      '';
+    };
+  };
+
+  config = mkMerge [
+    (mkIf (
+        cfg.yarn.resourcemanager.enabled || cfg.yarn.nodemanager.enabled
+    ) {
+
+      users.users.yarn = {
+        description = "Hadoop YARN user";
+        group = "hadoop";
+        uid = config.ids.uids.yarn;
+      };
+    })
+
+    (mkIf cfg.yarn.resourcemanager.enabled {
+      systemd.services."yarn-resourcemanager" = {
+        description = "Hadoop YARN ResourceManager";
+        wantedBy = [ "multi-user.target" ];
+
+        environment = {
+          HADOOP_HOME = "${cfg.package}";
+        };
+
+        serviceConfig = {
+          User = "yarn";
+          SyslogIdentifier = "yarn-resourcemanager";
+          ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
+                      " resourcemanager";
+        };
+      };
+    })
+
+    (mkIf cfg.yarn.nodemanager.enabled {
+      systemd.services."yarn-nodemanager" = {
+        description = "Hadoop YARN NodeManager";
+        wantedBy = [ "multi-user.target" ];
+
+        environment = {
+          HADOOP_HOME = "${cfg.package}";
+        };
+
+        serviceConfig = {
+          User = "yarn";
+          SyslogIdentifier = "yarn-nodemanager";
+          ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
+                      " nodemanager";
+        };
+      };
+    })
+
+  ];
+}
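
Combining the HDFS and YARN modules, a single-machine pseudo-distributed
cluster could be sketched as follows (the NameNode is formatted on first
start by the hdfs-namenode preStart hook):

    services.hadoop = {
      coreSite = { "fs.defaultFS" = "hdfs://localhost"; };
      hdfs.namenode.enabled = true;
      hdfs.datanode.enabled = true;
      yarn.resourcemanager.enabled = true;
      yarn.nodemanager.enabled = true;
    };
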
diff --git a/nixos/modules/services/cluster/kubernetes/dashboard.nix b/nixos/modules/services/cluster/kubernetes/dashboard.nix
index e331889b9dd5..cbd6e8f7bf73 100644
--- a/nixos/modules/services/cluster/kubernetes/dashboard.nix
+++ b/nixos/modules/services/cluster/kubernetes/dashboard.nix
@@ -4,38 +4,60 @@ with lib;
 
 let
   cfg = config.services.kubernetes.addons.dashboard;
-
-  name = "gcr.io/google_containers/kubernetes-dashboard-amd64";
-	version = "v1.8.2";
-
-  image = pkgs.dockerTools.pullImage {
-    imageName = name;
-    imageTag = version;
-    sha256 = "11h0fz3wxp0f10fsyqaxjm7l2qg7xws50dv5iwlck5gb1fjmajad";
-  };
 in {
   options.services.kubernetes.addons.dashboard = {
     enable = mkEnableOption "kubernetes dashboard addon";
 
-    enableRBAC = mkOption {
-      description = "Whether to enable role based access control is enabled for kubernetes dashboard";
-      type = types.bool;
-      default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
+    rbac = mkOption {
+      description = "Role-based access control (RBAC) options";
+      default = {};
+      type = types.submodule {
+        options = {
+          enable = mkOption {
+            description = "Whether to enable role based access control is enabled for kubernetes dashboard";
+            type = types.bool;
+            default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
+          };
+
+          clusterAdmin = mkOption {
+            description = "Whether to assign cluster admin rights to the kubernetes dashboard";
+            type = types.bool;
+            default = false;
+          };
+        };
+      };
+    };
+
+    version = mkOption {
+      description = "Which version of the kubernetes dashboard to deploy";
+      type = types.str;
+      default = "v1.8.3";
+    };
+
+    image = mkOption {
+      description = "Docker image to seed for the kubernetes dashboard container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
+        imageDigest = "sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0";
+        finalImageTag = cfg.version;
+        sha256 = "18ajcg0q1vignfjk2sm4xj4wzphfz8wah69ps8dklqfvv0164mc8";
+      };
     };
   };
 
   config = mkIf cfg.enable {
-    services.kubernetes.kubelet.seedDockerImages = [image];
+    services.kubernetes.kubelet.seedDockerImages = [(pkgs.dockerTools.pullImage cfg.image)];
 
     services.kubernetes.addonManager.addons = {
       kubernetes-dashboard-deployment = {
         kind = "Deployment";
-        apiVersion = "apps/v1beta1";
+        apiVersion = "apps/v1";
         metadata = {
           labels = {
             k8s-addon = "kubernetes-dashboard.addons.k8s.io";
             k8s-app = "kubernetes-dashboard";
-            version = version;
+            version = cfg.version;
             "kubernetes.io/cluster-service" = "true";
             "addonmanager.kubernetes.io/mode" = "Reconcile";
           };
@@ -51,45 +73,66 @@ in {
               labels = {
                 k8s-addon = "kubernetes-dashboard.addons.k8s.io";
                 k8s-app = "kubernetes-dashboard";
-                version = version;
+                version = cfg.version;
                 "kubernetes.io/cluster-service" = "true";
               };
               annotations = {
                 "scheduler.alpha.kubernetes.io/critical-pod" = "";
-                #"scheduler.alpha.kubernetes.io/tolerations" = ''[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'';
               };
             };
             spec = {
+              priorityClassName = "system-cluster-critical";
               containers = [{
                 name = "kubernetes-dashboard";
-                image = "${name}:${version}";
+                image = with cfg.image; "${imageName}:${finalImageTag}";
                 ports = [{
-                  containerPort = 9090;
+                  containerPort = 8443;
                   protocol = "TCP";
                 }];
                 resources = {
                   limits = {
                     cpu = "100m";
-                    memory = "50Mi";
+                    memory = "300Mi";
                   };
                   requests = {
                     cpu = "100m";
-                    memory = "50Mi";
+                    memory = "100Mi";
                   };
                 };
+                args = ["--auto-generate-certificates"];
+                volumeMounts = [{
+                  name = "tmp-volume";
+                  mountPath = "/tmp";
+                } {
+                  name = "kubernetes-dashboard-certs";
+                  mountPath = "/certs";
+                }];
                 livenessProbe = {
                   httpGet = {
+                    scheme = "HTTPS";
                     path = "/";
-                    port = 9090;
+                    port = 8443;
                   };
                   initialDelaySeconds = 30;
                   timeoutSeconds = 30;
                 };
               }];
+              volumes = [{
+                name = "kubernetes-dashboard-certs";
+                secret = {
+                  secretName = "kubernetes-dashboard-certs";
+                };
+              } {
+                name = "tmp-volume";
+                emptyDir = {};
+              }];
               serviceAccountName = "kubernetes-dashboard";
               tolerations = [{
                 key = "node-role.kubernetes.io/master";
                 effect = "NoSchedule";
+              } {
+                key = "CriticalAddonsOnly";
+                operator = "Exists";
               }];
             };
           };
@@ -112,8 +155,8 @@ in {
         };
         spec = {
           ports = [{
-            port = 80;
-            targetPort = 9090;
+            port = 443;
+            targetPort = 8443;
           }];
           selector.k8s-app = "kubernetes-dashboard";
         };
@@ -126,35 +169,153 @@ in {
           labels = {
             k8s-app = "kubernetes-dashboard";
             k8s-addon = "kubernetes-dashboard.addons.k8s.io";
-						"addonmanager.kubernetes.io/mode" = "Reconcile";
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
           };
           name = "kubernetes-dashboard";
           namespace = "kube-system";
         };
       };
-    } // (optionalAttrs cfg.enableRBAC {
-      kubernetes-dashboard-crb = {
-        apiVersion = "rbac.authorization.k8s.io/v1beta1";
-        kind = "ClusterRoleBinding";
+      kubernetes-dashboard-sec-certs = {
+        apiVersion = "v1";
+        kind = "Secret";
         metadata = {
-          name = "kubernetes-dashboard";
           labels = {
             k8s-app = "kubernetes-dashboard";
-            k8s-addon = "kubernetes-dashboard.addons.k8s.io";
-            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-certs";
+          namespace = "kube-system";
+        };
+        type = "Opaque";
+      };
+      kubernetes-dashboard-sec-kholder = {
+        apiVersion = "v1";
+        kind = "Secret";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
           };
+          name = "kubernetes-dashboard-key-holder";
+          namespace = "kube-system";
         };
-        roleRef = {
-          apiGroup = "rbac.authorization.k8s.io";
-          kind = "ClusterRole";
-          name = "cluster-admin";
+        type = "Opaque";
+      };
+      kubernetes-dashboard-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing the resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-settings";
+          namespace = "kube-system";
         };
+      };
+    } // (optionalAttrs cfg.rbac.enable
+      (let
         subjects = [{
           kind = "ServiceAccount";
           name = "kubernetes-dashboard";
           namespace = "kube-system";
         }];
-      };
-    });
+        labels = {
+          k8s-app = "kubernetes-dashboard";
+          k8s-addon = "kubernetes-dashboard.addons.k8s.io";
+          "addonmanager.kubernetes.io/mode" = "Reconcile";
+        };
+      in
+        (if cfg.rbac.clusterAdmin then {
+          kubernetes-dashboard-crb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "ClusterRoleBinding";
+            metadata = {
+              name = "kubernetes-dashboard";
+              inherit labels;
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "ClusterRole";
+              name = "cluster-admin";
+            };
+            inherit subjects;
+          };
+        }
+        else
+        {
+          # Upstream Role and RoleBinding as per:
+          # https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/alternative/kubernetes-dashboard.yaml
+          kubernetes-dashboard-role = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "Role";
+            metadata = {
+              name = "kubernetes-dashboard-minimal";
+              namespace = "kube-system";
+              inherit labels;
+            };
+            rules = [
+              # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
+              {
+                apiGroups = [""];
+                resources = ["secrets"];
+                verbs = ["create"];
+              }
+              # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
+              {
+                apiGroups = [""];
+                resources = ["configmaps"];
+                verbs = ["create"];
+              }
+              # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
+              {
+                apiGroups = [""];
+                resources = ["secrets"];
+                resourceNames = ["kubernetes-dashboard-key-holder"];
+                verbs = ["get" "update" "delete"];
+              }
+              # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
+              {
+                apiGroups = [""];
+                resources = ["configmaps"];
+                resourceNames = ["kubernetes-dashboard-settings"];
+                verbs = ["get" "update"];
+              }
+              # Allow Dashboard to get metrics from heapster.
+              {
+                apiGroups = [""];
+                resources = ["services"];
+                resourceNames = ["heapster"];
+                verbs = ["proxy"];
+              }
+              {
+                apiGroups = [""];
+                resources = ["services/proxy"];
+                resourceNames = ["heapster" "http:heapster:" "https:heapster:"];
+                verbs = ["get"];
+              }
+            ];
+          };
+
+          kubernetes-dashboard-rb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "RoleBinding";
+            metadata = {
+              name = "kubernetes-dashboard-minimal";
+              namespace = "kube-system";
+              inherit labels;
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "Role";
+              name = "kubernetes-dashboard-minimal";
+            };
+            inherit subjects;
+          };
+        })
+    ));
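+    # Illustrative usage (option path assumed from this module): grant the
+    # dashboard only the minimal upstream Role instead of cluster-admin with
+    #   services.kubernetes.addons.dashboard.rbac.clusterAdmin = false;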
   };
 }
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index aeb0a0d2432d..f56a529afdf6 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -5,8 +5,36 @@ with lib;
 let
   cfg = config.services.kubernetes;
 
-  skipAttrs = attrs: map (filterAttrs (k: v: k != "enable"))
-    (filter (v: !(hasAttr "enable" v) || v.enable) attrs);
+  # YAML config; see:
+  #   https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/
+  #   https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go
+  #
+  # TODO: migrate the following flags to this config file
+  #
+  #   --pod-manifest-path
+  #   --address
+  #   --port
+  #   --tls-cert-file
+  #   --tls-private-key-file
+  #   --client-ca-file
+  #   --authentication-token-webhook
+  #   --authentication-token-webhook-cache-ttl
+  #   --authorization-mode
+  #   --healthz-bind-address
+  #   --healthz-port
+  #   --allow-privileged
+  #   --cluster-dns
+  #   --cluster-domain
+  #   --hairpin-mode
+  #   --feature-gates
+  kubeletConfig = pkgs.runCommand "kubelet-config.yaml" { } ''
+    echo > $out ${pkgs.lib.escapeShellArg (builtins.toJSON {
+      kind = "KubeletConfiguration";
+      apiVersion = "kubelet.config.k8s.io/v1beta1";
+      ${if cfg.kubelet.applyManifests then "staticPodPath" else null} =
+        manifests;
+    })}
+  '';
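+  # For illustration, with applyManifests enabled the generated file is the
+  # JSON (a YAML subset; store path hypothetical):
+  #   {"kind":"KubeletConfiguration",
+  #    "apiVersion":"kubelet.config.k8s.io/v1beta1",
+  #    "staticPodPath":"/nix/store/...-kubernetes-manifests"}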
 
   infraContainer = pkgs.dockerTools.buildImage {
     name = "pause";
@@ -42,12 +70,14 @@ let
   mkKubeConfigOptions = prefix: {
     server = mkOption {
       description = "${prefix} kube-apiserver server address.";
-      default = "http://${cfg.apiserver.address}:${toString cfg.apiserver.port}";
+      default = "http://${if cfg.apiserver.advertiseAddress != null
+                          then cfg.apiserver.advertiseAddress
+                          else "127.0.0.1"}:${toString cfg.apiserver.port}";
       type = types.str;
     };
 
     caFile = mkOption {
-      description = "${prefix} certificate authrority file used to connect to kube-apiserver.";
+      description = "${prefix} certificate authority file used to connect to kube-apiserver.";
       type = types.nullOr types.path;
       default = cfg.caFile;
     };
@@ -72,12 +102,18 @@ let
     keyFile = mkDefault cfg.kubeconfig.keyFile;
   };
 
-  cniConfig = pkgs.buildEnv {
-    name = "kubernetes-cni-config";
-    paths = imap (i: entry:
-      pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
-    ) cfg.kubelet.cni.config;
-  };
+  cniConfig =
+    if cfg.kubelet.cni.config != [] && !(isNull cfg.kubelet.cni.configDir) then
+      throw "Verbatim CNI-config and CNI configDir cannot both be set."
+    else if !(isNull cfg.kubelet.cni.configDir) then
+      cfg.kubelet.cni.configDir
+    else
+      (pkgs.buildEnv {
+        name = "kubernetes-cni-config";
+        paths = imap (i: entry:
+          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
+        ) cfg.kubelet.cni.config;
+      });
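+  # Illustrative: either let the module generate numbered .conf files from
+  #   services.kubernetes.kubelet.cni.config = [ { type = "loopback"; } ];
+  # or point at a pre-existing directory (hypothetical path):
+  #   services.kubernetes.kubelet.cni.configDir = "/etc/cni/net.d";
+  # Setting both at once hits the throw above.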
 
   manifests = pkgs.buildEnv {
     name = "kubernetes-manifests";
@@ -213,18 +249,13 @@ in {
         type = types.listOf types.str;
       };
 
-      address = mkOption {
-        description = "Kubernetes apiserver listening address.";
-        default = "127.0.0.1";
-        type = types.str;
-      };
-
-      publicAddress = mkOption {
+      bindAddress = mkOption {
         description = ''
-          Kubernetes apiserver public listening address used for read only and
-          secure port.
+          The IP address on which to listen for the --secure-port port.
+          The associated interface(s) must be reachable by the rest
+          of the cluster, and by CLI/web clients.
         '';
-        default = cfg.apiserver.address;
+        default = "0.0.0.0";
         type = types.str;
       };
 
@@ -279,7 +310,7 @@ in {
       tokenAuthFile = mkOption {
         description = ''
           Kubernetes apiserver token authentication file. See
-          <link xlink:href="https://kubernetes.io/docs/admin/authentication.html"/>
+          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
         '';
         default = null;
         type = types.nullOr types.path;
@@ -288,7 +319,7 @@ in {
       basicAuthFile = mkOption {
         description = ''
           Kubernetes apiserver basic authentication file. See
-          <link xlink:href="https://kubernetes.io/docs/admin/authentication.html"/>
+          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
         '';
         default = pkgs.writeText "users" ''
           kubernetes,admin,0
@@ -298,22 +329,31 @@ in {
 
       authorizationMode = mkOption {
         description = ''
-          Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/RBAC). See
-          <link xlink:href="https://kubernetes.io/docs/admin/authorization.html"/>
+          Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
+          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
         '';
         default = ["RBAC" "Node"];
-        type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "RBAC" "Node"]);
+        type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
       };
 
       authorizationPolicy = mkOption {
         description = ''
           Kubernetes apiserver authorization policy file. See
-          <link xlink:href="https://kubernetes.io/docs/admin/authorization.html"/>
+          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
         '';
         default = [];
         type = types.listOf types.attrs;
       };
 
+      webhookConfig = mkOption {
+        description = ''
+          Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
+          See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
+        '';
+        default = null;
+        type = types.nullOr types.path;
+      };
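+      # Illustrative: Webhook authorization requires both entries below
+      # (file path hypothetical):
+      #   services.kubernetes.apiserver.authorizationMode = [ "RBAC" "Node" "Webhook" ];
+      #   services.kubernetes.apiserver.webhookConfig = "/etc/kubernetes/authz-webhook.kubeconfig";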
+
       allowPrivileged = mkOption {
         description = "Whether to allow privileged containers on Kubernetes.";
         default = true;
@@ -332,16 +372,16 @@ in {
       runtimeConfig = mkOption {
         description = ''
           Api runtime configuration. See
-          <link xlink:href="https://kubernetes.io/docs/admin/cluster-management.html"/>
+          <link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
         '';
         default = "authentication.k8s.io/v1beta1=true";
         example = "api/all=false,api/v1=true";
         type = types.str;
       };
 
-      admissionControl = mkOption {
+      enableAdmissionPlugins = mkOption {
         description = ''
-          Kubernetes admission control plugins to use. See
+          Kubernetes admission control plugins to enable. See
           <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
         '';
         default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds" "NodeRestriction"];
@@ -353,6 +393,15 @@ in {
         type = types.listOf types.str;
       };
 
+      disableAdmissionPlugins = mkOption {
+        description = ''
+          Kubernetes admission control plugins to disable. See
+          <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+        '';
+        default = [];
+        type = types.listOf types.str;
+      };
+
       serviceAccountKeyFile = mkOption {
         description = ''
           Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
@@ -573,6 +622,7 @@ in {
         type = types.bool;
       };
 
+      # TODO: remove this deprecated flag
       cadvisorPort = mkOption {
         description = "Kubernetes kubelet local cadvisor port.";
         default = 4194;
@@ -629,6 +679,12 @@ in {
             }]
           '';
         };
+
+        configDir = mkOption {
+          description = "Path to Kubernetes CNI configuration directory.";
+          type = types.nullOr types.path;
+          default = null;
+        };
       };
 
       manifests = mkOption {
@@ -783,12 +839,10 @@ in {
         serviceConfig = {
           Slice = "kubernetes.slice";
           ExecStart = ''${cfg.package}/bin/kubelet \
-            ${optionalString cfg.kubelet.applyManifests
-              "--pod-manifest-path=${manifests}"} \
             ${optionalString (taints != "")
               "--register-with-taints=${taints}"} \
             --kubeconfig=${mkKubeConfig "kubelet" cfg.kubelet.kubeconfig} \
-            --require-kubeconfig \
+            --config=${kubeletConfig} \
             --address=${cfg.kubelet.address} \
             --port=${toString cfg.kubelet.port} \
             --register-node=${boolToString cfg.kubelet.registerNode} \
@@ -853,7 +907,7 @@ in {
 
     (mkIf cfg.apiserver.enable {
       systemd.services.kube-apiserver = {
-        description = "Kubernetes Kubelet Service";
+        description = "Kubernetes APIServer Service";
         wantedBy = [ "kubernetes.target" ];
         after = [ "network.target" "docker.service" ];
         serviceConfig = {
@@ -867,7 +921,7 @@ in {
             ${optionalString (cfg.etcd.keyFile != null)
               "--etcd-keyfile=${cfg.etcd.keyFile}"} \
             --insecure-port=${toString cfg.apiserver.port} \
-            --bind-address=0.0.0.0 \
+            --bind-address=${cfg.apiserver.bindAddress} \
             ${optionalString (cfg.apiserver.advertiseAddress != null)
               "--advertise-address=${cfg.apiserver.advertiseAddress}"} \
            --allow-privileged=${boolToString cfg.apiserver.allowPrivileged} \
@@ -895,11 +949,15 @@ in {
                 (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.apiserver.authorizationPolicy)
               }"
             } \
+            ${optionalString (elem "Webhook" cfg.apiserver.authorizationMode)
+              "--authorization-webhook-config-file=${cfg.apiserver.webhookConfig}"
+            } \
             --secure-port=${toString cfg.apiserver.securePort} \
             --service-cluster-ip-range=${cfg.apiserver.serviceClusterIpRange} \
             ${optionalString (cfg.apiserver.runtimeConfig != "")
               "--runtime-config=${cfg.apiserver.runtimeConfig}"} \
-            --admission_control=${concatStringsSep "," cfg.apiserver.admissionControl} \
+            --enable-admission-plugins=${concatStringsSep "," cfg.apiserver.enableAdmissionPlugins} \
+            --disable-admission-plugins=${concatStringsSep "," cfg.apiserver.disableAdmissionPlugins} \
             ${optionalString (cfg.apiserver.serviceAccountKeyFile!=null)
               "--service-account-key-file=${cfg.apiserver.serviceAccountKeyFile}"} \
             ${optionalString cfg.verbose "--v=6"} \
@@ -1055,6 +1113,7 @@ in {
         wantedBy = [ "kubernetes.target" ];
         after = [ "kube-apiserver.service" ];
         environment.ADDON_PATH = "/etc/kubernetes/addons/";
+        path = [ pkgs.gawk ];
         serviceConfig = {
           Slice = "kubernetes.slice";
           ExecStart = "${cfg.package}/bin/kube-addons";
@@ -1084,7 +1143,7 @@ in {
       ];
 
       environment.systemPackages = [ cfg.package ];
-      users.extraUsers = singleton {
+      users.users = singleton {
         name = "kubernetes";
         uid = config.ids.uids.kubernetes;
         description = "Kubernetes user";
@@ -1093,7 +1152,7 @@ in {
         home = cfg.dataDir;
         createHome = true;
       };
-      users.extraGroups.kubernetes.gid = config.ids.gids.kubernetes;
+      users.groups.kubernetes.gid = config.ids.gids.kubernetes;
 
      # dns addon is enabled by default
       services.kubernetes.addons.dns.enable = mkDefault true;
diff --git a/nixos/modules/services/cluster/kubernetes/dns.nix b/nixos/modules/services/cluster/kubernetes/dns.nix
index 226fdadffd1a..43bbb50a48d4 100644
--- a/nixos/modules/services/cluster/kubernetes/dns.nix
+++ b/nixos/modules/services/cluster/kubernetes/dns.nix
@@ -3,26 +3,7 @@
 with lib;
 
 let
-  version = "1.14.4";
-
-  k8s-dns-kube-dns = pkgs.dockerTools.pullImage {
-    imageName = "gcr.io/google_containers/k8s-dns-kube-dns-amd64";
-    imageTag = version;
-    sha256 = "0q97xfqrigrfjl2a9cxl5in619py0zv44gch09jm8gqjkxl80imp";
-  };
-
-  k8s-dns-dnsmasq-nanny = pkgs.dockerTools.pullImage {
-    imageName = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64";
-    imageTag = version;
-    sha256 = "051w5ca4qb88mwva4hbnh9xzlsvv7k1mbk3wz50lmig2mqrqqx6c";
-  };
-
-  k8s-dns-sidecar = pkgs.dockerTools.pullImage {
-    imageName = "gcr.io/google_containers/k8s-dns-sidecar-amd64";
-    imageTag = version;
-    sha256 = "1z0d129bcm8i2cqq36x5jhnrv9hirj8c6kjrmdav8vgf7py78vsm";
-  };
-
+  version = "1.14.10";
   cfg = config.services.kubernetes.addons.dns;
 in {
   options.services.kubernetes.addons.dns = {
@@ -45,18 +26,51 @@ in {
       default = "cluster.local";
       type = types.str;
     };
+
+    kube-dns = mkOption {
+      description = "Docker image to seed for the kube-dns main container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/k8s-dns-kube-dns-amd64";
+        imageDigest = "sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8";
+        finalImageTag = version;
+        sha256 = "0x583znk9smqn0fix7ld8sm5jgaxhqhx3fq97b1wkqm7iwhvl3pj";
+      };
+    };
+
+    dnsmasq-nanny = mkOption {
+      description = "Docker image to seed for the kube-dns dnsmasq container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64";
+        imageDigest = "sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8";
+        finalImageTag = version;
+        sha256 = "1fihml7s2mfwgac51cbqpylkwbivc8nyhgi4vb820s83zvl8a6y1";
+      };
+    };
+
+    sidecar = mkOption {
+      description = "Docker image to seed for the kube-dns sidecar container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/k8s-dns-sidecar-amd64";
+        imageDigest = "sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4";
+        finalImageTag = version;
+        sha256 = "08l1bv5jgrhvjzpqpbinrkgvv52snc4fzyd8ya9v18ns2klyz7m0";
+      };
+    };
   };
 
   config = mkIf cfg.enable {
-    services.kubernetes.kubelet.seedDockerImages = [
-      k8s-dns-kube-dns
-      k8s-dns-dnsmasq-nanny
-      k8s-dns-sidecar
+    services.kubernetes.kubelet.seedDockerImages = with pkgs.dockerTools; [
+      (pullImage cfg.kube-dns)
+      (pullImage cfg.dnsmasq-nanny)
+      (pullImage cfg.sidecar)
     ];
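+    # Each of the three image options is an attrset of dockerTools.pullImage
+    # arguments, so a different build can be pinned, e.g. (digest and hashes
+    # hypothetical):
+    #   services.kubernetes.addons.dns.kube-dns = {
+    #     imageName = "k8s.gcr.io/k8s-dns-kube-dns-amd64";
+    #     imageDigest = "sha256:...";
+    #     finalImageTag = "1.14.10";
+    #     sha256 = "...";
+    #   };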
 
     services.kubernetes.addonManager.addons = {
       kubedns-deployment = {
-        apiVersion = "apps/v1beta1";
+        apiVersion = "extensions/v1beta1";
         kind = "Deployment";
         metadata = {
           labels = {
@@ -81,9 +95,38 @@ in {
               labels.k8s-app = "kube-dns";
             };
             spec = {
+              priorityClassName = "system-cluster-critical";
               containers = [
                 {
                   name = "kubedns";
+                  image = with cfg.kube-dns; "${imageName}:${finalImageTag}";
+                  resources = {
+                    limits.memory = "170Mi";
+                    requests = {
+                      cpu = "100m";
+                      memory = "70Mi";
+                    };
+                  };
+                  livenessProbe = {
+                    failureThreshold = 5;
+                    httpGet = {
+                      path = "/healthcheck/kubedns";
+                      port = 10054;
+                      scheme = "HTTP";
+                    };
+                    initialDelaySeconds = 60;
+                    successThreshold = 1;
+                    timeoutSeconds = 5;
+                  };
+                  readinessProbe = {
+                    httpGet = {
+                      path = "/readiness";
+                      port = 8081;
+                      scheme = "HTTP";
+                    };
+                    initialDelaySeconds = 3;
+                    timeoutSeconds = 5;
+                  };
                   args = [
                     "--domain=${cfg.clusterDomain}"
                     "--dns-port=10053"
@@ -96,18 +139,6 @@ in {
                       value = "10055";
                     }
                   ];
-                  image = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:${version}";
-                  livenessProbe = {
-                    failureThreshold = 5;
-                    httpGet = {
-                      path = "/healthcheck/kubedns";
-                      port = 10054;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 60;
-                    successThreshold = 1;
-                    timeoutSeconds = 5;
-                  };
                   ports = [
                     {
                       containerPort = 10053;
@@ -125,22 +156,6 @@ in {
                       protocol = "TCP";
                     }
                   ];
-                  readinessProbe = {
-                    httpGet = {
-                      path = "/readiness";
-                      port = 8081;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 3;
-                    timeoutSeconds = 5;
-                  };
-                  resources = {
-                    limits.memory = "170Mi";
-                    requests = {
-                      cpu = "100m";
-                      memory = "70Mi";
-                    };
-                  };
                   volumeMounts = [
                     {
                       mountPath = "/kube-dns-config";
@@ -149,6 +164,19 @@ in {
                   ];
                 }
                 {
+                  name = "dnsmasq";
+                  image = with cfg.dnsmasq-nanny; "${imageName}:${finalImageTag}";
+                  livenessProbe = {
+                    httpGet = {
+                      path = "/healthcheck/dnsmasq";
+                      port = 10054;
+                      scheme = "HTTP";
+                    };
+                    initialDelaySeconds = 60;
+                    timeoutSeconds = 5;
+                    successThreshold = 1;
+                    failureThreshold = 5;
+                  };
                   args = [
                     "-v=2"
                     "-logtostderr"
@@ -162,19 +190,6 @@ in {
                     "--server=/in-addr.arpa/127.0.0.1#10053"
                     "--server=/ip6.arpa/127.0.0.1#10053"
                   ];
-                  image = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:${version}";
-                  livenessProbe = {
-                    failureThreshold = 5;
-                    httpGet = {
-                      path = "/healthcheck/dnsmasq";
-                      port = 10054;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 60;
-                    successThreshold = 1;
-                    timeoutSeconds = 5;
-                  };
-                  name = "dnsmasq";
                   ports = [
                     {
                       containerPort = 53;
@@ -202,24 +217,24 @@ in {
                 }
                 {
                   name = "sidecar";
-                  image = "gcr.io/google_containers/k8s-dns-sidecar-amd64:${version}";
-                  args = [
-                    "--v=2"
-                    "--logtostderr"
-                    "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
-                    "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
-                  ];
+                  image = with cfg.sidecar; "${imageName}:${finalImageTag}";
                   livenessProbe = {
-                    failureThreshold = 5;
                     httpGet = {
                       path = "/metrics";
                       port = 10054;
                       scheme = "HTTP";
                     };
                     initialDelaySeconds = 60;
-                    successThreshold = 1;
                     timeoutSeconds = 5;
+                    successThreshold = 1;
+                    failureThreshold = 5;
                   };
+                  args = [
+                    "--v=2"
+                    "--logtostderr"
+                    "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
+                    "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
+                  ];
                   ports = [
                     {
                       containerPort = 10054;
diff --git a/nixos/modules/services/computing/slurm/slurm.nix b/nixos/modules/services/computing/slurm/slurm.nix
index 45d34f5b76f5..1e1c5bc9f035 100644
--- a/nixos/modules/services/computing/slurm/slurm.nix
+++ b/nixos/modules/services/computing/slurm/slurm.nix
@@ -6,20 +6,36 @@ let
 
   cfg = config.services.slurm;
   # configuration file can be generated by http://slurm.schedmd.com/configurator.html
-  configFile = pkgs.writeText "slurm.conf"
+  configFile = pkgs.writeTextDir "slurm.conf"
     ''
       ${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
       ${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
       ${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
       ${optionalString (cfg.partitionName != null) ''partitionName=${cfg.partitionName}''}
       PlugStackConfig=${plugStackConfig}
+      ProctrackType=${cfg.procTrackType}
       ${cfg.extraConfig}
     '';
 
-  plugStackConfig = pkgs.writeText "plugstack.conf"
+  plugStackConfig = pkgs.writeTextDir "plugstack.conf"
     ''
       ${optionalString cfg.enableSrunX11 ''optional ${pkgs.slurm-spank-x11}/lib/x11.so''}
+      ${cfg.extraPlugstackConfig}
     '';
+
+
+  cgroupConfig = pkgs.writeTextDir "cgroup.conf"
+    ''
+      ${cfg.extraCgroupConfig}
+    '';
+
+  # slurm expects some additional config files to be
+  # in the same directory as slurm.conf
+  etcSlurm = pkgs.symlinkJoin {
+    name = "etc-slurm";
+    paths = [ configFile cgroupConfig plugStackConfig ];
+  };
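+  # The three writeTextDir outputs above merge into a single directory, e.g.
+  # (store hash illustrative):
+  #   /nix/store/...-etc-slurm/{slurm.conf,plugstack.conf,cgroup.conf}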
+
 in
 
 {
@@ -31,13 +47,31 @@ in
     services.slurm = {
 
       server = {
-        enable = mkEnableOption "slurm control daemon";
-
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            Whether to enable the slurm control daemon.
+            Note that the standard authentication method is "munge".
+            The "munge" service needs to be provided with a password file in order for
+            slurm to work properly (see <literal>services.munge.password</literal>).
+          '';
+        };
       };
 
       client = {
-        enable = mkEnableOption "slurm rlient daemon";
+        enable = mkEnableOption "slurm client daemon";
+      };
 
+      enableStools = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to provide a slurm.conf file.
+          Enable this option if you do not run a slurm daemon on this host
+          (i.e. <literal>server.enable</literal> and <literal>client.enable</literal> are <literal>false</literal>)
+          but you still want to run slurm commands from this host.
+        '';
       };
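+      # Illustrative login-node setup: no daemons on this host, only the
+      # user-facing slurm commands.
+      #   services.slurm.enableStools = true;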
 
       package = mkOption {
@@ -88,7 +122,7 @@ in
         example = "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP";
         description = ''
           Name by which the partition may be referenced. Note that now you have
-          to write patrition's parameters after the name.
+          to write the partition's parameters after the name.
         '';
       };
 
@@ -98,8 +132,20 @@ in
         description = ''
           If enabled srun will accept the option "--x11" to allow for X11 forwarding
           from within an interactive session or a batch job. This activates the
-          slurm-spank-x11 module. Note that this requires 'services.openssh.forwardX11'
-          to be enabled on the compute nodes.
+          slurm-spank-x11 module. Note that this option also enables
+          'services.openssh.forwardX11' on the client.
+
+          This option requires slurm to be compiled without native X11 support.
+        '';
+      };
+
+      procTrackType = mkOption {
+        type = types.str;
+        default = "proctrack/linuxproc";
+        description = ''
+          Plugin to be used for process tracking on a job step basis.
+          The slurmd daemon uses this mechanism to identify all processes
+          which are children of processes it spawns for a user job step.
         '';
       };
 
@@ -111,6 +157,23 @@ in
           the end of the slurm configuration file.
         '';
       };
+
+      extraPlugstackConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Extra configuration that will be added to the end of <literal>plugstack.conf</literal>.
+        '';
+      };
+
+      extraCgroupConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Extra configuration for <literal>cgroup.conf</literal>. This file is
+          used when <literal>procTrackType=proctrack/cgroup</literal>.
+        '';
+      };
     };
 
   };
@@ -123,8 +186,6 @@ in
       wrappedSlurm = pkgs.stdenv.mkDerivation {
         name = "wrappedSlurm";
 
-        propagatedBuildInputs = [ cfg.package configFile ];
-
         builder = pkgs.writeText "builder.sh" ''
           source $stdenv/setup
           mkdir -p $out/bin
@@ -136,20 +197,25 @@ in
           #!/bin/sh
           if [ -z "$SLURM_CONF" ]
           then
-            SLURM_CONF="${configFile}" "$EXE" "\$@"
+            SLURM_CONF="${etcSlurm}/slurm.conf" "$EXE" "\$@"
           else
             "$EXE" "\$0"
           fi
           EOT
             chmod +x "$wrappername"
           done
+
+          mkdir -p $out/share
+          ln -s ${getBin cfg.package}/share/man $out/share/man
         '';
       };
 
-  in mkIf (cfg.client.enable || cfg.server.enable) {
+  in mkIf (cfg.enableStools || cfg.client.enable || cfg.server.enable) {
 
     environment.systemPackages = [ wrappedSlurm ];
 
+    services.munge.enable = mkDefault true;
+
     systemd.services.slurmd = mkIf (cfg.client.enable) {
       path = with pkgs; [ wrappedSlurm coreutils ]
         ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
@@ -169,6 +235,8 @@ in
       '';
     };
 
+    services.openssh.forwardX11 = mkIf cfg.client.enable (mkDefault true);
+
     systemd.services.slurmctld = mkIf (cfg.server.enable) {
       path = with pkgs; [ wrappedSlurm munge coreutils ]
         ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
diff --git a/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixos/modules/services/continuous-integration/buildbot/master.nix
index 846efc8b5b9a..8d767de37f00 100644
--- a/nixos/modules/services/continuous-integration/buildbot/master.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -191,11 +191,11 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraGroups = optional (cfg.group == "buildbot") {
+    users.groups = optional (cfg.group == "buildbot") {
       name = "buildbot";
     };
 
-    users.extraUsers = optional (cfg.user == "buildbot") {
+    users.users = optional (cfg.user == "buildbot") {
       name = "buildbot";
       description = "Buildbot User.";
       isNormalUser = true;
diff --git a/nixos/modules/services/continuous-integration/buildbot/worker.nix b/nixos/modules/services/continuous-integration/buildbot/worker.nix
index a97f571e89df..67c541570b97 100644
--- a/nixos/modules/services/continuous-integration/buildbot/worker.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/worker.nix
@@ -84,11 +84,11 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraGroups = optional (cfg.group == "bbworker") {
+    users.groups = optional (cfg.group == "bbworker") {
       name = "bbworker";
     };
 
-    users.extraUsers = optional (cfg.user == "bbworker") {
+    users.users = optional (cfg.user == "bbworker") {
       name = "bbworker";
       description = "Buildbot Worker User.";
       isNormalUser = true;
diff --git a/nixos/modules/services/continuous-integration/buildkite-agent.nix b/nixos/modules/services/continuous-integration/buildkite-agent.nix
index d647b7b9fa49..9daf391c73c4 100644
--- a/nixos/modules/services/continuous-integration/buildkite-agent.nix
+++ b/nixos/modules/services/continuous-integration/buildkite-agent.nix
@@ -185,7 +185,7 @@ in
   };
 
   config = mkIf config.services.buildkite-agent.enable {
-    users.extraUsers.buildkite-agent =
+    users.users.buildkite-agent =
       { name = "buildkite-agent";
         home = cfg.dataDir;
         createHome = true;
diff --git a/nixos/modules/services/continuous-integration/gitlab-runner.nix b/nixos/modules/services/continuous-integration/gitlab-runner.nix
index 6d5cea4f77a5..a0aff1b8b5b9 100644
--- a/nixos/modules/services/continuous-integration/gitlab-runner.nix
+++ b/nixos/modules/services/continuous-integration/gitlab-runner.nix
@@ -134,7 +134,7 @@ in
    # Make the gitlab-runner command available so users can query the runner
     environment.systemPackages = [ cfg.package ];
 
-    users.extraUsers.gitlab-runner = {
+    users.users.gitlab-runner = {
       group = "gitlab-runner";
       extraGroups = optional hasDocker "docker";
       uid = config.ids.uids.gitlab-runner;
@@ -142,6 +142,6 @@ in
       createHome = true;
     };
 
-    users.extraGroups.gitlab-runner.gid = config.ids.gids.gitlab-runner;
+    users.groups.gitlab-runner.gid = config.ids.gids.gitlab-runner;
   };
 }
diff --git a/nixos/modules/services/continuous-integration/gocd-agent/default.nix b/nixos/modules/services/continuous-integration/gocd-agent/default.nix
index 05adb18fbe91..8126f27c2b0c 100644
--- a/nixos/modules/services/continuous-integration/gocd-agent/default.nix
+++ b/nixos/modules/services/continuous-integration/gocd-agent/default.nix
@@ -135,12 +135,12 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraGroups = optional (cfg.group == "gocd-agent") {
+    users.groups = optional (cfg.group == "gocd-agent") {
       name = "gocd-agent";
       gid = config.ids.gids.gocd-agent;
     };
 
-    users.extraUsers = optional (cfg.user == "gocd-agent") {
+    users.users = optional (cfg.user == "gocd-agent") {
       name = "gocd-agent";
       description = "gocd-agent user";
       createHome = true;
diff --git a/nixos/modules/services/continuous-integration/gocd-server/default.nix b/nixos/modules/services/continuous-integration/gocd-server/default.nix
index 07e00f17f1e8..8f177da129e5 100644
--- a/nixos/modules/services/continuous-integration/gocd-server/default.nix
+++ b/nixos/modules/services/continuous-integration/gocd-server/default.nix
@@ -113,8 +113,8 @@ in {
 
       extraOptions = mkOption {
         default = [ ];
-        example = [ 
-          "-X debug" 
+        example = [
+          "-X debug"
           "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005"
           "-verbose:gc"
           "-Xloggc:go-server-gc.log"
@@ -143,12 +143,12 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraGroups = optional (cfg.group == "gocd-server") {
+    users.groups = optional (cfg.group == "gocd-server") {
       name = "gocd-server";
       gid = config.ids.gids.gocd-server;
     };
 
-    users.extraUsers = optional (cfg.user == "gocd-server") {
+    users.users = optional (cfg.user == "gocd-server") {
       name = "gocd-server";
       description = "gocd-server user";
       createHome = true;
diff --git a/nixos/modules/services/continuous-integration/hydra/default.nix b/nixos/modules/services/continuous-integration/hydra/default.nix
index 2fa7c59a965d..c7fe4eeeab99 100644
--- a/nixos/modules/services/continuous-integration/hydra/default.nix
+++ b/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -194,11 +194,11 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraGroups.hydra = {
+    users.groups.hydra = {
       gid = config.ids.gids.hydra;
     };
 
-    users.extraUsers.hydra =
+    users.users.hydra =
       { description = "Hydra";
         group = "hydra";
         createHome = true;
@@ -207,7 +207,7 @@ in
         uid = config.ids.uids.hydra;
       };
 
-    users.extraUsers.hydra-queue-runner =
+    users.users.hydra-queue-runner =
       { description = "Hydra queue runner";
         group = "hydra";
         useDefaultShell = true;
@@ -215,7 +215,7 @@ in
         uid = config.ids.uids.hydra-queue-runner;
       };
 
-    users.extraUsers.hydra-www =
+    users.users.hydra-www =
       { description = "Hydra web server";
         group = "hydra";
         useDefaultShell = true;
diff --git a/nixos/modules/services/continuous-integration/jenkins/default.nix b/nixos/modules/services/continuous-integration/jenkins/default.nix
index c2f4e9c0c5a7..1eca45fbd570 100644
--- a/nixos/modules/services/continuous-integration/jenkins/default.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/default.nix
@@ -150,12 +150,12 @@ in {
       pkgs.dejavu_fonts
     ];
 
-    users.extraGroups = optional (cfg.group == "jenkins") {
+    users.groups = optional (cfg.group == "jenkins") {
       name = "jenkins";
       gid = config.ids.gids.jenkins;
     };
 
-    users.extraUsers = optional (cfg.user == "jenkins") {
+    users.users = optional (cfg.user == "jenkins") {
       name = "jenkins";
       description = "jenkins user";
       createHome = true;
diff --git a/nixos/modules/services/continuous-integration/jenkins/slave.nix b/nixos/modules/services/continuous-integration/jenkins/slave.nix
index a0216caf2b5c..d8f55fb826f2 100644
--- a/nixos/modules/services/continuous-integration/jenkins/slave.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/slave.nix
@@ -50,12 +50,12 @@ in {
   };
 
   config = mkIf (cfg.enable && !masterCfg.enable) {
-    users.extraGroups = optional (cfg.group == "jenkins") {
+    users.groups = optional (cfg.group == "jenkins") {
       name = "jenkins";
       gid = config.ids.gids.jenkins;
     };
 
-    users.extraUsers = optional (cfg.user == "jenkins") {
+    users.users = optional (cfg.user == "jenkins") {
       name = "jenkins";
       description = "jenkins user";
       createHome = true;
diff --git a/nixos/modules/services/databases/4store-endpoint.nix b/nixos/modules/services/databases/4store-endpoint.nix
index d528355671f6..59ed0e5f0afd 100644
--- a/nixos/modules/services/databases/4store-endpoint.nix
+++ b/nixos/modules/services/databases/4store-endpoint.nix
@@ -52,7 +52,7 @@ with lib;
         message = "Must specify 4Store database name";
       };
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = endpointUser;
         uid = config.ids.uids.fourstorehttp;
         description = "4Store SPARQL endpoint user";
diff --git a/nixos/modules/services/databases/4store.nix b/nixos/modules/services/databases/4store.nix
index abb62e1f2637..be4351c1c38f 100644
--- a/nixos/modules/services/databases/4store.nix
+++ b/nixos/modules/services/databases/4store.nix
@@ -43,7 +43,7 @@ with lib;
         message = "Must specify 4Store database name.";
       };
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = fourStoreUser;
         uid = config.ids.uids.fourstore;
         description = "4Store database user";
diff --git a/nixos/modules/services/databases/aerospike.nix b/nixos/modules/services/databases/aerospike.nix
new file mode 100644
index 000000000000..5f33164998be
--- /dev/null
+++ b/nixos/modules/services/databases/aerospike.nix
@@ -0,0 +1,155 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.aerospike;
+
+  aerospikeConf = pkgs.writeText "aerospike.conf" ''
+    # This stanza must come first.
+    service {
+      user aerospike
+      group aerospike
+      paxos-single-replica-limit 1 # Number of nodes where the replica count is automatically reduced to 1.
+      proto-fd-max 15000
+      work-directory ${cfg.workDir}
+    }
+    logging {
+      console {
+        context any info
+      }
+    }
+    mod-lua {
+      system-path ${cfg.package}/share/udf/lua
+      user-path ${cfg.workDir}/udf/lua
+    }
+    network {
+      ${cfg.networkConfig}
+    }
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.aerospike = {
+      enable = mkEnableOption "Aerospike server";
+
+      package = mkOption {
+        default = pkgs.aerospike;
+        type = types.package;
+        description = "Which Aerospike derivation to use";
+      };
+
+      workDir = mkOption {
+        type = types.str;
+        default = "/var/lib/aerospike";
+        description = "Location where Aerospike stores its files";
+      };
+
+      networkConfig = mkOption {
+        type = types.lines;
+        default = ''
+          service {
+            address any
+            port 3000
+          }
+
+          heartbeat {
+            address any
+            mode mesh
+            port 3002
+            interval 150
+            timeout 10
+          }
+
+          fabric {
+            address any
+            port 3001
+          }
+
+          info {
+            address any
+            port 3003
+          }
+        '';
+        description = "Network section of the Aerospike configuration file";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          namespace test {
+            replication-factor 2
+            memory-size 4G
+            default-ttl 30d
+            storage-engine memory
+          }
+        '';
+        description = "Extra configuration";
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.aerospike.enable {
+
+    users.users.aerospike = {
+      name = "aerospike";
+      group = "aerospike";
+      uid = config.ids.uids.aerospike;
+      description = "Aerospike server user";
+    };
+    users.groups.aerospike.gid = config.ids.gids.aerospike;
+
+    systemd.services.aerospike = rec {
+      description = "Aerospike server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/asd --fgdaemon --config-file ${aerospikeConf}";
+        User = "aerospike";
+        Group = "aerospike";
+        LimitNOFILE = 100000;
+        PermissionsStartOnly = true;
+      };
+
+      preStart = ''
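+        # Aerospike expects raised shared-memory and socket-buffer limits;
+        # each check below only bumps a kernel value that is currently under
+        # the recommended minimum.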
+        if [ $(echo "$(${pkgs.procps}/bin/sysctl -n kernel.shmall) < 4294967296" | ${pkgs.bc}/bin/bc) == "1"  ]; then
+          echo "kernel.shmall too low, setting to 4G pages"
+          ${pkgs.procps}/bin/sysctl -w kernel.shmall=4294967296
+        fi
+        if [ $(echo "$(${pkgs.procps}/bin/sysctl -n kernel.shmmax) < 1073741824" | ${pkgs.bc}/bin/bc) == "1"  ]; then
+          echo "kernel.shmmax too low, setting to 1GB"
+          ${pkgs.procps}/bin/sysctl -w kernel.shmmax=1073741824
+        fi
+        if [ $(echo "$(cat /proc/sys/net/core/rmem_max) < 15728640" | ${pkgs.bc}/bin/bc) == "1" ]; then
+          echo "increasing socket buffer limit (/proc/sys/net/core/rmem_max): $(cat /proc/sys/net/core/rmem_max) -> 15728640"
+          echo 15728640 > /proc/sys/net/core/rmem_max
+        fi
+        if [ $(echo "$(cat /proc/sys/net/core/wmem_max) <  5242880" | ${pkgs.bc}/bin/bc) == "1"  ]; then
+          echo "increasing socket buffer limit (/proc/sys/net/core/wmem_max): $(cat /proc/sys/net/core/wmem_max) -> 5242880"
+          echo  5242880 > /proc/sys/net/core/wmem_max
+        fi
+        install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}"
+        install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/smd"
+        install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/udf"
+        install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/udf/lua"
+      '';
+    };
+
+  };
+
+}
diff --git a/nixos/modules/services/databases/cassandra.nix b/nixos/modules/services/databases/cassandra.nix
index 1e5cd8f54130..09b3fbd8a62a 100644
--- a/nixos/modules/services/databases/cassandra.nix
+++ b/nixos/modules/services/databases/cassandra.nix
@@ -420,7 +420,7 @@ in {
       9160
     ];
 
-    users.extraUsers.cassandra =
+    users.users.cassandra =
       if config.ids.uids ? "cassandra"
       then { uid = config.ids.uids.cassandra; } // cassandraUser
       else cassandraUser ;
diff --git a/nixos/modules/services/databases/clickhouse.nix b/nixos/modules/services/databases/clickhouse.nix
index 631d7f8cba79..1b8771cec391 100644
--- a/nixos/modules/services/databases/clickhouse.nix
+++ b/nixos/modules/services/databases/clickhouse.nix
@@ -27,14 +27,14 @@ with lib;
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.clickhouse = {
+    users.users.clickhouse = {
       name = "clickhouse";
       uid = config.ids.uids.clickhouse;
       group = "clickhouse";
       description = "ClickHouse server user";
     };
 
-    users.extraGroups.clickhouse.gid = config.ids.gids.clickhouse;
+    users.groups.clickhouse.gid = config.ids.gids.clickhouse;
 
     systemd.services.clickhouse = {
       description = "ClickHouse server";
diff --git a/nixos/modules/services/databases/couchdb.nix b/nixos/modules/services/databases/couchdb.nix
index 52247bfb983e..ca89b1198205 100644
--- a/nixos/modules/services/databases/couchdb.nix
+++ b/nixos/modules/services/databases/couchdb.nix
@@ -198,13 +198,13 @@ in {
       };
     };
 
-    users.extraUsers.couchdb = {
+    users.users.couchdb = {
       description = "CouchDB Server user";
       group = "couchdb";
       uid = config.ids.uids.couchdb;
     };
 
-    users.extraGroups.couchdb.gid = config.ids.gids.couchdb;
+    users.groups.couchdb.gid = config.ids.gids.couchdb;
 
   };
 }
diff --git a/nixos/modules/services/databases/firebird.nix b/nixos/modules/services/databases/firebird.nix
index b9f66612d4eb..cc81b440450b 100644
--- a/nixos/modules/services/databases/firebird.nix
+++ b/nixos/modules/services/databases/firebird.nix
@@ -154,13 +154,13 @@ in
       # there are some additional settings which should be reviewed
     '';
 
-    users.extraUsers.firebird = {
+    users.users.firebird = {
       description = "Firebird server user";
       group = "firebird";
       uid = config.ids.uids.firebird;
     };
 
-    users.extraGroups.firebird.gid = config.ids.gids.firebird;
+    users.groups.firebird.gid = config.ids.gids.firebird;
 
   };
 }
diff --git a/nixos/modules/services/databases/foundationdb.nix b/nixos/modules/services/databases/foundationdb.nix
index 693d2fde9916..91337cf791dc 100644
--- a/nixos/modules/services/databases/foundationdb.nix
+++ b/nixos/modules/services/databases/foundationdb.nix
@@ -325,14 +325,14 @@ in
 
     environment.systemPackages = [ pkg ];
 
-    users.extraUsers = optionalAttrs (cfg.user == "foundationdb") (singleton
+    users.users = optionalAttrs (cfg.user == "foundationdb") (singleton
       { name        = "foundationdb";
         description = "FoundationDB User";
         uid         = config.ids.uids.foundationdb;
         group       = cfg.group;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "foundationdb") (singleton
+    users.groups = optionalAttrs (cfg.group == "foundationdb") (singleton
       { name = "foundationdb";
         gid  = config.ids.gids.foundationdb;
       });
diff --git a/nixos/modules/services/databases/hbase.nix b/nixos/modules/services/databases/hbase.nix
index 629d02209a9c..4772e897efe2 100644
--- a/nixos/modules/services/databases/hbase.nix
+++ b/nixos/modules/services/databases/hbase.nix
@@ -122,13 +122,13 @@ in {
       };
     };
 
-    users.extraUsers.hbase = {
+    users.users.hbase = {
       description = "HBase Server user";
       group = "hbase";
       uid = config.ids.uids.hbase;
     };
 
-    users.extraGroups.hbase.gid = config.ids.gids.hbase;
+    users.groups.hbase.gid = config.ids.gids.hbase;
 
   };
 }
diff --git a/nixos/modules/services/databases/influxdb.nix b/nixos/modules/services/databases/influxdb.nix
index 15b711f57b13..d7a028b25d8d 100644
--- a/nixos/modules/services/databases/influxdb.nix
+++ b/nixos/modules/services/databases/influxdb.nix
@@ -182,13 +182,13 @@ in
         '';
     };
 
-    users.extraUsers = optional (cfg.user == "influxdb") {
+    users.users = optional (cfg.user == "influxdb") {
       name = "influxdb";
       uid = config.ids.uids.influxdb;
       description = "Influxdb daemon user";
     };
 
-    users.extraGroups = optional (cfg.group == "influxdb") {
+    users.groups = optional (cfg.group == "influxdb") {
       name = "influxdb";
       gid = config.ids.gids.influxdb;
     };
diff --git a/nixos/modules/services/databases/memcached.nix b/nixos/modules/services/databases/memcached.nix
index 46bc6fc5c132..7af452e4dced 100644
--- a/nixos/modules/services/databases/memcached.nix
+++ b/nixos/modules/services/databases/memcached.nix
@@ -64,7 +64,7 @@ in
 
   config = mkIf config.services.memcached.enable {
 
-    users.extraUsers = optional (cfg.user == "memcached") {
+    users.users = optional (cfg.user == "memcached") {
       name = "memcached";
       description = "Memcached server user";
     };
diff --git a/nixos/modules/services/databases/mongodb.nix b/nixos/modules/services/databases/mongodb.nix
index 78dbf0d784cf..4c46d9228e5f 100644
--- a/nixos/modules/services/databases/mongodb.nix
+++ b/nixos/modules/services/databases/mongodb.nix
@@ -93,7 +93,7 @@ in
 
   config = mkIf config.services.mongodb.enable {
 
-    users.extraUsers.mongodb = mkIf (cfg.user == "mongodb")
+    users.users.mongodb = mkIf (cfg.user == "mongodb")
       { name = "mongodb";
         uid = config.ids.uids.mongodb;
         description = "MongoDB server user";
diff --git a/nixos/modules/services/databases/mysql.nix b/nixos/modules/services/databases/mysql.nix
index 66c9330c3550..3eb7879e2d9f 100644
--- a/nixos/modules/services/databases/mysql.nix
+++ b/nixos/modules/services/databases/mysql.nix
@@ -221,18 +221,20 @@ in
       mkDefault (if versionAtLeast config.system.nixos.stateVersion "17.09" then "/var/lib/mysql"
                  else "/var/mysql");
 
-    users.extraUsers.mysql = {
+    users.users.mysql = {
       description = "MySQL server user";
       group = "mysql";
       uid = config.ids.uids.mysql;
     };
 
-    users.extraGroups.mysql.gid = config.ids.gids.mysql;
+    users.groups.mysql.gid = config.ids.gids.mysql;
 
     environment.systemPackages = [mysql];
 
-    systemd.services.mysql =
-      { description = "MySQL Server";
+    systemd.services.mysql = let
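+      # MariaDB is built with systemd notification support, so the unit can
+      # use Type=notify and skip the socket-polling loop in postStart below.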
+      hasNotify = (cfg.package == pkgs.mariadb);
+    in {
+        description = "MySQL Server";
 
         after = [ "network.target" ];
         wantedBy = [ "multi-user.target" ];
@@ -256,17 +258,16 @@ in
 
             mkdir -m 0755 -p ${cfg.pidDir}
             chown -R ${cfg.user} ${cfg.pidDir}
-
-            # Make the socket directory
-            mkdir -p /run/mysqld
-            chmod 0755 /run/mysqld
-            chown -R ${cfg.user} /run/mysqld
           '';
 
-        serviceConfig.ExecStart = "${mysql}/bin/mysqld --defaults-extra-file=${myCnf} ${mysqldOptions}";
+        serviceConfig = {
+          Type = if hasNotify then "notify" else "simple";
+          RuntimeDirectory = "mysqld";
+          ExecStart = "${mysql}/bin/mysqld --defaults-extra-file=${myCnf} ${mysqldOptions}";
+        };
 
-        postStart =
-          ''
+        postStart = ''
+          ${lib.optionalString (!hasNotify) ''
             # Wait until the MySQL server is available for use
             count=0
             while [ ! -e /run/mysqld/mysqld.sock ]
@@ -281,6 +282,7 @@ in
                count=$((count + 1))
                 sleep 1
             done
+          ''}
 
             if [ -f /tmp/mysql_init ]
             then
diff --git a/nixos/modules/services/databases/neo4j.nix b/nixos/modules/services/databases/neo4j.nix
index 424e08a6ee34..5533182c3116 100644
--- a/nixos/modules/services/databases/neo4j.nix
+++ b/nixos/modules/services/databases/neo4j.nix
@@ -1,32 +1,87 @@
-{ config, lib, pkgs, ... }:
+{ config, options, lib, pkgs, ... }:
 
 with lib;
 
 let
   cfg = config.services.neo4j;
+  certDirOpt = options.services.neo4j.directories.certificates;
+  isDefaultPathOption = opt: isOption opt && opt.type == types.path && opt.highestPrio >= 1500;
+
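+  # Keys and certificates given as bare file names resolve relative to the
+  # policy's baseDirectory; values containing a "/" are taken as full paths
+  # (hence the splitString checks below).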
+  sslPolicies = mapAttrsToList (
+    name: conf: ''
+      dbms.ssl.policy.${name}.allow_key_generation=${boolToString conf.allowKeyGeneration}
+      dbms.ssl.policy.${name}.base_directory=${conf.baseDirectory}
+      ${optionalString (conf.ciphers != null) ''
+        dbms.ssl.policy.${name}.ciphers=${concatStringsSep "," conf.ciphers}
+      ''}
+      dbms.ssl.policy.${name}.client_auth=${conf.clientAuth}
+      ${if length (splitString "/" conf.privateKey) > 1 then
+        ''dbms.ssl.policy.${name}.private_key=${conf.privateKey}''
+      else
+        ''dbms.ssl.policy.${name}.private_key=${conf.baseDirectory}/${conf.privateKey}''
+      }
+      ${if length (splitString "/" conf.publicCertificate) > 1 then
+        ''dbms.ssl.policy.${name}.public_certificate=${conf.publicCertificate}''
+      else
+        ''dbms.ssl.policy.${name}.public_certificate=${conf.baseDirectory}/${conf.publicCertificate}''
+      }
+      dbms.ssl.policy.${name}.revoked_dir=${conf.revokedDir}
+      dbms.ssl.policy.${name}.tls_versions=${concatStringsSep "," conf.tlsVersions}
+      dbms.ssl.policy.${name}.trust_all=${boolToString conf.trustAll}
+      dbms.ssl.policy.${name}.trusted_dir=${conf.trustedDir}
+    ''
+  ) cfg.ssl.policies;
 
   serverConfig = pkgs.writeText "neo4j.conf" ''
-    dbms.directories.data=${cfg.dataDir}/data
-    dbms.directories.certificates=${cfg.certDir}
-    dbms.directories.logs=${cfg.dataDir}/logs
-    dbms.directories.plugins=${cfg.dataDir}/plugins
-    dbms.connector.http.type=HTTP
-    dbms.connector.http.enabled=true
-    dbms.connector.http.address=${cfg.listenAddress}:${toString cfg.port}
-    ${optionalString cfg.enableBolt ''
-      dbms.connector.bolt.type=BOLT
-      dbms.connector.bolt.enabled=true
-      dbms.connector.bolt.tls_level=OPTIONAL
-      dbms.connector.bolt.address=${cfg.listenAddress}:${toString cfg.boltPort}
+    # General
+    dbms.allow_upgrade=${boolToString cfg.allowUpgrade}
+    dbms.connectors.default_listen_address=${cfg.defaultListenAddress}
+    dbms.read_only=${boolToString cfg.readOnly}
+    ${optionalString (cfg.workerCount > 0) ''
+      dbms.threads.worker_count=${toString cfg.workerCount}
     ''}
-    ${optionalString cfg.enableHttps ''
-      dbms.connector.https.type=HTTP
-      dbms.connector.https.enabled=true
-      dbms.connector.https.encryption=TLS
-      dbms.connector.https.address=${cfg.listenAddress}:${toString cfg.httpsPort}
+
+    # Directories
+    dbms.directories.certificates=${cfg.directories.certificates}
+    dbms.directories.data=${cfg.directories.data}
+    dbms.directories.logs=${cfg.directories.home}/logs
+    dbms.directories.plugins=${cfg.directories.plugins}
+    ${optionalString (cfg.constrainLoadCsv) ''
+      dbms.directories.import=${cfg.directories.imports}
     ''}
-    dbms.shell.enabled=true
-    ${cfg.extraServerConfig}
+
+    # HTTP Connector
+    ${optionalString (cfg.http.enable) ''
+      dbms.connector.http.enabled=${boolToString cfg.http.enable}
+      dbms.connector.http.listen_address=${cfg.http.listenAddress}
+    ''}
+    ${optionalString (!cfg.http.enable) ''
+      # It is not possible to disable the HTTP connector. To fully prevent
+      # clients from connecting to HTTP, block the HTTP port (7474 by default)
+      # via firewall. listen_address is set to the loopback interface to
+      # prevent remote clients from connecting.
+      dbms.connector.http.listen_address=127.0.0.1
+    ''}
+
+    # HTTPS Connector
+    dbms.connector.https.enabled=${boolToString cfg.https.enable}
+    dbms.connector.https.listen_address=${cfg.https.listenAddress}
+    https.ssl_policy=${cfg.https.sslPolicy}
+
+    # BOLT Connector
+    dbms.connector.bolt.enabled=${boolToString cfg.bolt.enable}
+    dbms.connector.bolt.listen_address=${cfg.bolt.listenAddress}
+    bolt.ssl_policy=${cfg.bolt.sslPolicy}
+    dbms.connector.bolt.tls_level=${cfg.bolt.tlsLevel}
+
+    # neo4j-shell
+    dbms.shell.enabled=${boolToString cfg.shell.enable}
+
+    # SSL Policies
+    ${concatStringsSep "\n" sslPolicies}
+
+    # Default retention policy from neo4j.conf
+    dbms.tx_log.rotation.retention_policy=1 days
 
     # Default JVM parameters from neo4j.conf
     dbms.jvm.additional=-XX:+UseG1GC
@@ -36,8 +91,14 @@ let
     dbms.jvm.additional=-XX:+TrustFinalNonStaticFields
     dbms.jvm.additional=-XX:+DisableExplicitGC
     dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048
-
+    dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true
     dbms.jvm.additional=-Dunsupported.dbms.udc.source=tarball
+
+    # Usage Data Collector
+    dbms.udc.enabled=${boolToString cfg.udc.enable}
+
+    # Extra Configuration
+    ${cfg.extraServerConfig}
   '';
 
 in {
@@ -45,105 +106,547 @@ in {
   ###### interface
 
   options.services.neo4j = {
+
     enable = mkOption {
-      description = "Whether to enable neo4j.";
+      type = types.bool;
       default = false;
+      description = ''
+        Whether to enable Neo4j Community Edition.
+      '';
+    };
+
+    allowUpgrade = mkOption {
       type = types.bool;
+      default = false;
+      description = ''
+        Allow upgrade of Neo4j database files from an older version.
+      '';
+    };
+
+    constrainLoadCsv = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Sets the root directory for file URLs used with the Cypher
+        <literal>LOAD CSV</literal> clause to be that defined by
+        <option>directories.imports</option>. It restricts
+        access to only those files within that directory and its
+        subdirectories.
+        </para>
+        <para>
+        Setting this option to <literal>false</literal> introduces
+        possible security problems.
+      '';
+    };
+
+    defaultListenAddress = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = ''
+        Default network interface to listen for incoming connections. To
+        listen for connections on all interfaces, use "0.0.0.0".
+        </para>
+        <para>
+        Specifies the default IP address and the address part of
+        connector-specific <option>listenAddress</option> options. To bind
+        specific connectors to specific network interfaces, specify the
+        entire <option>listenAddress</option> option for that connector.
+      '';
+    };
+
+    extraServerConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Extra configuration for Neo4j Community server. Refer to the
+        <link xlink:href="https://neo4j.com/docs/operations-manual/current/reference/configuration-settings/">complete reference</link>
+        of Neo4j configuration settings.
+      '';
     };
 
     package = mkOption {
-      description = "Neo4j package to use.";
+      type = types.package;
       default = pkgs.neo4j;
       defaultText = "pkgs.neo4j";
-      type = types.package;
+      description = ''
+        Neo4j package to use.
+      '';
     };
 
-    listenAddress = mkOption {
-      description = "Neo4j listen address.";
-      default = "127.0.0.1";
-      type = types.str;
+    readOnly = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Only allow read operations from this Neo4j instance.
+      '';
     };
 
-    port = mkOption {
-      description = "Neo4j port to listen for HTTP traffic.";
-      default = 7474;
-      type = types.int;
+    workerCount = mkOption {
+      type = types.ints.between 0 44738;
+      default = 0;
+      description = ''
+        Number of Neo4j worker threads, where the default of
+        <literal>0</literal> indicates a worker count equal to the number of
+        available processors.
+      '';
     };
 
-    enableBolt = mkOption {
-      description = "Enable bolt for Neo4j.";
-      default = true;
-      type = types.bool;
+    bolt = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Enable the BOLT connector for Neo4j. Setting this option to
+          <literal>false</literal> will stop Neo4j from listening for incoming
+          connections on the BOLT port (7687 by default).
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = ":7687";
+        description = ''
+          Neo4j listen address for BOLT traffic. The listen address is
+          expressed in the format <literal>&lt;ip-address&gt;:&lt;port-number&gt;</literal>.
+        '';
+      };
+
+      sslPolicy = mkOption {
+        type = types.str;
+        default = "legacy";
+        description = ''
+          Neo4j SSL policy for BOLT traffic.
+          </para>
+          <para>
+          The legacy policy is a special policy which is not defined in
+          the policy configuration section, but rather derives from
+          <option>directories.certificates</option> and
+          associated files (by default: <filename>neo4j.key</filename> and
+          <filename>neo4j.cert</filename>). Its use will be deprecated.
+          </para>
+          <para>
+          Note: This connector must be configured to support/require
+          SSL/TLS for the legacy policy to actually be utilized. See
+          <option>bolt.tlsLevel</option>.
+        '';
+      };
+
+      tlsLevel = mkOption {
+        type = types.enum [ "REQUIRED" "OPTIONAL" "DISABLED" ];
+        default = "OPTIONAL";
+        description = ''
+          SSL/TLS requirement level for BOLT traffic.
+        '';
+      };
     };
 
-    boltPort = mkOption {
-      description = "Neo4j port to listen for BOLT traffic.";
-      default = 7687;
-      type = types.int;
+    directories = {
+      certificates = mkOption {
+        type = types.path;
+        default = "${cfg.directories.home}/certificates";
+        description = ''
+          Directory for storing certificates to be used by Neo4j for
+          TLS connections.
+          </para>
+          <para>
+          When setting this directory to something other than its default,
+          ensure the directory's existence, and that read/write permissions are
+          given to the Neo4j daemon user <literal>neo4j</literal>.
+          </para>
+          <para>
+          Note that changing this directory from its default will prevent
+          the directory structure required for each SSL policy from being
+          automatically generated. A policy's directory structure as defined by
+          its <option>baseDirectory</option>, <option>revokedDir</option> and
+          <option>trustedDir</option> must then be set up manually. The
+          existence of these directories is mandatory, as well as the presence
+          of the certificate file and the private key. Ensure the correct
+          permissions are set on these directories and files.
+        '';
+      };
+
+      data = mkOption {
+        type = types.path;
+        default = "${cfg.directories.home}/data";
+        description = ''
+          Path of the data directory. You must not configure more than one
+          Neo4j installation to use the same data directory.
+          </para>
+          <para>
+          When setting this directory to something other than its default,
+          ensure the directory's existence, and that read/write permissions are
+          given to the Neo4j daemon user <literal>neo4j</literal>.
+        '';
+      };
+
+      home = mkOption {
+        type = types.path;
+        default = "/var/lib/neo4j";
+        description = ''
+          Path of the Neo4j home directory. Other default directories are
+          subdirectories of this path. This directory will be created if
+          non-existent, and its ownership will be <command>chown</command> to
+          the Neo4j daemon user <literal>neo4j</literal>.
+        '';
+      };
+
+      imports = mkOption {
+        type = types.path;
+        default = "${cfg.directories.home}/import";
+        description = ''
+          The root directory for file URLs used with the Cypher
+          <literal>LOAD CSV</literal> clause. Only meaningful when
+          <option>constrainLoadCsv</option> is set to
+          <literal>true</literal>.
+          </para>
+          <para>
+          When setting this directory to something other than its default,
+          ensure the directory's existence, and that read permission is
+          given to the Neo4j daemon user <literal>neo4j</literal>.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.path;
+        default = "${cfg.directories.home}/plugins";
+        description = ''
+          Path of the database plugin directory. Compiled Java JAR files that
+          contain database procedures will be loaded if they are placed in
+          this directory.
+          </para>
+          <para>
+          When setting this directory to something other than its default,
+          ensure the directory's existence, and that read permission is
+          given to the Neo4j daemon user <literal>neo4j</literal>.
+        '';
+      };
     };
 
-    enableHttps = mkOption {
-      description = "Enable https for Neo4j.";
-      default = false;
-      type = types.bool;
+    http = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          The HTTP connector is required for Neo4j, and cannot be disabled.
+          Setting this option to <literal>false</literal> will force the HTTP
+          connector's <option>listenAddress</option> to the loopback
+          interface to prevent connection of remote clients. To prevent all
+          clients from connecting, block the HTTP port (7474 by default) via
+          firewall.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = ":7474";
+        description = ''
+          Neo4j listen address for HTTP traffic. The listen address is
+          expressed in the format <literal>&lt;ip-address&gt;:&lt;port-number&gt;</literal>.
+        '';
+      };
     };
 
-    httpsPort = mkOption {
-      description = "Neo4j port to listen for HTTPS traffic.";
-      default = 7473;
-      type = types.int;
+    https = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Enable the HTTPS connector for Neo4j. Setting this option to
+          <literal>false</literal> will stop Neo4j from listening for incoming
+          connections on the HTTPS port (7473 by default).
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = ":7473";
+        description = ''
+          Neo4j listen address for HTTPS traffic. The listen address is
+          expressed in the format <literal>&lt;ip-address&gt;:&lt;port-number&gt;</literal>.
+        '';
+      };
+
+      sslPolicy = mkOption {
+        type = types.str;
+        default = "legacy";
+        description = ''
+          Neo4j SSL policy for HTTPS traffic.
+          </para>
+          <para>
+          The legacy policy is a special policy which is not defined in the
+          policy configuration section, but rather derives from
+          <option>directories.certificates</option> and
+          associated files (by default: <filename>neo4j.key</filename> and
+          <filename>neo4j.cert</filename>). Its use will be deprecated.
+        '';
+      };
     };
 
-    certDir = mkOption {
-      description = "Neo4j TLS certificates directory.";
-      default = "${cfg.dataDir}/certificates";
-      type = types.path;
+    shell = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable a remote shell server which Neo4j Shell clients can log in to.
+          Only applicable to <command>neo4j-shell</command>.
+        '';
+      };
     };
 
-    dataDir = mkOption {
-      description = "Neo4j data directory.";
-      default = "/var/lib/neo4j";
-      type = types.path;
+    ssl.policies = mkOption {
+      type = with types; attrsOf (submodule ({ name, config, options, ... }: {
+        options = {
+
+          allowKeyGeneration = mkOption {
+            type = types.bool;
+            default = false;
+            description = ''
+              Allows the generation of a private key and associated self-signed
+              certificate. Generation is only performed when neither object can
+              be found for this policy. It is recommended to turn this off
+              again after keys have been generated.
+              </para>
+              <para>
+              The public certificate is required to be duplicated to the
+              directory holding trusted certificates as defined by the
+              <option>trustedDir</option> option.
+              </para>
+              <para>
+              Keys should in general be generated and distributed offline by a
+              trusted certificate authority and not by utilizing this mode.
+            '';
+          };
+
+          baseDirectory = mkOption {
+            type = types.path;
+            default = "${cfg.directories.certificates}/${name}";
+            description = ''
+              The mandatory base directory for cryptographic objects of this
+              policy. This path is only automatically generated when this
+              option as well as <option>directories.certificates</option> are
+              left at their default. Ensure read/write permissions are given
+              to the Neo4j daemon user <literal>neo4j</literal>.
+              </para>
+              <para>
+              It is also possible to override each individual
+              configuration with absolute paths. See the
+              <option>privateKey</option> and <option>publicCertificate</option>
+              policy options.
+            '';
+          };
+
+          ciphers = mkOption {
+            type = types.nullOr (types.listOf types.str);
+            default = null;
+            description = ''
+              Restrict the allowed ciphers of this policy to those defined
+              here. The default ciphers are those of the JVM platform.
+            '';
+          };
+
+          clientAuth = mkOption {
+            type = types.enum [ "NONE" "OPTIONAL" "REQUIRE" ];
+            default = "REQUIRE";
+            description = ''
+              The client authentication stance for this policy.
+            '';
+          };
+
+          privateKey = mkOption {
+            type = types.str;
+            default = "private.key";
+            description = ''
+              The name of the private PKCS #8 key file for this policy, to be
+              found in the <option>baseDirectory</option>, or the absolute path to
+              the key file. It is mandatory that a key can be found or generated.
+            '';
+          };
+
+          publicCertificate = mkOption {
+            type = types.str;
+            default = "public.crt";
+            description = ''
+              The name of the public X.509 certificate (chain) file in PEM
+              format for this policy, to be found in the
+              <option>baseDirectory</option>, or the absolute path to the
+              certificate file. It is mandatory
+              that a certificate can be found or generated.
+              </para>
+              <para>
+              The public certificate is required to be duplicated to the
+              directory holding trusted certificates as defined by the
+              <option>trustedDir</option> option.
+            '';
+          };
+
+          revokedDir = mkOption {
+            type = types.path;
+            default = "${config.baseDirectory}/revoked";
+            description = ''
+              Path to directory of CRLs (Certificate Revocation Lists) in
+              PEM format. Must be an absolute path. The existence of this
+              directory is mandatory and will need to be created manually when:
+              setting this option to something other than its default; setting
+              either this policy's <option>baseDirectory</option> or
+              <option>directories.certificates</option> to something other than
+              their default. Ensure read/write permissions are given to the
+              Neo4j daemon user <literal>neo4j</literal>.
+            '';
+          };
+
+          tlsVersions = mkOption {
+            type = types.listOf types.str;
+            default = [ "TLSv1.2" ];
+            description = ''
+              Restrict the TLS protocol versions of this policy to those
+              defined here.
+            '';
+          };
+
+          trustAll = mkOption {
+            type = types.bool;
+            default = false;
+            description = ''
+              Makes this policy trust all remote parties, causing the
+              policy's trusted directory to be ignored. Use of this mode is
+              discouraged: it would offer encryption but no security.
+            '';
+          };
+
+          trustedDir = mkOption {
+            type = types.path;
+            default = "${config.baseDirectory}/trusted";
+            description = ''
+              Path to directory of X.509 certificates in PEM format for
+              trusted parties. Must be an absolute path. The existence of this
+              directory is mandatory and will need to be created manually when:
+              setting this option to something other than its default; setting
+              either this policy's <option>baseDirectory</option> or
+              <option>directories.certificates</option> to something other than
+              their default. Ensure read/write permissions are given to the
+              Neo4j daemon user <literal>neo4j</literal>.
+              </para>
+              <para>
+              The public certificate as defined by
+              <option>publicCertificate</option> is required to be duplicated
+              to this directory.
+            '';
+          };
+
+          directoriesToCreate = mkOption {
+            type = types.listOf types.path;
+            internal = true;
+            readOnly = true;
+            description = ''
+              Directories of this policy that will be created automatically
+              when the certificates directory is left at its default value.
+              This includes all options of type path that are left at their
+              default value.
+            '';
+          };
+
+        };
+
+        config.directoriesToCreate = optionals
+          (certDirOpt.highestPrio >= 1500 && options.baseDirectory.highestPrio >= 1500)
+          (map (opt: opt.value) (filter isDefaultPathOption (attrValues options)));
+
+      }));
+      default = {};
+      description = ''
+        Defines the SSL policies for use with Neo4j connectors. Each attribute
+        of this set defines a policy, with the attribute name defining the name
+        of the policy and its namespace. Refer to the operations manual section
+        on Neo4j's
+        <link xlink:href="https://neo4j.com/docs/operations-manual/current/security/ssl-framework/">SSL Framework</link>
+        for further details.
+      '';
     };
 
-    extraServerConfig = mkOption {
-      description = "Extra configuration for neo4j server.";
-      default = "";
-      type = types.lines;
+    udc = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable the Usage Data Collector which Neo4j uses to collect usage
+          data. Refer to the operations manual section on the
+          <link xlink:href="https://neo4j.com/docs/operations-manual/current/configuration/usage-data-collector/">Usage Data Collector</link>
+          for more information.
+        '';
+      };
     };
+
   };
 
   ###### implementation
 
-  config = mkIf cfg.enable {
-    systemd.services.neo4j = {
-      description = "Neo4j Daemon";
-      wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" ];
-      environment = {
-        NEO4J_HOME = "${cfg.package}/share/neo4j";
-        NEO4J_CONF = "${cfg.dataDir}/conf";
-      };
-      serviceConfig = {
-        ExecStart = "${cfg.package}/bin/neo4j console";
-        User = "neo4j";
-        PermissionsStartOnly = true;
-        LimitNOFILE = 40000;
-      };
-      preStart = ''
-        mkdir -m 0700 -p ${cfg.dataDir}/{data/graph.db,conf,logs}
-        ln -fs ${serverConfig} ${cfg.dataDir}/conf/neo4j.conf
-        if [ "$(id -u)" = 0 ]; then chown -R neo4j ${cfg.dataDir}; fi
-      '';
-    };
+  config =
+    let
+      # Assertion helpers
+      policyNameList = attrNames cfg.ssl.policies;
+      validPolicyNameList = [ "legacy" ] ++ policyNameList;
+      validPolicyNameString = concatStringsSep ", " validPolicyNameList;
+
+      # Capture various directories left at their default so they can be created.
+      defaultDirectoriesToCreate = map (opt: opt.value) (filter isDefaultPathOption (attrValues options.services.neo4j.directories));
+      policyDirectoriesToCreate = concatMap (pol: pol.directoriesToCreate) (attrValues cfg.ssl.policies);
+    in
+
+    mkIf cfg.enable {
+      assertions = [
+        { assertion = !elem "legacy" policyNameList;
+          message = "The policy 'legacy' is special to Neo4j, and its name is reserved."; }
+        { assertion = elem cfg.bolt.sslPolicy validPolicyNameList;
+          message = "Invalid policy assigned: `services.neo4j.bolt.sslPolicy = \"${cfg.bolt.sslPolicy}\"`, defined policies are: ${validPolicyNameString}"; }
+        { assertion = elem cfg.https.sslPolicy validPolicyNameList;
+          message = "Invalid policy assigned: `services.neo4j.https.sslPolicy = \"${cfg.https.sslPolicy}\"`, defined policies are: ${validPolicyNameString}"; }
+      ];
+
+      systemd.services.neo4j = {
+        description = "Neo4j Daemon";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        environment = {
+          NEO4J_HOME = "${cfg.package}/share/neo4j";
+          NEO4J_CONF = "${cfg.directories.home}/conf";
+        };
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/neo4j console";
+          User = "neo4j";
+          PermissionsStartOnly = true;
+          LimitNOFILE = 40000;
+        };
+
+        preStart = ''
+          # Directories Setup
+          #   Always ensure home exists with nested conf and logs directories.
+          mkdir -m 0700 -p ${cfg.directories.home}/{conf,logs}
 
-    environment.systemPackages = [ cfg.package ];
+          #   Create other sub-directories and policy directories that have been left at their default.
+          ${concatMapStringsSep "\n" (
+            dir: ''
+              mkdir -m 0700 -p ${dir}
+          '') (defaultDirectoriesToCreate ++ policyDirectoriesToCreate)}
 
-    users.extraUsers = singleton {
-      name = "neo4j";
-      uid = config.ids.uids.neo4j;
-      description = "Neo4j daemon user";
-      home = cfg.dataDir;
+          # Place the configuration where Neo4j can find it.
+          ln -fs ${serverConfig} ${cfg.directories.home}/conf/neo4j.conf
+
+          # Ensure neo4j user ownership
+          chown -R neo4j ${cfg.directories.home}
+        '';
+      };
+
+      environment.systemPackages = [ cfg.package ];
+
+      users.users = singleton {
+        name = "neo4j";
+        uid = config.ids.uids.neo4j;
+        description = "Neo4j daemon user";
+        home = cfg.directories.home;
+      };
     };
+
+  meta = {
+    maintainers = with lib.maintainers; [ patternspandemic ];
   };
 }
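
For reference, a minimal configuration exercising the reworked Neo4j options
might look as follows. This is a sketch: the policy name "example-policy" and
the chosen values are illustrative, not prescriptive.

    services.neo4j = {
      enable = true;
      # Require TLS on BOLT and move both connectors off the "legacy" policy.
      bolt.tlsLevel   = "REQUIRED";
      bolt.sslPolicy  = "example-policy";
      https.sslPolicy = "example-policy";
      ssl.policies."example-policy" = {
        # Self-signed key generation is for testing only; distribute real
        # keys offline via a trusted CA.
        allowKeyGeneration = true;
        clientAuth  = "NONE";
        tlsVersions = [ "TLSv1.2" ];
      };
    };
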
diff --git a/nixos/modules/services/databases/openldap.nix b/nixos/modules/services/databases/openldap.nix
index a67c61eb9949..9f2bf5ef8a9c 100644
--- a/nixos/modules/services/databases/openldap.nix
+++ b/nixos/modules/services/databases/openldap.nix
@@ -145,13 +145,13 @@ in
           "${configOpts}";
     };
 
-    users.extraUsers.openldap =
+    users.users.openldap =
       { name = cfg.user;
         group = cfg.group;
         uid = config.ids.uids.openldap;
       };
 
-    users.extraGroups.openldap =
+    users.groups.openldap =
       { name = cfg.group;
         gid = config.ids.gids.openldap;
       };
diff --git a/nixos/modules/services/databases/opentsdb.nix b/nixos/modules/services/databases/opentsdb.nix
index 489cdcffe658..b26fa9093ef4 100644
--- a/nixos/modules/services/databases/opentsdb.nix
+++ b/nixos/modules/services/databases/opentsdb.nix
@@ -97,13 +97,13 @@ in {
       };
     };
 
-    users.extraUsers.opentsdb = {
+    users.users.opentsdb = {
       description = "OpenTSDB Server user";
       group = "opentsdb";
       uid = config.ids.uids.opentsdb;
     };
 
-    users.extraGroups.opentsdb.gid = config.ids.gids.opentsdb;
+    users.groups.opentsdb.gid = config.ids.gids.opentsdb;
 
   };
 }
diff --git a/nixos/modules/services/databases/pgmanage.nix b/nixos/modules/services/databases/pgmanage.nix
index d1b48c06440e..1a34c7f5ecee 100644
--- a/nixos/modules/services/databases/pgmanage.nix
+++ b/nixos/modules/services/databases/pgmanage.nix
@@ -41,7 +41,9 @@ let
 
   pgmanage = "pgmanage";
 
-  pgmanageOptions = {
+in {
+
+  options.services.pgmanage = {
     enable = mkEnableOption "PostgreSQL Administration for the web";
 
     package = mkOption {
@@ -176,47 +178,29 @@ let
     };
   };
 
-
-in {
-
-  options.services.pgmanage = pgmanageOptions;
-
-  # This is deprecated and should be removed for NixOS-18.03.
-  options.services.postage = pgmanageOptions;
-
-  config = mkMerge [
-    { assertions = [
-        { assertion = !config.services.postage.enable;
-          message =
-            "services.postage is deprecated in favour of pgmanage. " +
-            "They have the same options so just substitute postage for pgmanage." ;
-        }
-      ];
-    }
-    (mkIf cfg.enable {
-      systemd.services.pgmanage = {
-        description = "pgmanage - PostgreSQL Administration for the web";
-        wants    = [ "postgresql.service" ];
-        after    = [ "postgresql.service" ];
-        wantedBy = [ "multi-user.target" ];
-        serviceConfig = {
-          User         = pgmanage;
-          Group        = pgmanage;
-          ExecStart    = "${pkgs.pgmanage}/sbin/pgmanage -c ${confFile}" +
-                         optionalString cfg.localOnly " --local-only=true";
-        };
+  config = mkIf cfg.enable {
+    systemd.services.pgmanage = {
+      description = "pgmanage - PostgreSQL Administration for the web";
+      wants    = [ "postgresql.service" ];
+      after    = [ "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User         = pgmanage;
+        Group        = pgmanage;
+        ExecStart    = "${pkgs.pgmanage}/sbin/pgmanage -c ${confFile}" +
+                       optionalString cfg.localOnly " --local-only=true";
       };
-      users = {
-        users."${pgmanage}" = {
-          name  = pgmanage;
-          group = pgmanage;
-          home  = cfg.sqlRoot;
-          createHome = true;
-        };
-        groups."${pgmanage}" = {
-          name = pgmanage;
-        };
+    };
+    users = {
+      users."${pgmanage}" = {
+        name  = pgmanage;
+        group = pgmanage;
+        home  = cfg.sqlRoot;
+        createHome = true;
       };
-    })
-  ];
+      groups."${pgmanage}" = {
+        name = pgmanage;
+      };
+    };
+  };
 }
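
Since the deprecated services.postage alias is removed here, existing
configurations migrate by renaming the attribute; all options carry over
unchanged:

    # before: services.postage.enable = true;
    services.pgmanage.enable = true;
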
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 4ad4728ccda6..e33bee9db6e9 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -24,14 +24,13 @@ let
 
   postgresql = postgresqlAndPlugins cfg.package;
 
-  flags = optional cfg.enableTCPIP "-i";
-
   # The main PostgreSQL configuration file.
   configFile = pkgs.writeText "postgresql.conf"
     ''
       hba_file = '${pkgs.writeText "pg_hba.conf" cfg.authentication}'
       ident_file = '${pkgs.writeText "pg_ident.conf" cfg.identMap}'
       log_destination = 'stderr'
+      listen_addresses = '${if cfg.enableTCPIP then "*" else "localhost"}'
       port = ${toString cfg.port}
       ${cfg.extraConfig}
     '';
@@ -184,14 +183,14 @@ in
         host  all all ::1/128      md5
       '';
 
-    users.extraUsers.postgres =
+    users.users.postgres =
       { name = "postgres";
         uid = config.ids.uids.postgres;
         group = "postgres";
         description = "PostgreSQL server user";
       };
 
-    users.extraGroups.postgres.gid = config.ids.gids.postgres;
+    users.groups.postgres.gid = config.ids.gids.postgres;
 
     environment.systemPackages = [ postgresql ];
 
@@ -229,7 +228,7 @@ in
                 "${cfg.dataDir}/recovery.conf"
             ''}
 
-             exec postgres ${toString flags}
+             exec postgres
           '';
 
         serviceConfig =
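
With this change, TCP listening is expressed through listen_addresses in the
generated postgresql.conf rather than the -i flag, so the module option
behaves as before:

    services.postgresql = {
      enable = true;
      enableTCPIP = true;  # now renders listen_addresses = '*' instead of passing -i
    };
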
diff --git a/nixos/modules/services/databases/redis.nix b/nixos/modules/services/databases/redis.nix
index e4e38a4364a0..cc7b51982d1d 100644
--- a/nixos/modules/services/databases/redis.nix
+++ b/nixos/modules/services/databases/redis.nix
@@ -217,7 +217,7 @@ in
       allowedTCPPorts = [ cfg.port ];
     };
 
-    users.extraUsers.redis =
+    users.users.redis =
       { name = cfg.user;
         description = "Redis database user";
       };
diff --git a/nixos/modules/services/databases/rethinkdb.nix b/nixos/modules/services/databases/rethinkdb.nix
index cd8c386b08db..789d9c851d64 100644
--- a/nixos/modules/services/databases/rethinkdb.nix
+++ b/nixos/modules/services/databases/rethinkdb.nix
@@ -96,12 +96,12 @@ in
       '';
     };
 
-    users.extraUsers.rethinkdb = mkIf (cfg.user == "rethinkdb")
+    users.users.rethinkdb = mkIf (cfg.user == "rethinkdb")
       { name = "rethinkdb";
         description = "RethinkDB server user";
       };
 
-    users.extraGroups = optionalAttrs (cfg.group == "rethinkdb") (singleton
+    users.groups = optionalAttrs (cfg.group == "rethinkdb") (singleton
       { name = "rethinkdb";
       });
 
diff --git a/nixos/modules/services/databases/riak-cs.nix b/nixos/modules/services/databases/riak-cs.nix
index 198efc29222a..2cb204f729a7 100644
--- a/nixos/modules/services/databases/riak-cs.nix
+++ b/nixos/modules/services/databases/riak-cs.nix
@@ -145,7 +145,7 @@ in
       ${cfg.extraAdvancedConfig}
     '';
 
-    users.extraUsers.riak-cs = {
+    users.users.riak-cs = {
       name = "riak-cs";
       uid = config.ids.uids.riak-cs;
       group = "riak";
diff --git a/nixos/modules/services/databases/riak.nix b/nixos/modules/services/databases/riak.nix
index e0ebf164aef0..ac086cf55996 100644
--- a/nixos/modules/services/databases/riak.nix
+++ b/nixos/modules/services/databases/riak.nix
@@ -102,14 +102,14 @@ in
       ${cfg.extraAdvancedConfig}
     '';
 
-    users.extraUsers.riak = {
+    users.users.riak = {
       name = "riak";
       uid = config.ids.uids.riak;
       group = "riak";
       description = "Riak server user";
     };
 
-    users.extraGroups.riak.gid = config.ids.gids.riak;
+    users.groups.riak.gid = config.ids.gids.riak;
 
     systemd.services.riak = {
       description = "Riak Server";
diff --git a/nixos/modules/services/databases/stanchion.nix b/nixos/modules/services/databases/stanchion.nix
index a4597cac3cd6..9fe49f51edd2 100644
--- a/nixos/modules/services/databases/stanchion.nix
+++ b/nixos/modules/services/databases/stanchion.nix
@@ -143,14 +143,14 @@ in
       ${cfg.extraConfig}
     '';
 
-    users.extraUsers.stanchion = {
+    users.users.stanchion = {
       name = "stanchion";
       uid = config.ids.uids.stanchion;
       group = "stanchion";
       description = "Stanchion server user";
     };
 
-    users.extraGroups.stanchion.gid = config.ids.gids.stanchion;
+    users.groups.stanchion.gid = config.ids.gids.stanchion;
 
     systemd.services.stanchion = {
       description = "Stanchion Server";
diff --git a/nixos/modules/services/databases/virtuoso.nix b/nixos/modules/services/databases/virtuoso.nix
index 3231fede08fa..6ffc44a5274e 100644
--- a/nixos/modules/services/databases/virtuoso.nix
+++ b/nixos/modules/services/databases/virtuoso.nix
@@ -54,7 +54,7 @@ with lib;
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = virtuosoUser;
         uid = config.ids.uids.virtuoso;
         description = "virtuoso user";
diff --git a/nixos/modules/services/desktops/bamf.nix b/nixos/modules/services/desktops/bamf.nix
new file mode 100644
index 000000000000..0928ee81a648
--- /dev/null
+++ b/nixos/modules/services/desktops/bamf.nix
@@ -0,0 +1,23 @@
+# Bamf
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+
+  options = {
+    services.bamf = {
+      enable = mkEnableOption "bamf";
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.bamf.enable {
+    services.dbus.packages = [ pkgs.bamf ];
+
+    systemd.packages = [ pkgs.bamf ];
+  };
+}
diff --git a/nixos/modules/services/desktops/flatpak.nix b/nixos/modules/services/desktops/flatpak.nix
index 024dc65629a8..cfca1893bd82 100644
--- a/nixos/modules/services/desktops/flatpak.nix
+++ b/nixos/modules/services/desktops/flatpak.nix
@@ -40,12 +40,12 @@ in {
 
     systemd.packages = [ pkgs.flatpak pkgs.xdg-desktop-portal ] ++ cfg.extraPortals;
 
-    environment.variables = {
-      PATH = [
-        "$HOME/.local/share/flatpak/exports/bin"
-        "/var/lib/flatpak/exports/bin"
-      ];
+    environment.profiles = [
+      "$HOME/.local/share/flatpak/exports"
+      "/var/lib/flatpak/exports"
+    ];
 
+    environment.variables = {
       XDG_DESKTOP_PORTAL_PATH = map (p: "${p}/share/xdg-desktop-portal/portals") cfg.extraPortals;
     };
   };
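
A usage sketch after this change; the extra GTK portal is an illustrative
choice, not required by the module:

    services.flatpak = {
      enable = true;
      extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
    };
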
diff --git a/nixos/modules/services/desktops/pipewire.nix b/nixos/modules/services/desktops/pipewire.nix
index 263a06156f84..13f3d61e84ca 100644
--- a/nixos/modules/services/desktops/pipewire.nix
+++ b/nixos/modules/services/desktops/pipewire.nix
@@ -3,20 +3,34 @@
 
 with lib;
 
-{
+let
+  cfg = config.services.pipewire;
+  packages = with pkgs; [ pipewire ];
+
+in {
   ###### interface
   options = {
     services.pipewire = {
       enable = mkEnableOption "pipewire service";
+
+      socketActivation = mkOption {
+        default = true;
+        type = types.bool;
+        description = ''
+          Automatically run pipewire when connections are made to the pipewire socket.
+        '';
+      };
     };
   };
 
 
   ###### implementation
-  config = mkIf config.services.pipewire.enable {
-    environment.systemPackages = [ pkgs.pipewire ];
+  config = mkIf cfg.enable {
+    environment.systemPackages = packages;
+
+    systemd.packages = packages;
 
-    systemd.packages = [ pkgs.pipewire ];
+    systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
   };
 
   meta.maintainers = with lib.maintainers; [ jtojnar ];
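
A sketch of opting out of the new socket activation; the user unit name
pipewire.service is assumed from the upstream package files:

    services.pipewire = {
      enable = true;
      socketActivation = false;  # start manually: systemctl --user start pipewire.service
    };
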
diff --git a/nixos/modules/services/development/bloop.nix b/nixos/modules/services/development/bloop.nix
new file mode 100644
index 000000000000..56904b7c40e6
--- /dev/null
+++ b/nixos/modules/services/development/bloop.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.bloop;
+
+in {
+
+  options.services.bloop = {
+    install = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to install a user service for the Bloop server.
+
+        The service must be manually started for each user with
+        "systemctl --user start bloop".
+      '';
+    };
+  };
+
+  config = mkIf (cfg.install) {
+    systemd.user.services.bloop = {
+      description = "Bloop Scala build server";
+
+      serviceConfig = {
+        Type      = "simple";
+        ExecStart = ''${pkgs.bloop}/bin/blp-server'';
+        Restart   = "always";
+      };
+    };
+
+    environment.systemPackages = [ pkgs.bloop ];
+  };
+}
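
Usage, per the option description above:

    services.bloop.install = true;
    # then, for each user: systemctl --user start bloop
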
diff --git a/nixos/modules/services/editors/infinoted.nix b/nixos/modules/services/editors/infinoted.nix
index 9074a4345eae..bba21caca85d 100644
--- a/nixos/modules/services/editors/infinoted.nix
+++ b/nixos/modules/services/editors/infinoted.nix
@@ -111,12 +111,12 @@ in {
   };
 
   config = mkIf (cfg.enable) {
-    users.extraUsers = optional (cfg.user == "infinoted")
+    users.users = optional (cfg.user == "infinoted")
       { name = "infinoted";
         description = "Infinoted user";
         group = cfg.group;
       };
-    users.extraGroups = optional (cfg.group == "infinoted")
+    users.groups = optional (cfg.group == "infinoted")
       { name = "infinoted";
       };
   
diff --git a/nixos/modules/services/games/minecraft-server.nix b/nixos/modules/services/games/minecraft-server.nix
index d2c8af6de0c5..f50d2897843a 100644
--- a/nixos/modules/services/games/minecraft-server.nix
+++ b/nixos/modules/services/games/minecraft-server.nix
@@ -45,7 +45,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers.minecraft = {
+    users.users.minecraft = {
       description     = "Minecraft Server Service user";
       home            = cfg.dataDir;
       createHome      = true;
diff --git a/nixos/modules/services/games/minetest-server.nix b/nixos/modules/services/games/minetest-server.nix
index 58b73ac4f6bf..2de42f20f6cc 100644
--- a/nixos/modules/services/games/minetest-server.nix
+++ b/nixos/modules/services/games/minetest-server.nix
@@ -79,7 +79,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers.minetest = {
+    users.users.minetest = {
       description     = "Minetest Server Service user";
       home            = "/var/lib/minetest";
       createHome      = true;
diff --git a/nixos/modules/services/games/terraria.nix b/nixos/modules/services/games/terraria.nix
index 21aff780b672..ddf17599296a 100644
--- a/nixos/modules/services/games/terraria.nix
+++ b/nixos/modules/services/games/terraria.nix
@@ -105,14 +105,14 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers.terraria = {
+    users.users.terraria = {
       description = "Terraria server service user";
       home        = "/var/lib/terraria";
       createHome  = true;
       uid         = config.ids.uids.terraria;
     };
 
-    users.extraGroups.terraria = {
+    users.groups.terraria = {
       gid = config.ids.gids.terraria;
       members = [ "terraria" ];
     };
diff --git a/nixos/modules/services/hardware/fwupd.nix b/nixos/modules/services/hardware/fwupd.nix
index d8abde2a600a..d97d690920a6 100644
--- a/nixos/modules/services/hardware/fwupd.nix
+++ b/nixos/modules/services/hardware/fwupd.nix
@@ -85,6 +85,6 @@ in {
   };
 
   meta = {
-    maintainers = pkgs.fwupd.maintainers;
+    maintainers = pkgs.fwupd.meta.maintainers;
   };
 }
diff --git a/nixos/modules/services/hardware/pcscd.nix b/nixos/modules/services/hardware/pcscd.nix
index fa97e8bf746b..f3fc4c3cc79e 100644
--- a/nixos/modules/services/hardware/pcscd.nix
+++ b/nixos/modules/services/hardware/pcscd.nix
@@ -61,8 +61,8 @@ in {
       description = "PCSC-Lite daemon";
       environment.PCSCLITE_HP_DROPDIR = pluginEnv;
       serviceConfig = {
-        ExecStart = "${pkgs.pcsclite}/sbin/pcscd -f -x -c ${cfgFile}";
-        ExecReload = "${pkgs.pcsclite}/sbin/pcscd -H";
+        ExecStart = "${getBin pkgs.pcsclite}/sbin/pcscd -f -x -c ${cfgFile}";
+        ExecReload = "${getBin pkgs.pcsclite}/sbin/pcscd -H";
       };
     };
   };
diff --git a/nixos/modules/services/hardware/sane.nix b/nixos/modules/services/hardware/sane.nix
index d651ccaa5776..fe05c5a5c06f 100644
--- a/nixos/modules/services/hardware/sane.nix
+++ b/nixos/modules/services/hardware/sane.nix
@@ -124,7 +124,7 @@ in
       environment.sessionVariables = env;
       services.udev.packages = backends;
 
-      users.extraGroups."scanner".gid = config.ids.gids.scanner;
+      users.groups."scanner".gid = config.ids.gids.scanner;
     })
 
     (mkIf config.services.saned.enable {
@@ -152,7 +152,7 @@ in
         };
       };
 
-      users.extraUsers."scanner" = {
+      users.users."scanner" = {
         uid = config.ids.uids.scanner;
         group = "scanner";
       };
diff --git a/nixos/modules/services/hardware/tcsd.nix b/nixos/modules/services/hardware/tcsd.nix
index d957b5063d38..d4b0a9495d75 100644
--- a/nixos/modules/services/hardware/tcsd.nix
+++ b/nixos/modules/services/hardware/tcsd.nix
@@ -137,13 +137,13 @@ in
       serviceConfig.ExecStart = "${pkgs.trousers}/sbin/tcsd -f -c ${tcsdConf}";
     };
 
-    users.extraUsers = optionalAttrs (cfg.user == "tss") (singleton
+    users.users = optionalAttrs (cfg.user == "tss") (singleton
       { name = "tss";
         group = "tss";
         uid = config.ids.uids.tss;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "tss") (singleton
+    users.groups = optionalAttrs (cfg.group == "tss") (singleton
       { name = "tss";
         gid = config.ids.gids.tss;
       });
diff --git a/nixos/modules/services/hardware/udisks2.nix b/nixos/modules/services/hardware/udisks2.nix
index ad5dc8e8a49b..ed8703be921c 100644
--- a/nixos/modules/services/hardware/udisks2.nix
+++ b/nixos/modules/services/hardware/udisks2.nix
@@ -40,15 +40,8 @@ with lib;
       '';
 
     services.udev.packages = [ pkgs.udisks2 ];
-    
-    systemd.services.udisks2 = {
-      description = "Udisks2 service";
-      serviceConfig = {
-        Type = "dbus";
-        BusName = "org.freedesktop.UDisks2";
-        ExecStart = "${pkgs.udisks2}/libexec/udisks2/udisksd --no-debug";
-      };
-    };
+
+    systemd.packages = [ pkgs.udisks2 ];
   };
 
 }
diff --git a/nixos/modules/services/hardware/usbmuxd.nix b/nixos/modules/services/hardware/usbmuxd.nix
index 7ebd49fa01c2..93ced0b9f04d 100644
--- a/nixos/modules/services/hardware/usbmuxd.nix
+++ b/nixos/modules/services/hardware/usbmuxd.nix
@@ -43,13 +43,13 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = optional (cfg.user == defaultUserGroup) {
+    users.users = optional (cfg.user == defaultUserGroup) {
       name = cfg.user;
       description = "usbmuxd user";
       group = cfg.group;
     };
 
-    users.extraGroups = optional (cfg.group == defaultUserGroup) {
+    users.groups = optional (cfg.group == defaultUserGroup) {
       name = cfg.group;
     };
 
@@ -65,7 +65,7 @@ in
       serviceConfig = {
         # Trigger the udev rule manually. This doesn't require replugging the
         # device when first enabling the option to get it to work
-        ExecStartPre = "${pkgs.libudev}/bin/udevadm trigger -s usb -a idVendor=${apple}";
+        ExecStartPre = "${pkgs.udev}/bin/udevadm trigger -s usb -a idVendor=${apple}";
         ExecStart = "${pkgs.usbmuxd}/bin/usbmuxd -U ${cfg.user} -f";
       };
     };
diff --git a/nixos/modules/services/logging/graylog.nix b/nixos/modules/services/logging/graylog.nix
index 95f31829882f..74a7b3c9b470 100644
--- a/nixos/modules/services/logging/graylog.nix
+++ b/nixos/modules/services/logging/graylog.nix
@@ -127,7 +127,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = mkIf (cfg.user == "graylog") {
+    users.users = mkIf (cfg.user == "graylog") {
       graylog = {
         uid = config.ids.uids.graylog;
         description = "Graylog server daemon user";
diff --git a/nixos/modules/services/logging/journaldriver.nix b/nixos/modules/services/logging/journaldriver.nix
new file mode 100644
index 000000000000..74ac3d4c2365
--- /dev/null
+++ b/nixos/modules/services/logging/journaldriver.nix
@@ -0,0 +1,112 @@
+# This module implements a systemd service for running journaldriver,
+# a log forwarding agent that sends logs from journald to Stackdriver
+# Logging.
+#
+# It can be enabled without extra configuration when running on GCP.
+# On machines hosted elsewhere, the other configuration options need
+# to be set.
+#
+# For further information please consult the documentation in the
+# upstream repository at: https://github.com/aprilabank/journaldriver/
+
+{ config, lib, pkgs, ...}:
+
+with lib; let cfg = config.services.journaldriver;
+in {
+  options.services.journaldriver = {
+    enable = mkOption {
+      type        = types.bool;
+      default     = false;
+      description = ''
+        Whether to enable journaldriver to forward journald logs to
+        Stackdriver Logging.
+      '';
+    };
+
+    logLevel = mkOption {
+      type        = types.str;
+      default     = "info";
+      description = ''
+        Log level at which journaldriver logs its own output.
+      '';
+    };
+
+    logName = mkOption {
+      type        = with types; nullOr str;
+      default     = null;
+      description = ''
+        Configures the name of the target log in Stackdriver Logging.
+        This option can be set to, for example, the hostname of a
+        machine to improve the user experience in the logging
+        overview.
+      '';
+    };
+
+    googleCloudProject = mkOption {
+      type        = with types; nullOr str;
+      default     = null;
+      description = ''
+        Configures the name of the Google Cloud project to which to
+        forward journald logs.
+
+        This option is required on non-GCP machines, but should not be
+        set on GCP instances.
+      '';
+    };
+
+    logStream = mkOption {
+      type        = with types; nullOr str;
+      default     = null;
+      description = ''
+        Configures the name of the Stackdriver Logging log stream into
+        which to write journald entries.
+
+        This option is required on non-GCP machines, but should not be
+        set on GCP instances.
+      '';
+    };
+
+    applicationCredentials = mkOption {
+      type        = with types; nullOr path;
+      default     = null;
+      description = ''
+        Path to the service account private key (in JSON-format) used
+        to forward log entries to Stackdriver Logging on non-GCP
+        instances.
+
+        This option is required on non-GCP machines, but should not be
+        set on GCP instances.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.journaldriver = {
+      description = "Stackdriver Logging journal forwarder";
+      script      = "${pkgs.journaldriver}/bin/journaldriver";
+      after       = [ "network-online.target" ];
+      wantedBy    = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Restart        = "always";
+        DynamicUser    = true;
+
+        # This directive lets systemd automatically configure
+        # permissions on /var/lib/journaldriver, the directory in
+        # which journaldriver persists its cursor state.
+        StateDirectory = "journaldriver";
+
+        # This group is required for accessing journald.
+        SupplementaryGroups = "systemd-journal";
+      };
+
+      environment = {
+        RUST_LOG                       = cfg.logLevel;
+        LOG_NAME                       = cfg.logName;
+        LOG_STREAM                     = cfg.logStream;
+        GOOGLE_CLOUD_PROJECT           = cfg.googleCloudProject;
+        GOOGLE_APPLICATION_CREDENTIALS = cfg.applicationCredentials;
+      };
+    };
+  };
+}
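
A sketch of a non-GCP deployment, where the last three options are required;
all values shown are illustrative:

    services.journaldriver = {
      enable                 = true;
      logName                = "my-host";
      googleCloudProject     = "my-project";
      logStream              = "my-stream";
      applicationCredentials = "/var/lib/secrets/journaldriver-key.json";
    };
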
diff --git a/nixos/modules/services/logging/journalwatch.nix b/nixos/modules/services/logging/journalwatch.nix
index d49795fe2b77..2c9bc18c8c3c 100644
--- a/nixos/modules/services/logging/journalwatch.nix
+++ b/nixos/modules/services/logging/journalwatch.nix
@@ -197,7 +197,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.${user} = {
+    users.users.${user} = {
       isSystemUser = true;
       createHome = true;
       home = dataDir;
@@ -241,6 +241,6 @@ in {
   };
 
   meta = {
-    maintainers = with stdenv.lib.maintainers; [ florianjacob ];
+    maintainers = with lib.maintainers; [ florianjacob ];
   };
 }
diff --git a/nixos/modules/services/logging/logcheck.nix b/nixos/modules/services/logging/logcheck.nix
index a4cab0c94cdc..1477d273d5ee 100644
--- a/nixos/modules/services/logging/logcheck.nix
+++ b/nixos/modules/services/logging/logcheck.nix
@@ -213,7 +213,7 @@ in
         mapAttrsToList writeIgnoreRule cfg.ignore
         ++ mapAttrsToList writeIgnoreCronRule cfg.ignoreCron;
 
-    users.extraUsers = optionalAttrs (cfg.user == "logcheck") (singleton
+    users.users = optionalAttrs (cfg.user == "logcheck") (singleton
       { name = "logcheck";
         uid = config.ids.uids.logcheck;
         shell = "/bin/sh";
diff --git a/nixos/modules/services/mail/dovecot.nix b/nixos/modules/services/mail/dovecot.nix
index 50477fdd25ba..04df97fdbbec 100644
--- a/nixos/modules/services/mail/dovecot.nix
+++ b/nixos/modules/services/mail/dovecot.nix
@@ -9,8 +9,6 @@ let
   baseDir = "/run/dovecot2";
   stateDir = "/var/lib/dovecot";
 
-  canCreateMailUserGroup = cfg.mailUser != null && cfg.mailGroup != null;
-
   dovecotConf = concatStrings [
     ''
       base_dir = ${baseDir}
@@ -309,7 +307,7 @@ in
      ++ optional cfg.enablePop3 "pop3"
      ++ optional cfg.enableLmtp "lmtp";
 
-    users.extraUsers = [
+    users.users = [
       { name = "dovenull";
         uid = config.ids.uids.dovenull2;
         description = "Dovecot user for untrusted logins";
@@ -328,7 +326,7 @@ in
            group = cfg.mailGroup;
          });
 
-    users.extraGroups = optional (cfg.group == "dovecot2")
+    users.groups = optional (cfg.group == "dovecot2")
       { name = "dovecot2";
         gid = config.ids.gids.dovecot2;
       }
diff --git a/nixos/modules/services/mail/dspam.nix b/nixos/modules/services/mail/dspam.nix
index 89076ff05462..167b5aeccc84 100644
--- a/nixos/modules/services/mail/dspam.nix
+++ b/nixos/modules/services/mail/dspam.nix
@@ -86,13 +86,13 @@ in {
 
   config = mkIf cfg.enable (mkMerge [
     {
-      users.extraUsers = optionalAttrs (cfg.user == "dspam") (singleton
+      users.users = optionalAttrs (cfg.user == "dspam") (singleton
         { name = "dspam";
           group = cfg.group;
           uid = config.ids.uids.dspam;
         });
 
-      users.extraGroups = optionalAttrs (cfg.group == "dspam") (singleton
+      users.groups = optionalAttrs (cfg.group == "dspam") (singleton
         { name = "dspam";
           gid = config.ids.gids.dspam;
         });
diff --git a/nixos/modules/services/mail/exim.nix b/nixos/modules/services/mail/exim.nix
index 440eae281f40..06c4b2811b3f 100644
--- a/nixos/modules/services/mail/exim.nix
+++ b/nixos/modules/services/mail/exim.nix
@@ -77,14 +77,14 @@ in
       systemPackages = [ exim ];
     };
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = cfg.user;
       description = "Exim mail transfer agent user";
       uid = config.ids.uids.exim;
       group = cfg.group;
     };
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = cfg.group;
       gid = config.ids.gids.exim;
     };
@@ -94,6 +94,7 @@ in
     systemd.services.exim = {
       description = "Exim Mail Daemon";
       wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."exim.conf".source ];
       serviceConfig = {
         ExecStart   = "${exim}/bin/exim -bdf -q30m";
         ExecReload  = "${coreutils}/bin/kill -HUP $MAINPID";
diff --git a/nixos/modules/services/mail/mailhog.nix b/nixos/modules/services/mail/mailhog.nix
index 206fb50d31a2..b78f4c8e0e66 100644
--- a/nixos/modules/services/mail/mailhog.nix
+++ b/nixos/modules/services/mail/mailhog.nix
@@ -24,7 +24,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.mailhog = {
+    users.users.mailhog = {
       name = cfg.user;
       description = "MailHog service user";
     };
diff --git a/nixos/modules/services/mail/mlmmj.nix b/nixos/modules/services/mail/mlmmj.nix
index b6439b44fb5f..11565bc02f89 100644
--- a/nixos/modules/services/mail/mlmmj.nix
+++ b/nixos/modules/services/mail/mlmmj.nix
@@ -94,7 +94,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = cfg.user;
       description = "mlmmj user";
       home = stateDir;
@@ -104,7 +104,7 @@ in
       useDefaultShell = true;
     };
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = cfg.group;
       gid = config.ids.gids.mlmmj;
     };
diff --git a/nixos/modules/services/mail/nullmailer.nix b/nixos/modules/services/mail/nullmailer.nix
index 59cb512c115b..418c02af4b7f 100644
--- a/nixos/modules/services/mail/nullmailer.nix
+++ b/nixos/modules/services/mail/nullmailer.nix
@@ -201,13 +201,13 @@ with lib;
     };
 
     users = {
-      extraUsers = singleton {
+      users = singleton {
         name = cfg.user;
         description = "Nullmailer relay-only mta user";
         group = cfg.group;
       };
 
-      extraGroups = singleton {
+      groups = singleton {
         name = cfg.group;
       };
     };
diff --git a/nixos/modules/services/mail/opendkim.nix b/nixos/modules/services/mail/opendkim.nix
index 59a8373843a1..7855efb46c73 100644
--- a/nixos/modules/services/mail/opendkim.nix
+++ b/nixos/modules/services/mail/opendkim.nix
@@ -88,13 +88,13 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = optionalAttrs (cfg.user == "opendkim") (singleton
+    users.users = optionalAttrs (cfg.user == "opendkim") (singleton
       { name = "opendkim";
         group = cfg.group;
         uid = config.ids.uids.opendkim;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "opendkim") (singleton
+    users.groups = optionalAttrs (cfg.group == "opendkim") (singleton
       { name = "opendkim";
         gid = config.ids.gids.opendkim;
       });
diff --git a/nixos/modules/services/mail/opensmtpd.nix b/nixos/modules/services/mail/opensmtpd.nix
index 53acdba42457..4276552d4f03 100644
--- a/nixos/modules/services/mail/opensmtpd.nix
+++ b/nixos/modules/services/mail/opensmtpd.nix
@@ -10,7 +10,7 @@ let
 
   sendmail = pkgs.runCommand "opensmtpd-sendmail" {} ''
     mkdir -p $out/bin
-    ln -s ${pkgs.opensmtpd}/sbin/smtpctl $out/bin/sendmail
+    ln -s ${cfg.package}/sbin/smtpctl $out/bin/sendmail
   '';
 
 in {
@@ -27,6 +27,13 @@ in {
         description = "Whether to enable the OpenSMTPD server.";
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.opensmtpd;
+        defaultText = "pkgs.opensmtpd";
+        description = "The OpenSMTPD package to use.";
+      };
+
       addSendmailToSystemPath = mkOption {
         type = types.bool;
         default = true;
@@ -76,12 +83,12 @@ in {
   ###### implementation
 
   config = mkIf cfg.enable {
-    users.extraGroups = {
+    users.groups = {
       smtpd.gid = config.ids.gids.smtpd;
       smtpq.gid = config.ids.gids.smtpq;
     };
 
-    users.extraUsers = {
+    users.users = {
       smtpd = {
         description = "OpenSMTPD process user";
         uid = config.ids.uids.smtpd;
@@ -97,7 +104,7 @@ in {
     systemd.services.opensmtpd = let
       procEnv = pkgs.buildEnv {
         name = "opensmtpd-procs";
-        paths = [ pkgs.opensmtpd ] ++ cfg.procPackages;
+        paths = [ cfg.package ] ++ cfg.procPackages;
         pathsToLink = [ "/libexec/opensmtpd" ];
       };
     in {
@@ -115,7 +122,7 @@ in {
         chown smtpq.root /var/spool/smtpd/purge
         chmod 700 /var/spool/smtpd/purge
       '';
-      serviceConfig.ExecStart = "${pkgs.opensmtpd}/sbin/smtpd -d -f ${conf} ${args}";
+      serviceConfig.ExecStart = "${cfg.package}/sbin/smtpd -d -f ${conf} ${args}";
       environment.OPENSMTPD_PROC_PATH = "${procEnv}/libexec/opensmtpd";
     };
 
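
A sketch of pinning the new package option (its default, shown here, is
pkgs.opensmtpd):

    services.opensmtpd = {
      enable = true;
      package = pkgs.opensmtpd;
    };
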
diff --git a/nixos/modules/services/mail/postfix.nix b/nixos/modules/services/mail/postfix.nix
index 5ab331ac067f..33249aa3e554 100644
--- a/nixos/modules/services/mail/postfix.nix
+++ b/nixos/modules/services/mail/postfix.nix
@@ -616,14 +616,14 @@ in
         setgid = true;
       };
 
-      users.extraUsers = optional (user == "postfix")
+      users.users = optional (user == "postfix")
         { name = "postfix";
           description = "Postfix mail server user";
           uid = config.ids.uids.postfix;
           group = group;
         };
 
-      users.extraGroups =
+      users.groups =
         optional (group == "postfix")
         { name = group;
           gid = config.ids.gids.postfix;
diff --git a/nixos/modules/services/mail/postgrey.nix b/nixos/modules/services/mail/postgrey.nix
index d4ae25c066ac..241f75eae279 100644
--- a/nixos/modules/services/mail/postgrey.nix
+++ b/nixos/modules/services/mail/postgrey.nix
@@ -136,14 +136,14 @@ in {
     environment.systemPackages = [ pkgs.postgrey ];
 
     users = {
-      extraUsers = {
+      users = {
         postgrey = {
           description = "Postgrey Daemon";
           uid = config.ids.uids.postgrey;
           group = "postgrey";
         };
       };
-      extraGroups = {
+      groups = {
         postgrey = {
           gid = config.ids.gids.postgrey;
         };
diff --git a/nixos/modules/services/mail/postsrsd.nix b/nixos/modules/services/mail/postsrsd.nix
index a1af16ec9ac1..8f12a16906c5 100644
--- a/nixos/modules/services/mail/postsrsd.nix
+++ b/nixos/modules/services/mail/postsrsd.nix
@@ -90,13 +90,13 @@ in {
 
     services.postsrsd.domain = mkDefault config.networking.hostName;
 
-    users.extraUsers = optionalAttrs (cfg.user == "postsrsd") (singleton
+    users.users = optionalAttrs (cfg.user == "postsrsd") (singleton
       { name = "postsrsd";
         group = cfg.group;
         uid = config.ids.uids.postsrsd;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "postsrsd") (singleton
+    users.groups = optionalAttrs (cfg.group == "postsrsd") (singleton
       { name = "postsrsd";
         gid = config.ids.gids.postsrsd;
       });
diff --git a/nixos/modules/services/mail/rmilter.nix b/nixos/modules/services/mail/rmilter.nix
index e17b7516bfff..7f38d7570132 100644
--- a/nixos/modules/services/mail/rmilter.nix
+++ b/nixos/modules/services/mail/rmilter.nix
@@ -194,14 +194,14 @@ in
 
     (mkIf cfg.enable {
 
-      users.extraUsers = singleton {
+      users.users = singleton {
         name = cfg.user;
         description = "rmilter daemon";
         uid = config.ids.uids.rmilter;
         group = cfg.group;
       };
 
-      users.extraGroups = singleton {
+      users.groups = singleton {
         name = cfg.group;
         gid = config.ids.gids.rmilter;
       };
@@ -238,12 +238,12 @@ in
     })
 
     (mkIf (cfg.enable && cfg.rspamd.enable && rspamdCfg.enable) {
-      users.extraUsers.${cfg.user}.extraGroups = [ rspamdCfg.group ];
+      users.users.${cfg.user}.extraGroups = [ rspamdCfg.group ];
     })
 
     (mkIf (cfg.enable && cfg.postfix.enable) {
       services.postfix.extraConfig = cfg.postfix.configFragment;
-      users.extraUsers.${postfixCfg.user}.extraGroups = [ cfg.group ];
+      users.users.${postfixCfg.user}.extraGroups = [ cfg.group ];
     })
   ];
 }
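
Note that only the top-level option sets are renamed; the per-user `extraGroups` attribute (the list of supplementary groups) keeps its name, as the last two hunks show. An illustrative sketch under that assumption:

    users.users.rmilter.extraGroups = [ "rspamd" ];  # supplementary groups; attribute name unchanged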
diff --git a/nixos/modules/services/mail/rspamd.nix b/nixos/modules/services/mail/rspamd.nix
index 09fb587e74b5..b3dae60c2c7e 100644
--- a/nixos/modules/services/mail/rspamd.nix
+++ b/nixos/modules/services/mail/rspamd.nix
@@ -282,14 +282,14 @@ in
     # Allow users to run 'rspamc' and 'rspamadm'.
     environment.systemPackages = [ pkgs.rspamd ];
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = cfg.user;
       description = "rspamd daemon";
       uid = config.ids.uids.rspamd;
       group = cfg.group;
     };
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = cfg.group;
       gid = config.ids.gids.rspamd;
     };
diff --git a/nixos/modules/services/mail/spamassassin.nix b/nixos/modules/services/mail/spamassassin.nix
index d483a8c3d67d..0c11ea431368 100644
--- a/nixos/modules/services/mail/spamassassin.nix
+++ b/nixos/modules/services/mail/spamassassin.nix
@@ -128,14 +128,14 @@ in
       systemPackages = [ pkgs.spamassassin ];
     };
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "spamd";
       description = "Spam Assassin Daemon";
       uid = config.ids.uids.spamd;
       group = "spamd";
     };
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = "spamd";
       gid = config.ids.gids.spamd;
     };
diff --git a/nixos/modules/services/misc/airsonic.nix b/nixos/modules/services/misc/airsonic.nix
index b92104787a56..083587b8ebb1 100644
--- a/nixos/modules/services/misc/airsonic.nix
+++ b/nixos/modules/services/misc/airsonic.nix
@@ -107,7 +107,7 @@ in {
       };
     };
 
-    users.extraUsers.airsonic = {
+    users.users.airsonic = {
       description = "Airsonic service user";
       name = cfg.user;
       home = cfg.home;
diff --git a/nixos/modules/services/misc/apache-kafka.nix b/nixos/modules/services/misc/apache-kafka.nix
index 82fa1cc2e7e5..363ac4411e11 100644
--- a/nixos/modules/services/misc/apache-kafka.nix
+++ b/nixos/modules/services/misc/apache-kafka.nix
@@ -124,7 +124,7 @@ in {
 
     environment.systemPackages = [cfg.package];
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "apache-kafka";
       uid = config.ids.uids.apache-kafka;
       description = "Apache Kafka daemon user";
diff --git a/nixos/modules/services/misc/autorandr.nix b/nixos/modules/services/misc/autorandr.nix
index 3020130ad1f6..4708e16e2a6c 100644
--- a/nixos/modules/services/misc/autorandr.nix
+++ b/nixos/modules/services/misc/autorandr.nix
@@ -12,6 +12,16 @@ in {
 
     services.autorandr = {
       enable = mkEnableOption "handling of hotplug and sleep events by autorandr";
+
+      defaultTarget = mkOption {
+        default = "default";
+        type = types.str;
+        description = ''
+          Fallback profile to use if no monitor layout can be detected. See the docs
+          (https://github.com/phillipberndt/autorandr/blob/v1.0/README.md#how-to-use)
+          for further reference.
+        '';
+      };
     };
 
   };
@@ -22,13 +32,21 @@ in {
 
     environment.systemPackages = [ pkgs.autorandr ];
 
-    systemd.packages = [ pkgs.autorandr ];
-
     systemd.services.autorandr = {
       wantedBy = [ "sleep.target" ];
+      description = "Autorandr execution hook";
+      after = [ "sleep.target" ];
+
+      serviceConfig = {
+        StartLimitInterval = 5;
+        StartLimitBurst = 1;
+        ExecStart = "${pkgs.autorandr}/bin/autorandr --batch --change --default ${cfg.defaultTarget}";
+        Type = "oneshot";
+        RemainAfterExit = false;
+      };
     };
 
   };
 
-  meta.maintainers = with maintainers; [ gnidorah ];
+  meta.maintainers = with maintainers; [ gnidorah ma27 ];
 }
diff --git a/nixos/modules/services/misc/bepasty.nix b/nixos/modules/services/misc/bepasty.nix
index c499e428af35..62835c194e42 100644
--- a/nixos/modules/services/misc/bepasty.nix
+++ b/nixos/modules/services/misc/bepasty.nix
@@ -168,14 +168,14 @@ in
         })
     ) cfg.servers;
 
-    users.extraUsers = [{
+    users.users = [{
       uid = config.ids.uids.bepasty;
       name = user;
       group = group;
       home = default_home;
     }];
 
-    users.extraGroups = [{
+    users.groups = [{
       name = group;
       gid = config.ids.gids.bepasty;
     }];
diff --git a/nixos/modules/services/misc/calibre-server.nix b/nixos/modules/services/misc/calibre-server.nix
index 6b19f780ec0c..84c04f403d3a 100644
--- a/nixos/modules/services/misc/calibre-server.nix
+++ b/nixos/modules/services/misc/calibre-server.nix
@@ -49,12 +49,12 @@ in
 
     environment.systemPackages = [ pkgs.calibre ];
 
-    users.extraUsers.calibre-server = {
+    users.users.calibre-server = {
         uid = config.ids.uids.calibre-server;
         group = "calibre-server";
       };
 
-    users.extraGroups.calibre-server = {
+    users.groups.calibre-server = {
         gid = config.ids.gids.calibre-server;
       };
 
diff --git a/nixos/modules/services/misc/cfdyndns.nix b/nixos/modules/services/misc/cfdyndns.nix
index 69a33d0b8c1b..dcf416022734 100644
--- a/nixos/modules/services/misc/cfdyndns.nix
+++ b/nixos/modules/services/misc/cfdyndns.nix
@@ -54,14 +54,14 @@ in
       };
     };
 
-    users.extraUsers = {
+    users.users = {
       cfdyndns = {
         group = "cfdyndns";
         uid = config.ids.uids.cfdyndns;
       };
     };
 
-    users.extraGroups = {
+    users.groups = {
       cfdyndns = {
         gid = config.ids.gids.cfdyndns;
       };
diff --git a/nixos/modules/services/misc/cgminer.nix b/nixos/modules/services/misc/cgminer.nix
index d5071d8ff767..b1cf5a7d1104 100644
--- a/nixos/modules/services/misc/cgminer.nix
+++ b/nixos/modules/services/misc/cgminer.nix
@@ -110,7 +110,7 @@ in
 
   config = mkIf config.services.cgminer.enable {
 
-    users.extraUsers = optionalAttrs (cfg.user == "cgminer") (singleton
+    users.users = optionalAttrs (cfg.user == "cgminer") (singleton
       { name = "cgminer";
         uid = config.ids.uids.cgminer;
         description = "Cgminer user";
diff --git a/nixos/modules/services/misc/clipmenu.nix b/nixos/modules/services/misc/clipmenu.nix
new file mode 100644
index 000000000000..3ba050044cac
--- /dev/null
+++ b/nixos/modules/services/misc/clipmenu.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.clipmenu;
+in {
+
+  options.services.clipmenu = {
+    enable = mkEnableOption "clipmenu, the clipboard management daemon";
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.clipmenu;
+      defaultText = "pkgs.clipmenu";
+      description = "clipmenu derivation to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.clipmenu = {
+      enable      = true;
+      description = "Clipboard management daemon";
+      wantedBy = [ "graphical-session.target" ];
+      after    = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = "${cfg.package}/bin/clipmenud";
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
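
Enabling the new clipmenu module starts `clipmenud` as a user service tied to the graphical session. Minimal sketch:

    services.clipmenu.enable = true;
    # optional: services.clipmenu.package = pkgs.clipmenu;  # the default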
diff --git a/nixos/modules/services/misc/couchpotato.nix b/nixos/modules/services/misc/couchpotato.nix
index 496487622351..70aa895f76d8 100644
--- a/nixos/modules/services/misc/couchpotato.nix
+++ b/nixos/modules/services/misc/couchpotato.nix
@@ -34,7 +34,7 @@ in
       };
     };
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "couchpotato";
         group = "couchpotato";
         home = "/var/lib/couchpotato/";
@@ -42,7 +42,7 @@ in
         uid = config.ids.uids.couchpotato;
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "couchpotato";
         gid = config.ids.gids.couchpotato;
       };
diff --git a/nixos/modules/services/misc/dictd.nix b/nixos/modules/services/misc/dictd.nix
index 7e3b6431a133..8d3e294622d1 100644
--- a/nixos/modules/services/misc/dictd.nix
+++ b/nixos/modules/services/misc/dictd.nix
@@ -45,7 +45,7 @@ in
     # get the command line client on system path to make some use of the service
     environment.systemPackages = [ pkgs.dict ];
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "dictd";
         group = "dictd";
         description = "DICT.org dictd server";
@@ -53,7 +53,7 @@ in
         uid = config.ids.uids.dictd;
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "dictd";
         gid = config.ids.gids.dictd;
       };
diff --git a/nixos/modules/services/misc/disnix.nix b/nixos/modules/services/misc/disnix.nix
index e4517c636e88..bb3ac1ecf075 100644
--- a/nixos/modules/services/misc/disnix.nix
+++ b/nixos/modules/services/misc/disnix.nix
@@ -7,16 +7,6 @@ let
 
   cfg = config.services.disnix;
 
-  dysnomia = pkgs.dysnomia.override (origArgs: {
-    enableApacheWebApplication = config.services.httpd.enable;
-    enableAxis2WebService = config.services.tomcat.axis2.enable;
-    enableEjabberdDump = config.services.ejabberd.enable;
-    enableMySQLDatabase = config.services.mysql.enable;
-    enablePostgreSQLDatabase = config.services.postgresql.enable;
-    enableSubversionRepository = config.services.svnserve.enable;
-    enableTomcatWebApplication = config.services.tomcat.enable;
-    enableMongoDatabase = config.services.mongodb.enable;
-  });
 in
 
 {
@@ -71,7 +61,7 @@ in
       ++ optional cfg.useWebServiceInterface "${pkgs.dbus_java}/share/java/dbus.jar";
     services.tomcat.webapps = optional cfg.useWebServiceInterface pkgs.DisnixWebService;
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "disnix";
         gid = config.ids.gids.disnix;
       };
diff --git a/nixos/modules/services/misc/docker-registry.nix b/nixos/modules/services/misc/docker-registry.nix
index 45931cb42b54..08031d33c131 100644
--- a/nixos/modules/services/misc/docker-registry.nix
+++ b/nixos/modules/services/misc/docker-registry.nix
@@ -5,44 +5,7 @@ with lib;
 let
   cfg = config.services.dockerRegistry;
 
-  blobCache = if cfg.enableRedisCache
-    then "redis"
-    else "inmemory";
-
-  registryConfig = {
-    version =  "0.1";
-    log.fields.service = "registry";
-    storage = {
-      cache.blobdescriptor = blobCache;
-      filesystem.rootdirectory = cfg.storagePath;
-      delete.enabled = cfg.enableDelete;
-    };
-    http = {
-      addr = ":${builtins.toString cfg.port}";
-      headers.X-Content-Type-Options = ["nosniff"];
-    };
-    health.storagedriver = {
-      enabled = true;
-      interval = "10s";
-      threshold = 3;
-    };
-  };
-
-  registryConfig.redis = mkIf cfg.enableRedisCache {
-    addr = "${cfg.redisUrl}";
-    password = "${cfg.redisPassword}";
-    db = 0;
-    dialtimeout = "10ms";
-    readtimeout = "10ms";
-    writetimeout = "10ms";
-    pool = {
-      maxidle = 16;
-      maxactive = 64;
-      idletimeout = "300s";
-    };
-  };
-
-  configFile = pkgs.writeText "docker-registry-config.yml" (builtins.toJSON (registryConfig // cfg.extraConfig));
+  configFile = pkgs.writeText "docker-registry-config.yml" (builtins.toJSON (recursiveUpdate registryConfig cfg.extraConfig));
 
 in {
   options.services.dockerRegistry = {
@@ -91,7 +54,7 @@ in {
         Docker extra registry configuration via environment variables.
       '';
       default = {};
-      type = types.attrsOf types.str;
+      type = types.attrs;
     };
 
     enableGarbageCollect = mkEnableOption "garbage collect";
@@ -120,6 +83,7 @@ in {
       serviceConfig = {
         User = "docker-registry";
         WorkingDirectory = cfg.storagePath;
+        AmbientCapabilities = mkIf (cfg.port < 1024) "cap_net_bind_service";
       };
     };
 
@@ -139,7 +103,7 @@ in {
       startAt = optional cfg.enableGarbageCollect cfg.garbageCollectDates;
     };
 
-    users.extraUsers.docker-registry = {
+    users.users.docker-registry = {
       createHome = true;
       home = cfg.storagePath;
     };
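
Since `extraConfig` is now merged with `recursiveUpdate` and typed as `types.attrs`, individual nested keys can be overridden without replacing a whole subtree. A hedged sketch; `http.host` is an upstream registry setting used here only for illustration:

    services.dockerRegistry = {
      enable = true;
      extraConfig.http.host = "https://registry.example.com";  # merges into registryConfig.http
    };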
diff --git a/nixos/modules/services/misc/dysnomia.nix b/nixos/modules/services/misc/dysnomia.nix
index 9e66e0811ab7..ba74b18b6970 100644
--- a/nixos/modules/services/misc/dysnomia.nix
+++ b/nixos/modules/services/misc/dysnomia.nix
@@ -62,9 +62,6 @@ let
       cd $out
 
       ${concatMapStrings (containerName:
-        let
-          components = cfg.components."${containerName}";
-        in
         linkMutableComponents { inherit containerName; }
       ) (builtins.attrNames cfg.components)}
     '';
diff --git a/nixos/modules/services/misc/emby.nix b/nixos/modules/services/misc/emby.nix
index e295f0f930e1..92a68b602510 100644
--- a/nixos/modules/services/misc/emby.nix
+++ b/nixos/modules/services/misc/emby.nix
@@ -4,7 +4,6 @@ with lib;
 
 let
   cfg = config.services.emby;
-  emby = pkgs.emby;
 in
 {
   options = {
@@ -54,14 +53,14 @@ in
       };
     };
 
-    users.extraUsers = mkIf (cfg.user == "emby") {
+    users.users = mkIf (cfg.user == "emby") {
       emby = {
         group = cfg.group;
         uid = config.ids.uids.emby;
       };
     };
 
-    users.extraGroups = mkIf (cfg.group == "emby") {
+    users.groups = mkIf (cfg.group == "emby") {
       emby = {
         gid = config.ids.gids.emby;
       };
diff --git a/nixos/modules/services/misc/errbot.nix b/nixos/modules/services/misc/errbot.nix
index cb2fa6776240..ac6ba2181de2 100644
--- a/nixos/modules/services/misc/errbot.nix
+++ b/nixos/modules/services/misc/errbot.nix
@@ -76,8 +76,8 @@ in {
   };
 
   config = mkIf (cfg.instances != {}) {
-    users.extraUsers.errbot.group = "errbot";
-    users.extraGroups.errbot = {};
+    users.users.errbot.group = "errbot";
+    users.groups.errbot = {};
 
     systemd.services = mapAttrs' (name: instanceCfg: nameValuePair "errbot-${name}" (
     let
diff --git a/nixos/modules/services/misc/etcd.nix b/nixos/modules/services/misc/etcd.nix
index 7c91462883f1..2d1893dae64b 100644
--- a/nixos/modules/services/misc/etcd.nix
+++ b/nixos/modules/services/misc/etcd.nix
@@ -188,7 +188,7 @@ in {
 
     environment.systemPackages = [ pkgs.etcdctl ];
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "etcd";
       uid = config.ids.uids.etcd;
       description = "Etcd daemon user";
diff --git a/nixos/modules/services/misc/exhibitor.nix b/nixos/modules/services/misc/exhibitor.nix
index 600bd780e7b0..a90c7f402e7f 100644
--- a/nixos/modules/services/misc/exhibitor.nix
+++ b/nixos/modules/services/misc/exhibitor.nix
@@ -4,7 +4,6 @@ with lib;
 
 let
   cfg = config.services.exhibitor;
-  exhibitor = cfg.package;
   exhibitorConfig = ''
     zookeeper-install-directory=${cfg.baseDir}/zookeeper
     zookeeper-data-directory=${cfg.zkDataDir}
@@ -408,7 +407,7 @@ in
         chmod -R u+w ${cfg.baseDir}/zookeeper/conf
       '';
     };
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "zookeeper";
       uid = config.ids.uids.zookeeper;
       description = "Zookeeper daemon user";
diff --git a/nixos/modules/services/misc/felix.nix b/nixos/modules/services/misc/felix.nix
index d6ad9dcaebc2..1c5ece868258 100644
--- a/nixos/modules/services/misc/felix.nix
+++ b/nixos/modules/services/misc/felix.nix
@@ -47,12 +47,12 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "osgi";
         gid = config.ids.gids.osgi;
       };
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "osgi";
         uid = config.ids.uids.osgi;
         description = "OSGi user";
diff --git a/nixos/modules/services/misc/folding-at-home.nix b/nixos/modules/services/misc/folding-at-home.nix
index 164221cbab7f..122c89ce0680 100644
--- a/nixos/modules/services/misc/folding-at-home.nix
+++ b/nixos/modules/services/misc/folding-at-home.nix
@@ -42,7 +42,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = fahUser;
         uid = config.ids.uids.foldingathome;
         description = "Folding@Home user";
diff --git a/nixos/modules/services/misc/gammu-smsd.nix b/nixos/modules/services/misc/gammu-smsd.nix
index 2d406b634437..3057d7fd1a09 100644
--- a/nixos/modules/services/misc/gammu-smsd.nix
+++ b/nixos/modules/services/misc/gammu-smsd.nix
@@ -200,7 +200,7 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers.${cfg.user} = {
+    users.users.${cfg.user} = {
       description = "gammu-smsd user";
       uid = config.ids.uids.gammu-smsd;
       extraGroups = [ "${cfg.device.group}" ];
diff --git a/nixos/modules/services/misc/geoip-updater.nix b/nixos/modules/services/misc/geoip-updater.nix
index e0b9df96f8e8..baf0a8d73d19 100644
--- a/nixos/modules/services/misc/geoip-updater.nix
+++ b/nixos/modules/services/misc/geoip-updater.nix
@@ -251,7 +251,7 @@ in
       }
     ];
 
-    users.extraUsers.geoip = {
+    users.users.geoip = {
       group = "root";
       description = "GeoIP database updater";
       uid = config.ids.uids.geoip;
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index 63e976ae566c..5d664728e0b5 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -4,6 +4,7 @@ with lib;
 
 let
   cfg = config.services.gitea;
+  gitea = cfg.package;
   pg = config.services.postgresql;
   usePostgresql = cfg.database.type == "postgres";
   configFile = pkgs.writeText "app.ini" ''
@@ -57,6 +58,13 @@ in
         description = "Enable Gitea Service.";
       };
 
+      package = mkOption {
+        default = pkgs.gitea;
+        type = types.package;
+        defaultText = "pkgs.gitea";
+        description = "gitea derivation to use";
+      };
+
       useWizard = mkOption {
         default = false;
         type = types.bool;
@@ -156,6 +164,30 @@ in
         };
       };
 
+      dump = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            Enable a timer that runs gitea dump to generate backup files of the
+            current gitea database and repositories.
+          '';
+        };
+
+        interval = mkOption {
+          type = types.str;
+          default = "04:31";
+          example = "hourly";
+          description = ''
+            Run a gitea dump at this interval. Runs by default at 04:31 every day.
+
+            The format is described in
+            <citerefentry><refentrytitle>systemd.time</refentrytitle>
+            <manvolnum>7</manvolnum></citerefentry>.
+          '';
+        };
+      };
+
       appName = mkOption {
         type = types.str;
         default = "gitea: Gitea Service";
@@ -203,7 +235,7 @@ in
 
       staticRootPath = mkOption {
         type = types.str;
-        default = "${pkgs.gitea.data}";
+        default = "${gitea.data}";
         example = "/var/lib/gitea/data";
         description = "Upper level of template and static files path.";
       };
@@ -223,7 +255,7 @@ in
       description = "gitea";
       after = [ "network.target" "postgresql.service" ];
       wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.gitea.bin ];
+      path = [ gitea.bin ];
 
       preStart = let
         runConfig = "${cfg.stateDir}/custom/conf/app.ini";
@@ -250,10 +282,10 @@ in
 
         mkdir -p ${cfg.repositoryRoot}
         # update all hooks' binary paths
-        HOOKS=$(find ${cfg.repositoryRoot} -mindepth 4 -maxdepth 4 -type f -wholename "*git/hooks/*")
+        HOOKS=$(find ${cfg.repositoryRoot} -mindepth 4 -maxdepth 5 -type f -wholename "*git/hooks/*")
         if [ "$HOOKS" ]
         then
-          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${pkgs.gitea.bin}/bin/gitea,g' $HOOKS
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea.bin}/bin/gitea,g' $HOOKS
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/env,${pkgs.coreutils}/bin/env,g' $HOOKS
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/bash,${pkgs.bash}/bin/bash,g' $HOOKS
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/perl,${pkgs.perl}/bin/perl,g' $HOOKS
@@ -261,7 +293,12 @@ in
         if [ ! -d ${cfg.stateDir}/conf/locale ]
         then
           mkdir -p ${cfg.stateDir}/conf
-          cp -r ${pkgs.gitea.out}/locale ${cfg.stateDir}/conf/locale
+          cp -r ${gitea.out}/locale ${cfg.stateDir}/conf/locale
+        fi
+        # update command option in authorized_keys
+        if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
+        then
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea.bin}/bin/gitea,g' ${cfg.stateDir}/.ssh/authorized_keys
         fi
       '' + optionalString (usePostgresql && cfg.database.createDatabase) ''
         if ! test -e "${cfg.stateDir}/db-created"; then
@@ -288,7 +325,7 @@ in
         User = cfg.user;
         WorkingDirectory = cfg.stateDir;
         PermissionsStartOnly = true;
-        ExecStart = "${pkgs.gitea.bin}/bin/gitea web";
+        ExecStart = "${gitea.bin}/bin/gitea web";
         Restart = "always";
       };
 
@@ -300,7 +337,7 @@ in
     };
 
     users = mkIf (cfg.user == "gitea") {
-      extraUsers.gitea = {
+      users.gitea = {
         description = "Gitea Service";
         home = cfg.stateDir;
         createHome = true;
@@ -318,5 +355,32 @@ in
         name = "gitea-database-password";
         text = cfg.database.password;
       })));
+
+    systemd.services.gitea-dump = mkIf cfg.dump.enable {
+       description = "gitea dump";
+       after = [ "gitea.service" ];
+       wantedBy = [ "default.target" ];
+       path = [ gitea.bin ];
+
+       environment = {
+         USER = cfg.user;
+         HOME = cfg.stateDir;
+         GITEA_WORK_DIR = cfg.stateDir;
+       };
+
+       serviceConfig = {
+         Type = "oneshot";
+         User = cfg.user;
+         ExecStart = "${gitea.bin}/bin/gitea dump";
+         WorkingDirectory = cfg.stateDir;
+       };
+    };
+
+    systemd.timers.gitea-dump = mkIf cfg.dump.enable {
+      description = "Update timer for gitea-dump";
+      partOf = [ "gitea-dump.service" ];
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = cfg.dump.interval;
+    };
   };
 }
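
A sketch combining the two additions, the `package` override and the dump timer; the interval is any systemd.time(7) calendar expression:

    services.gitea = {
      enable = true;
      dump.enable = true;
      dump.interval = "hourly";  # default is "04:31"
    };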
diff --git a/nixos/modules/services/misc/gitit.nix b/nixos/modules/services/misc/gitit.nix
index 94a98e0335df..0025d96bd37b 100644
--- a/nixos/modules/services/misc/gitit.nix
+++ b/nixos/modules/services/misc/gitit.nix
@@ -645,15 +645,15 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.gitit = {
-      group = config.users.extraGroups.gitit.name;
+    users.users.gitit = {
+      group = config.users.groups.gitit.name;
       description = "Gitit user";
       home = homeDir;
       createHome = true;
       uid = config.ids.uids.gitit;
     };
 
-    users.extraGroups.gitit.gid = config.ids.gids.gitit;
+    users.groups.gitit.gid = config.ids.gids.gitit;
 
     systemd.services.gitit = let
       uid = toString config.ids.uids.gitit;
@@ -715,8 +715,8 @@ NAMED
       '';
 
       serviceConfig = {
-        User = config.users.extraUsers.gitit.name;
-        Group = config.users.extraGroups.gitit.name;
+        User = config.users.users.gitit.name;
+        Group = config.users.groups.gitit.name;
         ExecStart = with cfg; gititSh haskellPackages extraPackages;
       };
     };
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index be13fed860bd..b09f0408e102 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -129,6 +129,7 @@ let
         };
       };
       extra = {};
+      uploads.storage_path = cfg.statePath;
     };
   };
 
@@ -443,7 +444,7 @@ in {
     # Use postfix to send out mails.
     services.postfix.enable = mkDefault true;
 
-    users.extraUsers = [
+    users.users = [
       { name = cfg.user;
         group = cfg.group;
         home = "${cfg.statePath}/home";
@@ -452,7 +453,7 @@ in {
       }
     ];
 
-    users.extraGroups = [
+    users.groups = [
       { name = cfg.group;
         gid = config.ids.gids.gitlab;
       }
@@ -565,13 +566,9 @@ in {
 
         ${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret
 
-        # The uploads directory is hardcoded somewhere deep in rails. It is
-        # symlinked in the gitlab package to /run/gitlab/uploads to make it
-        # configurable
         mkdir -p /run/gitlab
-        mkdir -p ${cfg.statePath}/{log,uploads}
+        mkdir -p ${cfg.statePath}/log
         ln -sf ${cfg.statePath}/log /run/gitlab/log
-        ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
         ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
         ln -sf $GITLAB_SHELL_CONFIG_PATH /run/gitlab/shell-config.yml
         chown -R ${cfg.user}:${cfg.group} /run/gitlab
@@ -587,6 +584,8 @@ in {
           ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
         ''}
         ln -sf ${cfg.statePath}/config /run/gitlab/config
+        rm ${cfg.statePath}/lib
+        ln -sf ${pkgs.gitlab}/share/gitlab/lib ${cfg.statePath}/lib
         cp ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
 
         # JSON is a subset of YAML
@@ -638,10 +637,6 @@ in {
         chmod -R ug+rwX,o-rwx ${cfg.statePath}/repositories
         chmod -R ug-s ${cfg.statePath}/repositories
         find ${cfg.statePath}/repositories -type d -print0 | xargs -0 chmod g+s
-        chmod 770 ${cfg.statePath}/uploads
-        chown -R ${cfg.user} ${cfg.statePath}/uploads
-        find ${cfg.statePath}/uploads -type f -exec chmod 0644 {} \;
-        find ${cfg.statePath}/uploads -type d -not -path ${cfg.statePath}/uploads -exec chmod 0770 {} \;
       '';
 
       serviceConfig = {
diff --git a/nixos/modules/services/misc/gitolite.nix b/nixos/modules/services/misc/gitolite.nix
index 6e60316d000c..b9c2a966e6f5 100644
--- a/nixos/modules/services/misc/gitolite.nix
+++ b/nixos/modules/services/misc/gitolite.nix
@@ -140,7 +140,7 @@ in
       push( @{$RC{ENABLE}}, 'git-annex-shell ua');
     '';
 
-    users.extraUsers.${cfg.user} = {
+    users.users.${cfg.user} = {
       description     = "Gitolite user";
       home            = cfg.dataDir;
       createHome      = true;
@@ -148,7 +148,7 @@ in
       group           = cfg.group;
       useDefaultShell = true;
     };
-    users.extraGroups."${cfg.group}".gid = config.ids.gids.gitolite;
+    users.groups."${cfg.group}".gid = config.ids.gids.gitolite;
 
     systemd.services."gitolite-init" = {
       description = "Gitolite initialization";
diff --git a/nixos/modules/services/misc/gogs.nix b/nixos/modules/services/misc/gogs.nix
index ba744d37e71c..ee99967c261b 100644
--- a/nixos/modules/services/misc/gogs.nix
+++ b/nixos/modules/services/misc/gogs.nix
@@ -254,7 +254,7 @@ in
     };
 
     users = mkIf (cfg.user == "gogs") {
-      extraUsers.gogs = {
+      users.gogs = {
         description = "Go Git Service";
         uid = config.ids.uids.gogs;
         group = "gogs";
@@ -262,7 +262,7 @@ in
         createHome = true;
         shell = pkgs.bash;
       };
-      extraGroups.gogs.gid = config.ids.gids.gogs;
+      groups.gogs.gid = config.ids.gids.gogs;
     };
 
     warnings = optional (cfg.database.password != "")
diff --git a/nixos/modules/services/misc/gollum.nix b/nixos/modules/services/misc/gollum.nix
index 0888221ab62f..d1823bc6d4df 100644
--- a/nixos/modules/services/misc/gollum.nix
+++ b/nixos/modules/services/misc/gollum.nix
@@ -93,8 +93,8 @@ in
       '';
 
       serviceConfig = {
-        User = config.users.extraUsers.gollum.name;
-        Group = config.users.extraGroups.gollum.name;
+        User = config.users.users.gollum.name;
+        Group = config.users.groups.gollum.name;
         PermissionsStartOnly = true;
         ExecStart = ''
           ${pkgs.gollum}/bin/gollum \
diff --git a/nixos/modules/services/misc/gpsd.nix b/nixos/modules/services/misc/gpsd.nix
index a4a4c7b5d937..3bfcb636a3c6 100644
--- a/nixos/modules/services/misc/gpsd.nix
+++ b/nixos/modules/services/misc/gpsd.nix
@@ -53,6 +53,14 @@ in
         '';
       };
 
+      nowait = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to poll the GPS device without waiting for a client to connect (gpsd's -n flag).
+        '';
+      };
+
       port = mkOption {
         type = types.int;
         default = 2947;
@@ -78,14 +86,14 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "gpsd";
         inherit uid;
         description = "gpsd daemon user";
         home = "/var/empty";
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "gpsd";
         inherit gid;
       };
@@ -99,7 +107,8 @@ in
         ExecStart = ''
           ${pkgs.gpsd}/sbin/gpsd -D "${toString cfg.debugLevel}"  \
             -S "${toString cfg.port}"                             \
-            ${if cfg.readonly then "-b" else ""}                  \
+            ${optionalString cfg.readonly "-b"}                   \
+            ${optionalString cfg.nowait "-n"}                     \
             "${cfg.device}"
         '';
       };
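
With `nowait` set, the generated command line gains `-n` and gpsd polls the receiver without waiting for a client. Sketch; the device path is illustrative:

    services.gpsd = {
      enable = true;
      device = "/dev/ttyUSB0";  # hypothetical serial GPS device
      nowait = true;            # appends -n to the gpsd invocation
    };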
diff --git a/nixos/modules/services/misc/home-assistant.nix b/nixos/modules/services/misc/home-assistant.nix
index 1dc7b44ee37b..0756e81612ac 100644
--- a/nixos/modules/services/misc/home-assistant.nix
+++ b/nixos/modules/services/misc/home-assistant.nix
@@ -37,7 +37,7 @@ let
   # List of components used in config
   extraComponents = filter useComponent availableComponents;
 
-  package = if cfg.autoExtraComponents
+  package = if (cfg.autoExtraComponents && cfg.config != null)
     then (cfg.package.override { inherit extraComponents; })
     else cfg.package;
 
@@ -110,7 +110,9 @@ in {
       '';
       description = ''
         Home Assistant package to use.
-        Override <literal>extraPackages</literal> in order to add additional dependencies.
+        Override <literal>extraPackages</literal> or <literal>extraComponents</literal> to add additional dependencies.
+        If you specify <option>config</option> and do not set <option>autoExtraComponents</option>
+        to <literal>false</literal>, overriding <literal>extraComponents</literal> will have no effect.
       '';
     };
 
@@ -128,9 +130,17 @@ in {
         you might need to specify it in <literal>extraPackages</literal>.
       '';
     };
+
+    openFirewall = mkOption {
+      default = false;
+      type = types.bool;
+      description = "Whether to open the firewall for the specified port.";
+    };
   };
 
   config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
     systemd.services.home-assistant = {
       description = "Home Assistant";
       after = [ "network.target" ];
@@ -162,13 +172,13 @@ in {
       after = wants;
     };
 
-    users.extraUsers.hass = {
+    users.users.hass = {
       home = cfg.configDir;
       createHome = true;
       group = "hass";
       uid = config.ids.uids.hass;
     };
 
-    users.extraGroups.hass.gid = config.ids.gids.hass;
+    users.groups.hass.gid = config.ids.gids.hass;
   };
 }
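
`openFirewall` simply gates an `allowedTCPPorts` entry on the module's configured port. Sketch:

    services.home-assistant = {
      enable = true;
      openFirewall = true;  # opens services.home-assistant.port over TCP
    };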
diff --git a/nixos/modules/services/misc/ihaskell.nix b/nixos/modules/services/misc/ihaskell.nix
index 6da9cc8c47e6..11597706d0d1 100644
--- a/nixos/modules/services/misc/ihaskell.nix
+++ b/nixos/modules/services/misc/ihaskell.nix
@@ -38,23 +38,23 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.ihaskell = {
-      group = config.users.extraGroups.ihaskell.name;
+    users.users.ihaskell = {
+      group = config.users.groups.ihaskell.name;
       description = "IHaskell user";
       home = "/var/lib/ihaskell";
       createHome = true;
       uid = config.ids.uids.ihaskell;
     };
 
-    users.extraGroups.ihaskell.gid = config.ids.gids.ihaskell;
+    users.groups.ihaskell.gid = config.ids.gids.ihaskell;
 
     systemd.services.ihaskell = {
       description = "IHaskell notebook instance";
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       serviceConfig = {
-        User = config.users.extraUsers.ihaskell.name;
-        Group = config.users.extraGroups.ihaskell.name;
+        User = config.users.users.ihaskell.name;
+        Group = config.users.groups.ihaskell.name;
         ExecStart = "${pkgs.runtimeShell} -c \"cd $HOME;${ihaskell}/bin/ihaskell-notebook\"";
       };
     };
diff --git a/nixos/modules/services/misc/jackett.nix b/nixos/modules/services/misc/jackett.nix
index 87a41ee70b54..db72d36f2ac7 100644
--- a/nixos/modules/services/misc/jackett.nix
+++ b/nixos/modules/services/misc/jackett.nix
@@ -36,12 +36,12 @@ in
       };
     };
 
-    users.extraUsers.jackett = {
+    users.users.jackett = {
       uid = config.ids.uids.jackett;
       home = "/var/lib/jackett";
       group = "jackett";
     };
-    users.extraGroups.jackett.gid = config.ids.gids.jackett;
+    users.groups.jackett.gid = config.ids.gids.jackett;
 
   };
 }
diff --git a/nixos/modules/services/misc/mathics.nix b/nixos/modules/services/misc/mathics.nix
index 50715858881a..c588a30d76cd 100644
--- a/nixos/modules/services/misc/mathics.nix
+++ b/nixos/modules/services/misc/mathics.nix
@@ -26,23 +26,23 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.mathics = {
-      group = config.users.extraGroups.mathics.name;
+    users.users.mathics = {
+      group = config.users.groups.mathics.name;
       description = "Mathics user";
       home = "/var/lib/mathics";
       createHome = true;
       uid = config.ids.uids.mathics;
     };
 
-    users.extraGroups.mathics.gid = config.ids.gids.mathics;
+    users.groups.mathics.gid = config.ids.gids.mathics;
 
     systemd.services.mathics = {
       description = "Mathics notebook server";
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       serviceConfig = {
-        User = config.users.extraUsers.mathics.name;
-        Group = config.users.extraGroups.mathics.name;
+        User = config.users.users.mathics.name;
+        Group = config.users.groups.mathics.name;
         ExecStart = concatStringsSep " " [
           "${pkgs.mathics}/bin/mathicsserver"
           "--port" (toString cfg.port)
diff --git a/nixos/modules/services/misc/matrix-synapse.nix b/nixos/modules/services/misc/matrix-synapse.nix
index f7441988b272..3e3de9114708 100644
--- a/nixos/modules/services/misc/matrix-synapse.nix
+++ b/nixos/modules/services/misc/matrix-synapse.nix
@@ -635,7 +635,7 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers = [
+    users.users = [
       { name = "matrix-synapse";
         group = "matrix-synapse";
         home = cfg.dataDir;
@@ -644,7 +644,7 @@ in {
         uid = config.ids.uids.matrix-synapse;
       } ];
 
-    users.extraGroups = [
+    users.groups = [
       { name = "matrix-synapse";
         gid = config.ids.gids.matrix-synapse;
       } ];
diff --git a/nixos/modules/services/misc/mediatomb.nix b/nixos/modules/services/misc/mediatomb.nix
index 40ec2831ff09..e8e9c0946d7f 100644
--- a/nixos/modules/services/misc/mediatomb.nix
+++ b/nixos/modules/services/misc/mediatomb.nix
@@ -4,7 +4,6 @@ with lib;
 
 let
 
-  uid = config.ids.uids.mediatomb;
   gid = config.ids.gids.mediatomb;
   cfg = config.services.mediatomb;
 
@@ -267,12 +266,12 @@ in {
       serviceConfig.User = "${cfg.user}";
     };
 
-    users.extraGroups = optionalAttrs (cfg.group == "mediatomb") (singleton {
+    users.groups = optionalAttrs (cfg.group == "mediatomb") (singleton {
       name = "mediatomb";
       gid = gid;
     });
 
-    users.extraUsers = optionalAttrs (cfg.user == "mediatomb") (singleton {
+    users.users = optionalAttrs (cfg.user == "mediatomb") (singleton {
       name = "mediatomb";
       isSystemUser = true;
       group = cfg.group;
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
index 277ae9e292ce..e64df8dc7d2d 100644
--- a/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -33,7 +33,7 @@ let
       sh = pkgs.runtimeShell;
       binshDeps = pkgs.writeReferencesToFile sh;
     in
-      pkgs.runCommand "nix.conf" { extraOptions = cfg.extraOptions; } ''
+      pkgs.runCommand "nix.conf" { extraOptions = cfg.extraOptions; } (''
         ${optionalString (!isNix20) ''
           extraPaths=$(for i in $(cat ${binshDeps}); do if test -d $i; then echo $i; fi; done)
         ''}
@@ -62,7 +62,11 @@ let
         ''}
         $extraOptions
         END
-      '';
+      '' + optionalString cfg.checkConfig ''
+        echo "Checking that Nix can read nix.conf..."
+        ln -s $out ./nix.conf
+        NIX_CONF_DIR=$PWD ${cfg.package}/bin/nix show-config >/dev/null
+      '');
 
 in
 
@@ -126,11 +130,13 @@ in
         default = false;
         description = "
           If set, Nix will perform builds in a sandboxed environment that it
-          will set up automatically for each build.  This prevents
-          impurities in builds by disallowing access to dependencies
-          outside of the Nix store. This isn't enabled by default for
-          performance. It doesn't affect derivation hashes, so changing
-          this option will not trigger a rebuild of packages.
+          will set up automatically for each build. This prevents impurities
+          in builds by using network and mount namespaces in a chroot
+          environment to disallow access to dependencies outside the Nix
+          store. It isn't enabled by default because of the performance
+          cost of setting up a sandbox for each build. It doesn't affect
+          derivation hashes, so changing this option will not trigger a
+          rebuild of packages.
         ";
       };
 
@@ -340,7 +346,7 @@ in
         default =
           [
             "$HOME/.nix-defexpr/channels"
-            "nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos/nixpkgs"
+            "nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos"
             "nixos-config=/etc/nixos/configuration.nix"
             "/nix/var/nix/profiles/per-user/root/channels"
           ];
@@ -351,6 +357,13 @@ in
         '';
       };
 
+      checkConfig = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          If enabled (the default), checks that Nix can parse the generated nix.conf.
+        '';
+      };
     };
 
   };
@@ -437,7 +450,7 @@ in
 
     nix.nrBuildUsers = mkDefault (lib.max 32 cfg.maxJobs);
 
-    users.extraUsers = nixbldUsers;
+    users.users = nixbldUsers;
 
     services.xserver.displayManager.hiddenUsers = map ({ name, ... }: name) nixbldUsers;
 
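The `checkConfig` check symlinks the candidate nix.conf into a scratch directory and runs `nix show-config` against it at build time, so a malformed file fails the system build instead of the running daemon. Opting out, should the check itself misbehave:

    nix.checkConfig = false;  # skip the build-time `nix show-config` parse check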
diff --git a/nixos/modules/services/misc/nix-ssh-serve.nix b/nixos/modules/services/misc/nix-ssh-serve.nix
index 5bd9cf9086f1..87ed7f0a61b9 100644
--- a/nixos/modules/services/misc/nix-ssh-serve.nix
+++ b/nixos/modules/services/misc/nix-ssh-serve.nix
@@ -36,7 +36,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.nix-ssh = {
+    users.users.nix-ssh = {
       description = "Nix SSH store user";
       uid = config.ids.uids.nix-ssh;
       useDefaultShell = true;
@@ -55,7 +55,7 @@ in {
       Match All
     '';
 
-    users.extraUsers.nix-ssh.openssh.authorizedKeys.keys = cfg.keys;
+    users.users.nix-ssh.openssh.authorizedKeys.keys = cfg.keys;
 
   };
 }
diff --git a/nixos/modules/services/misc/nixos-manual.nix b/nixos/modules/services/misc/nixos-manual.nix
index 4bd1c20edf71..3916c3052e8b 100644
--- a/nixos/modules/services/misc/nixos-manual.nix
+++ b/nixos/modules/services/misc/nixos-manual.nix
@@ -99,7 +99,7 @@ in
 
     services.nixosManual.browser = mkOption {
       type = types.path;
-      default = "${pkgs.w3m-nox}/bin/w3m";
+      default = "${pkgs.w3m-nographics}/bin/w3m";
       description = ''
         Browser used to show the manual.
       '';
diff --git a/nixos/modules/services/misc/nzbget.nix b/nixos/modules/services/misc/nzbget.nix
index a186d57ceba2..a472b6c7157c 100644
--- a/nixos/modules/services/misc/nzbget.nix
+++ b/nixos/modules/services/misc/nzbget.nix
@@ -4,7 +4,7 @@ with lib;
 
 let
   cfg = config.services.nzbget;
-  nzbget = pkgs.nzbget; in {
+in {
   options = {
     services.nzbget = {
       enable = mkEnableOption "NZBGet";
@@ -86,14 +86,14 @@ let
       };
     };
 
-    users.extraUsers = mkIf (cfg.user == "nzbget") {
+    users.users = mkIf (cfg.user == "nzbget") {
       nzbget = {
         group = cfg.group;
         uid = config.ids.uids.nzbget;
       };
     };
 
-    users.extraGroups = mkIf (cfg.group == "nzbget") {
+    users.groups = mkIf (cfg.group == "nzbget") {
       nzbget = {
         gid = config.ids.gids.nzbget;
       };
diff --git a/nixos/modules/services/misc/octoprint.nix b/nixos/modules/services/misc/octoprint.nix
index 6883993a893b..baa7c3ade52e 100644
--- a/nixos/modules/services/misc/octoprint.nix
+++ b/nixos/modules/services/misc/octoprint.nix
@@ -86,13 +86,13 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = optionalAttrs (cfg.user == "octoprint") (singleton
+    users.users = optionalAttrs (cfg.user == "octoprint") (singleton
       { name = "octoprint";
         group = cfg.group;
         uid = config.ids.uids.octoprint;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "octoprint") (singleton
+    users.groups = optionalAttrs (cfg.group == "octoprint") (singleton
       { name = "octoprint";
         gid = config.ids.gids.octoprint;
       });
diff --git a/nixos/modules/services/misc/osrm.nix b/nixos/modules/services/misc/osrm.nix
index 7ec8b15906fc..f89f37ccd9df 100644
--- a/nixos/modules/services/misc/osrm.nix
+++ b/nixos/modules/services/misc/osrm.nix
@@ -69,7 +69,7 @@ in
       wantedBy = [ "multi-user.target" ];
 
       serviceConfig = {
-        User = config.users.extraUsers.osrm.name;
+        User = config.users.users.osrm.name;
         ExecStart = ''
           ${pkgs.osrm-backend}/bin/osrm-routed \
             --ip ${cfg.address} \
diff --git a/nixos/modules/services/misc/plex.nix b/nixos/modules/services/misc/plex.nix
index 46221ace3084..8fe5879c2764 100644
--- a/nixos/modules/services/misc/plex.nix
+++ b/nixos/modules/services/misc/plex.nix
@@ -4,7 +4,6 @@ with lib;
 
 let
   cfg = config.services.plex;
-  plex = pkgs.plex;
 in
 {
   options = {
@@ -157,14 +156,14 @@ in
       allowedUDPPorts = [ 1900 5353 32410 32412 32413 32414 ];
     };
 
-    users.extraUsers = mkIf (cfg.user == "plex") {
+    users.users = mkIf (cfg.user == "plex") {
       plex = {
         group = cfg.group;
         uid = config.ids.uids.plex;
       };
     };
 
-    users.extraGroups = mkIf (cfg.group == "plex") {
+    users.groups = mkIf (cfg.group == "plex") {
       plex = {
         gid = config.ids.gids.plex;
       };
diff --git a/nixos/modules/services/misc/plexpy.nix b/nixos/modules/services/misc/plexpy.nix
index df9f12581247..2a589fdfb27f 100644
--- a/nixos/modules/services/misc/plexpy.nix
+++ b/nixos/modules/services/misc/plexpy.nix
@@ -74,7 +74,7 @@ in
       };
     };
 
-    users.extraUsers = mkIf (cfg.user == "plexpy") {
+    users.users = mkIf (cfg.user == "plexpy") {
       plexpy = { group = cfg.group; uid = config.ids.uids.plexpy; };
     };
   };
diff --git a/nixos/modules/services/misc/pykms.nix b/nixos/modules/services/misc/pykms.nix
index 897e856e2a2d..a11296e1bd02 100644
--- a/nixos/modules/services/misc/pykms.nix
+++ b/nixos/modules/services/misc/pykms.nix
@@ -73,7 +73,7 @@ in {
     };
 
     users = {
-      extraUsers.pykms = {
+      users.pykms = {
         name = "pykms";
         group = "pykms";
         home  = home;
@@ -82,7 +82,7 @@ in {
         description = "PyKMS daemon user";
       };
 
-      extraGroups.pykms = {
+      groups.pykms = {
         gid = config.ids.gids.pykms;
       };
     };
diff --git a/nixos/modules/services/misc/radarr.nix b/nixos/modules/services/misc/radarr.nix
index 245ad9f9a6df..7738eacc6ae9 100644
--- a/nixos/modules/services/misc/radarr.nix
+++ b/nixos/modules/services/misc/radarr.nix
@@ -36,12 +36,12 @@ in
       };
     };
 
-    users.extraUsers.radarr = {
+    users.users.radarr = {
       uid = config.ids.uids.radarr;
       home = "/var/lib/radarr";
       group = "radarr";
     };
-    users.extraGroups.radarr.gid = config.ids.gids.radarr;
+    users.groups.radarr.gid = config.ids.gids.radarr;
 
   };
 }
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
index 3997b3f0dca0..9a9424449f80 100644
--- a/nixos/modules/services/misc/redmine.nix
+++ b/nixos/modules/services/misc/redmine.nix
@@ -128,13 +128,13 @@ in {
       }
     ];
 
-    users.extraUsers = [
+    users.users = [
       { name = "redmine";
         group = "redmine";
         uid = config.ids.uids.redmine;
       } ];
 
-    users.extraGroups = [
+    users.groups = [
       { name = "redmine";
         gid = config.ids.gids.redmine;
       } ];
diff --git a/nixos/modules/services/misc/ripple-data-api.nix b/nixos/modules/services/misc/ripple-data-api.nix
index dbca56b13335..042b496d35ee 100644
--- a/nixos/modules/services/misc/ripple-data-api.nix
+++ b/nixos/modules/services/misc/ripple-data-api.nix
@@ -185,7 +185,7 @@ in {
       ];
     };
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "ripple-data-api";
         description = "Ripple data api user";
         uid = config.ids.uids.ripple-data-api;
diff --git a/nixos/modules/services/misc/rippled.nix b/nixos/modules/services/misc/rippled.nix
index 8bcf35a8ad38..9d9a0ba44da5 100644
--- a/nixos/modules/services/misc/rippled.nix
+++ b/nixos/modules/services/misc/rippled.nix
@@ -406,7 +406,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "rippled";
         description = "Ripple server user";
         uid = config.ids.uids.rippled;
diff --git a/nixos/modules/services/misc/serviio.nix b/nixos/modules/services/misc/serviio.nix
index a6612e9c6adb..8808f2d21931 100644
--- a/nixos/modules/services/misc/serviio.nix
+++ b/nixos/modules/services/misc/serviio.nix
@@ -63,7 +63,7 @@ in {
       };
     };
 
-    users.extraUsers = [
+    users.users = [
       { 
         name = "serviio";
         group = "serviio";
@@ -74,7 +74,7 @@ in {
       }
     ];
 
-    users.extraGroups = [
+    users.groups = [
       { name = "serviio";} 
     ];
 
diff --git a/nixos/modules/services/misc/siproxd.nix b/nixos/modules/services/misc/siproxd.nix
index 9e8fb6c228f2..dcaf73aca448 100644
--- a/nixos/modules/services/misc/siproxd.nix
+++ b/nixos/modules/services/misc/siproxd.nix
@@ -161,7 +161,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "siproxyd";
       uid = config.ids.uids.siproxd;
     };
diff --git a/nixos/modules/services/misc/sonarr.nix b/nixos/modules/services/misc/sonarr.nix
index ecde2c33bfa9..edba4e6c23eb 100644
--- a/nixos/modules/services/misc/sonarr.nix
+++ b/nixos/modules/services/misc/sonarr.nix
@@ -36,12 +36,12 @@ in
       };
     };
 
-    users.extraUsers.sonarr = {
+    users.users.sonarr = {
       uid = config.ids.uids.sonarr;
       home = "/var/lib/sonarr";
       group = "sonarr";
     };
-    users.extraGroups.sonarr.gid = config.ids.gids.sonarr;
+    users.groups.sonarr.gid = config.ids.gids.sonarr;
 
   };
 }
diff --git a/nixos/modules/services/misc/subsonic.nix b/nixos/modules/services/misc/subsonic.nix
index c2efd53d413a..1612b197f35f 100644
--- a/nixos/modules/services/misc/subsonic.nix
+++ b/nixos/modules/services/misc/subsonic.nix
@@ -130,7 +130,7 @@ let cfg = config.services.subsonic; in {
                 ! [ -e "${cfg.home}" ] &&
                 [ -d "$oldHome" ] &&
                 [ $(${pkgs.coreutils}/bin/stat -c %u "$oldHome") -eq \
-                    ${toString config.users.extraUsers.subsonic.uid} ]; then
+                    ${toString config.users.users.subsonic.uid} ]; then
             logger Moving "$oldHome" to "${cfg.home}"
             ${pkgs.coreutils}/bin/mv -T "$oldHome" "${cfg.home}"
         fi
@@ -152,7 +152,7 @@ let cfg = config.services.subsonic; in {
       };
     };
 
-    users.extraUsers.subsonic = {
+    users.users.subsonic = {
       description = "Subsonic daemon user";
       home = cfg.home;
       createHome = true;
@@ -160,6 +160,6 @@ let cfg = config.services.subsonic; in {
       uid = config.ids.uids.subsonic;
     };
 
-    users.extraGroups.subsonic.gid = config.ids.gids.subsonic;
+    users.groups.subsonic.gid = config.ids.gids.subsonic;
   };
 }
diff --git a/nixos/modules/services/misc/taskserver/default.nix b/nixos/modules/services/misc/taskserver/default.nix
index ba9f52f1904b..7daf12f91714 100644
--- a/nixos/modules/services/misc/taskserver/default.nix
+++ b/nixos/modules/services/misc/taskserver/default.nix
@@ -7,16 +7,6 @@ let
 
   taskd = "${pkgs.taskserver}/bin/taskd";
 
-  mkVal = val:
-    if val == true then "true"
-    else if val == false then "false"
-    else if isList val then concatStringsSep ", " val
-    else toString val;
-
-  mkConfLine = key: val: let
-    result = "${key} = ${mkVal val}";
-  in optionalString (val != null && val != []) result;
-
   mkManualPkiOption = desc: mkOption {
     type = types.nullOr types.path;
     default = null;
diff --git a/nixos/modules/services/misc/uhub.nix b/nixos/modules/services/misc/uhub.nix
index 15071202b9c2..005951b9231e 100644
--- a/nixos/modules/services/misc/uhub.nix
+++ b/nixos/modules/services/misc/uhub.nix
@@ -161,11 +161,11 @@ in
   config = mkIf cfg.enable {
 
     users = {
-      extraUsers = singleton {
+      users = singleton {
         name = "uhub";
         uid = config.ids.uids.uhub;
       };
-      extraGroups = singleton {
+      groups = singleton {
         name = "uhub";
         gid = config.ids.gids.uhub;
       };
diff --git a/nixos/modules/services/misc/xmr-stak.nix b/nixos/modules/services/misc/xmr-stak.nix
index 57f439365471..a87878c31e0d 100644
--- a/nixos/modules/services/misc/xmr-stak.nix
+++ b/nixos/modules/services/misc/xmr-stak.nix
@@ -10,9 +10,6 @@ let
     inherit (cfg) openclSupport cudaSupport;
   };
 
-  xmrConfArg = optionalString (cfg.configText != "") ("-c " +
-    pkgs.writeText "xmr-stak-config.txt" cfg.configText);
-
 in
 
 {
@@ -29,22 +26,34 @@ in
         description = "List of parameters to pass to xmr-stak.";
       };
 
-      configText = mkOption {
-        type = types.lines;
-        default = "";
-        example = ''
-          "currency" : "monero",
-          "pool_list" :
-            [ { "pool_address" : "pool.supportxmr.com:5555",
-                "wallet_address" : "<long-hash>",
-                "pool_password" : "minername",
-                "pool_weight" : 1,
-              },
-            ],
+      configFiles = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = literalExample ''
+          {
+            "config.txt" = '''
+              "verbose_level" : 4,
+              "h_print_time" : 60,
+              "tls_secure_algo" : true,
+            ''';
+            "pools.txt" = '''
+              "currency" : "monero7",
+              "pool_list" :
+              [ { "pool_address" : "pool.supportxmr.com:443",
+                  "wallet_address" : "my-wallet-address",
+                  "rig_id" : "",
+                  "pool_password" : "nixos",
+                  "use_nicehash" : false,
+                  "use_tls" : true,
+                  "tls_fingerprint" : "",
+                  "pool_weight" : 23
+                },
+              ],
+            ''';
+          }
         '';
         description = ''
-          Verbatim xmr-stak config.txt. If empty, the <literal>-c</literal>
-          parameter will not be added to the xmr-stak command.
+          Contents of the xmr-stak config files, such as config.txt, pools.txt or cpu.txt.
         '';
       };
     };
@@ -58,10 +67,13 @@ in
       environment = mkIf cfg.cudaSupport {
         LD_LIBRARY_PATH = "${pkgs.linuxPackages_latest.nvidia_x11}/lib";
       };
-      script = ''
-        exec ${pkg}/bin/xmr-stak ${xmrConfArg} ${concatStringsSep " " cfg.extraArgs}
-      '';
+
+      preStart = concatStrings (flip mapAttrsToList cfg.configFiles (fn: content: ''
+        ln -sf '${pkgs.writeText "xmr-stak-${fn}" content}' '${fn}'
+      ''));
+
       serviceConfig = let rootRequired = cfg.openclSupport || cfg.cudaSupport; in {
+        ExecStart = "${pkg}/bin/xmr-stak ${concatStringsSep " " cfg.extraArgs}";
         # xmr-stak generates cpu and/or gpu configuration files
         WorkingDirectory = "/tmp";
         PrivateTmp = true;
@@ -70,4 +82,12 @@ in
       };
     };
   };
+
+  imports = [
+    (mkRemovedOptionModule ["services" "xmr-stak" "configText"] ''
+      This option was removed in favour of `services.xmr-stak.configFiles`
+      after the new config file `pools.txt` was introduced. The other
+      config files, such as cpu.txt or amd.txt, can now be defined as well.
+    '')
+  ];
 }
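
Migrating from the removed `configText` is mechanical: split the former monolithic text across `configFiles` entries, which the service symlinks into its working directory in `preStart`. A hedged sketch with illustrative values:

    services.xmr-stak.configFiles = {
      "config.txt" = ''
        "verbose_level" : 3,
      '';
      "pools.txt" = ''
        "currency" : "monero7",
      '';
    };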
diff --git a/nixos/modules/services/misc/zookeeper.nix b/nixos/modules/services/misc/zookeeper.nix
index 91539592511c..cb7cc97d5a5c 100644
--- a/nixos/modules/services/misc/zookeeper.nix
+++ b/nixos/modules/services/misc/zookeeper.nix
@@ -144,7 +144,7 @@ in {
       '';
     };
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "zookeeper";
       uid = config.ids.uids.zookeeper;
       description = "Zookeeper daemon user";
diff --git a/nixos/modules/services/monitoring/bosun.nix b/nixos/modules/services/monitoring/bosun.nix
index 496838a131ba..8bf741adb6e3 100644
--- a/nixos/modules/services/monitoring/bosun.nix
+++ b/nixos/modules/services/monitoring/bosun.nix
@@ -153,13 +153,13 @@ in {
       };
     };
 
-    users.extraUsers.bosun = {
+    users.users.bosun = {
       description = "bosun user";
       group = "bosun";
       uid = config.ids.uids.bosun;
     };
 
-    users.extraGroups.bosun.gid = config.ids.gids.bosun;
+    users.groups.bosun.gid = config.ids.gids.bosun;
 
   };
 
diff --git a/nixos/modules/services/monitoring/collectd.nix b/nixos/modules/services/monitoring/collectd.nix
index dfbac3446e03..6606980cdad8 100644
--- a/nixos/modules/services/monitoring/collectd.nix
+++ b/nixos/modules/services/monitoring/collectd.nix
@@ -97,7 +97,7 @@ in {
       '';
     };
 
-    users.extraUsers = optional (cfg.user == "collectd") {
+    users.users = optional (cfg.user == "collectd") {
       name = "collectd";
     };
   };
diff --git a/nixos/modules/services/monitoring/dd-agent/dd-agent.nix b/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
index beaa2c01b298..cf65b6c28cf2 100644
--- a/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
+++ b/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
@@ -57,7 +57,7 @@ let
     instances:
       - use_mount: no
   '';
-  
+
   networkConfig = pkgs.writeText "network.yaml" ''
     init_config:
 
@@ -68,13 +68,13 @@ let
           - lo
           - lo0
   '';
-  
+
   postgresqlConfig = pkgs.writeText "postgres.yaml" cfg.postgresqlConfig;
   nginxConfig = pkgs.writeText "nginx.yaml" cfg.nginxConfig;
   mongoConfig = pkgs.writeText "mongo.yaml" cfg.mongoConfig;
   jmxConfig = pkgs.writeText "jmx.yaml" cfg.jmxConfig;
   processConfig = pkgs.writeText "process.yaml" cfg.processConfig;
-  
+
   etcfiles =
     let
       defaultConfd = import ./dd-agent-defaults.nix;
@@ -150,7 +150,7 @@ in {
       default = null;
       type = types.uniq (types.nullOr types.string);
     };
-    
+
     mongoConfig = mkOption {
       description = "MongoDB integration configuration";
       default = null;
@@ -166,7 +166,7 @@ in {
     processConfig = mkOption {
       description = ''
         Process integration configuration
- 
+
         See http://docs.datadoghq.com/integrations/process/
       '';
       default = null;
@@ -178,7 +178,7 @@ in {
   config = mkIf cfg.enable {
     environment.systemPackages = [ pkgs."dd-agent" pkgs.sysstat pkgs.procps ];
 
-    users.extraUsers.datadog = {
+    users.users.datadog = {
       description = "Datadog Agent User";
       uid = config.ids.uids.datadog;
       group = "datadog";
@@ -186,11 +186,11 @@ in {
       createHome = true;
     };
 
-    users.extraGroups.datadog.gid = config.ids.gids.datadog;
+    users.groups.datadog.gid = config.ids.gids.datadog;
 
     systemd.services.dd-agent = {
       description = "Datadog agent monitor";
-      path = [ pkgs."dd-agent" pkgs.python pkgs.sysstat pkgs.procps ];
+      path = [ pkgs."dd-agent" pkgs.python pkgs.sysstat pkgs.procps pkgs.gohai ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         ExecStart = "${pkgs.dd-agent}/bin/dd-agent foreground";
diff --git a/nixos/modules/services/monitoring/fusion-inventory.nix b/nixos/modules/services/monitoring/fusion-inventory.nix
index c3b869e00880..9c976c65ea49 100644
--- a/nixos/modules/services/monitoring/fusion-inventory.nix
+++ b/nixos/modules/services/monitoring/fusion-inventory.nix
@@ -46,7 +46,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "fusion-inventory";
       description = "FusionInventory user";
     };
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index eceb91525db4..3e801f9b838d 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -265,7 +265,7 @@ in {
       '';
     };
 
-    users.extraUsers.grafana = {
+    users.users.grafana = {
       uid = config.ids.uids.grafana;
       description = "Grafana user";
       home = cfg.dataDir;
diff --git a/nixos/modules/services/monitoring/graphite.nix b/nixos/modules/services/monitoring/graphite.nix
index 4b1ad34b3a4e..7ad26c137e51 100644
--- a/nixos/modules/services/monitoring/graphite.nix
+++ b/nixos/modules/services/monitoring/graphite.nix
@@ -638,13 +638,13 @@ in {
       cfg.web.enable || cfg.api.enable ||
       cfg.seyren.enable || cfg.pager.enable || cfg.beacon.enable
      ) {
-      users.extraUsers = singleton {
+      users.users = singleton {
         name = "graphite";
         uid = config.ids.uids.graphite;
         description = "Graphite daemon user";
         home = dataDir;
       };
-      users.extraGroups.graphite.gid = config.ids.gids.graphite;
+      users.groups.graphite.gid = config.ids.gids.graphite;
     })
   ];
 }
diff --git a/nixos/modules/services/monitoring/heapster.nix b/nixos/modules/services/monitoring/heapster.nix
index deee64aa41ea..fbdff2eb5dbe 100644
--- a/nixos/modules/services/monitoring/heapster.nix
+++ b/nixos/modules/services/monitoring/heapster.nix
@@ -49,7 +49,7 @@ in {
       };
     };
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "heapster";
       uid = config.ids.uids.heapster;
       description = "Heapster user";
diff --git a/nixos/modules/services/monitoring/munin.nix b/nixos/modules/services/monitoring/munin.nix
index 358ffd431dd4..ff9604c7dbcd 100644
--- a/nixos/modules/services/monitoring/munin.nix
+++ b/nixos/modules/services/monitoring/munin.nix
@@ -150,14 +150,14 @@ in
 
     environment.systemPackages = [ pkgs.munin ];
 
-    users.extraUsers = [{
+    users.users = [{
       name = "munin";
       description = "Munin monitoring user";
       group = "munin";
       uid = config.ids.uids.munin;
     }];
 
-    users.extraGroups = [{
+    users.groups = [{
       name = "munin";
       gid = config.ids.gids.munin;
     }];
diff --git a/nixos/modules/services/monitoring/nagios.nix b/nixos/modules/services/monitoring/nagios.nix
index 4914c5db97d2..3e1d727b416e 100644
--- a/nixos/modules/services/monitoring/nagios.nix
+++ b/nixos/modules/services/monitoring/nagios.nix
@@ -143,7 +143,7 @@ in
 
 
   config = mkIf cfg.enable {
-    users.extraUsers.nagios = {
+    users.users.nagios = {
       description = "Nagios user ";
       uid         = config.ids.uids.nagios;
       home        = nagiosState;
diff --git a/nixos/modules/services/monitoring/netdata.nix b/nixos/modules/services/monitoring/netdata.nix
index d23b329eeb25..eefddf5a206b 100644
--- a/nixos/modules/services/monitoring/netdata.nix
+++ b/nixos/modules/services/monitoring/netdata.nix
@@ -100,11 +100,11 @@ in {
     };
 
 
-    users.extraUsers = optional (cfg.user == defaultUser) {
+    users.users = optional (cfg.user == defaultUser) {
       name = defaultUser;
     };
 
-    users.extraGroups = optional (cfg.group == defaultUser) {
+    users.groups = optional (cfg.group == defaultUser) {
       name = defaultUser;
     };
 
diff --git a/nixos/modules/services/monitoring/prometheus/default.nix b/nixos/modules/services/monitoring/prometheus/default.nix
index 80122e69d167..5dda763bd56b 100644
--- a/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixos/modules/services/monitoring/prometheus/default.nix
@@ -471,8 +471,8 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraGroups.${promGroup}.gid = config.ids.gids.prometheus;
-    users.extraUsers.${promUser} = {
+    users.groups.${promGroup}.gid = config.ids.gids.prometheus;
+    users.users.${promUser} = {
       description = "Prometheus daemon user";
       uid = config.ids.uids.prometheus;
       group = promGroup;
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 780448d8bad8..8d2c303a69e8 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -20,6 +20,7 @@ let
   exporterOpts = {
     blackbox  = import ./exporters/blackbox.nix  { inherit config lib pkgs; };
     collectd  = import ./exporters/collectd.nix  { inherit config lib pkgs; };
+    dnsmasq   = import ./exporters/dnsmasq.nix   { inherit config lib pkgs; };
     dovecot   = import ./exporters/dovecot.nix   { inherit config lib pkgs; };
     fritzbox  = import ./exporters/fritzbox.nix  { inherit config lib pkgs; };
     json      = import ./exporters/json.nix      { inherit config lib pkgs; };
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix b/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
new file mode 100644
index 000000000000..b1fab85109af
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.dnsmasq;
+in
+{
+  port = 9153;
+  extraOpts = {
+    dnsmasqListenAddress = mkOption {
+      type = types.str;
+      default = "localhost:53";
+      description = ''
+        Address on which dnsmasq listens.
+      '';
+    };
+    leasesPath = mkOption {
+      type = types.path;
+      default = "/var/lib/misc/dnsmasq.leases";
+      example = "/var/lib/dnsmasq/dnsmasq.leases";
+      description = ''
+        Path to the <literal>dnsmasq.leases</literal> file.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-dnsmasq-exporter}/bin/dnsmasq_exporter \
+          --listen ${cfg.listenAddress}:${toString cfg.port} \
+          --dnsmasq ${cfg.dnsmasqListenAddress} \
+          --leases_path ${cfg.leasesPath} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
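A minimal sketch of enabling the new exporter; enable, listenAddress, port and extraFlags come from the shared exporter options referenced above:

    services.prometheus.exporters.dnsmasq = {
      enable = true;
      dnsmasqListenAddress = "localhost:53";
      leasesPath = "/var/lib/misc/dnsmasq.leases";
    };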
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/node.nix b/nixos/modules/services/monitoring/prometheus/exporters/node.nix
index c85f5f9cfb2d..ee7bf39f199a 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/node.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/node.nix
@@ -27,6 +27,7 @@ in
   };
   serviceOpts = {
     serviceConfig = {
+      RuntimeDirectory = "prometheus-node-exporter";
       ExecStart = ''
         ${pkgs.prometheus-node-exporter}/bin/node_exporter \
           ${concatMapStringsSep " " (x: "--collector." + x) cfg.enabledCollectors} \
diff --git a/nixos/modules/services/monitoring/riemann-dash.nix b/nixos/modules/services/monitoring/riemann-dash.nix
index 523f74cb72b9..7eb4d888b0cc 100644
--- a/nixos/modules/services/monitoring/riemann-dash.nix
+++ b/nixos/modules/services/monitoring/riemann-dash.nix
@@ -51,9 +51,9 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraGroups.riemanndash.gid = config.ids.gids.riemanndash;
+    users.groups.riemanndash.gid = config.ids.gids.riemanndash;
 
-    users.extraUsers.riemanndash = {
+    users.users.riemanndash = {
       description = "riemann-dash daemon user";
       uid = config.ids.uids.riemanndash;
       group = "riemanndash";
diff --git a/nixos/modules/services/monitoring/riemann-tools.nix b/nixos/modules/services/monitoring/riemann-tools.nix
index de858813a762..4e8832dadc5e 100644
--- a/nixos/modules/services/monitoring/riemann-tools.nix
+++ b/nixos/modules/services/monitoring/riemann-tools.nix
@@ -40,9 +40,9 @@ in {
 
   config = mkIf cfg.enableHealth {
 
-    users.extraGroups.riemanntools.gid = config.ids.gids.riemanntools;
+    users.groups.riemanntools.gid = config.ids.gids.riemanntools;
 
-    users.extraUsers.riemanntools = {
+    users.users.riemanntools = {
       description = "riemann-tools daemon user";
       uid = config.ids.uids.riemanntools;
       group = "riemanntools";
diff --git a/nixos/modules/services/monitoring/riemann.nix b/nixos/modules/services/monitoring/riemann.nix
index ac5d0134a80d..237de53456f9 100644
--- a/nixos/modules/services/monitoring/riemann.nix
+++ b/nixos/modules/services/monitoring/riemann.nix
@@ -69,9 +69,9 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraGroups.riemann.gid = config.ids.gids.riemann;
+    users.groups.riemann.gid = config.ids.gids.riemann;
 
-    users.extraUsers.riemann = {
+    users.users.riemann = {
       description = "riemann daemon user";
       uid = config.ids.uids.riemann;
       group = "riemann";
diff --git a/nixos/modules/services/monitoring/scollector.nix b/nixos/modules/services/monitoring/scollector.nix
index 2684482c6184..6ecb21d628de 100644
--- a/nixos/modules/services/monitoring/scollector.nix
+++ b/nixos/modules/services/monitoring/scollector.nix
@@ -123,13 +123,13 @@ in {
       };
     };
 
-    users.extraUsers.scollector = {
+    users.users.scollector = {
       description = "scollector user";
       group = "scollector";
       uid = config.ids.uids.scollector;
     };
 
-    users.extraGroups.scollector.gid = config.ids.gids.scollector;
+    users.groups.scollector.gid = config.ids.gids.scollector;
 
   };
 
diff --git a/nixos/modules/services/monitoring/statsd.nix b/nixos/modules/services/monitoring/statsd.nix
index 7b0e9981cbb1..ea155821ecc9 100644
--- a/nixos/modules/services/monitoring/statsd.nix
+++ b/nixos/modules/services/monitoring/statsd.nix
@@ -125,7 +125,7 @@ in
       message = "Only builtin backends (graphite, console, repeater) or backends enumerated in `pkgs.nodePackages` are allowed!";
     }) cfg.backends;
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "statsd";
       uid = config.ids.uids.statsd;
       description = "Statsd daemon user";
diff --git a/nixos/modules/services/monitoring/telegraf.nix b/nixos/modules/services/monitoring/telegraf.nix
index 49dc9d8143e6..6bfcd7143e1c 100644
--- a/nixos/modules/services/monitoring/telegraf.nix
+++ b/nixos/modules/services/monitoring/telegraf.nix
@@ -62,7 +62,7 @@ in {
       };
     };
 
-    users.extraUsers = [{
+    users.users = [{
       name = "telegraf";
       uid = config.ids.uids.telegraf;
       description = "telegraf daemon user";
diff --git a/nixos/modules/services/monitoring/ups.nix b/nixos/modules/services/monitoring/ups.nix
index 29dc68f90cc9..bc755612fd9b 100644
--- a/nixos/modules/services/monitoring/ups.nix
+++ b/nixos/modules/services/monitoring/ups.nix
@@ -259,7 +259,7 @@ in
 
 
 /*
-    users.extraUsers = [
+    users.users = [
       { name = "nut";
         uid = 84;
         home = "/var/lib/nut";
@@ -269,7 +269,7 @@ in
       }
     ];
 
-    users.extraGroups = [
+    users.groups = [
       { name = "nut";
         gid = 84;
       }
diff --git a/nixos/modules/services/monitoring/vnstat.nix b/nixos/modules/services/monitoring/vnstat.nix
index ca56e4a7b958..cb2f8c07edb9 100644
--- a/nixos/modules/services/monitoring/vnstat.nix
+++ b/nixos/modules/services/monitoring/vnstat.nix
@@ -16,7 +16,7 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers.vnstatd = {
+    users.users.vnstatd = {
       isSystemUser = true;
       description = "vnstat daemon user";
       home = "/var/lib/vnstat";
diff --git a/nixos/modules/services/monitoring/zabbix-agent.nix b/nixos/modules/services/monitoring/zabbix-agent.nix
index 88a63b4bf161..87857225e7d7 100644
--- a/nixos/modules/services/monitoring/zabbix-agent.nix
+++ b/nixos/modules/services/monitoring/zabbix-agent.nix
@@ -68,7 +68,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = mkIf (!config.services.zabbixServer.enable) (singleton
+    users.users = mkIf (!config.services.zabbixServer.enable) (singleton
       { name = "zabbix";
         uid = config.ids.uids.zabbix;
         description = "Zabbix daemon user";
diff --git a/nixos/modules/services/monitoring/zabbix-server.nix b/nixos/modules/services/monitoring/zabbix-server.nix
index acd1279ddf47..f62d55457ed4 100644
--- a/nixos/modules/services/monitoring/zabbix-server.nix
+++ b/nixos/modules/services/monitoring/zabbix-server.nix
@@ -85,7 +85,7 @@ in
 
     services.postgresql.enable = useLocalPostgres;
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "zabbix";
         uid = config.ids.uids.zabbix;
         description = "Zabbix daemon user";
diff --git a/nixos/modules/services/network-filesystems/beegfs.nix b/nixos/modules/services/network-filesystems/beegfs.nix
index a6a2ec6cbc36..182fabf6405f 100644
--- a/nixos/modules/services/network-filesystems/beegfs.nix
+++ b/nixos/modules/services/network-filesystems/beegfs.nix
@@ -31,7 +31,7 @@ let
     connPortShift = ${toString cfg.connPortShift}
     storeAllowFirstRunInit = false
 
-    ${cfg.mgmtd.extraConfig}
+    ${cfg.meta.extraConfig}
   '';
 
   configStorage = name: cfg: pkgs.writeText "storage-${name}.conf" ''
diff --git a/nixos/modules/services/network-filesystems/ceph.nix b/nixos/modules/services/network-filesystems/ceph.nix
index 5de8ae79a246..4e3bc839d400 100644
--- a/nixos/modules/services/network-filesystems/ceph.nix
+++ b/nixos/modules/services/network-filesystems/ceph.nix
@@ -332,13 +332,13 @@ in
       in
         generators.toINI {} totalConfig;
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "ceph";
       uid = config.ids.uids.ceph;
       description = "Ceph daemon user";
     };
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = "ceph";
       gid = config.ids.gids.ceph;
     };
diff --git a/nixos/modules/services/network-filesystems/davfs2.nix b/nixos/modules/services/network-filesystems/davfs2.nix
index 6b2a770100c5..c16e12378d75 100644
--- a/nixos/modules/services/network-filesystems/davfs2.nix
+++ b/nixos/modules/services/network-filesystems/davfs2.nix
@@ -57,12 +57,12 @@ in
     environment.systemPackages = [ pkgs.davfs2 ];
     environment.etc."davfs2/davfs2.conf".source = cfgFile;
 
-    users.extraGroups = optionalAttrs (cfg.davGroup == "davfs2") (singleton {
+    users.groups = optionalAttrs (cfg.davGroup == "davfs2") (singleton {
       name = "davfs2";
       gid = config.ids.gids.davfs2;
     });
 
-    users.extraUsers = optionalAttrs (cfg.davUser == "davfs2") (singleton {
+    users.users = optionalAttrs (cfg.davUser == "davfs2") (singleton {
       name = "davfs2";
       createHome = false;
       group = cfg.davGroup;
diff --git a/nixos/modules/services/network-filesystems/ipfs.nix b/nixos/modules/services/network-filesystems/ipfs.nix
index e2122ddb8ede..21b664e5b2f9 100644
--- a/nixos/modules/services/network-filesystems/ipfs.nix
+++ b/nixos/modules/services/network-filesystems/ipfs.nix
@@ -186,6 +186,14 @@ in {
         default = [];
       };
 
+      localDiscovery = mkOption {
+        type = types.bool;
+        description = ''
+          Whether to enable local discovery for the ipfs daemon.
+          This will allow ipfs to scan ports on your local network.
+          Some hosting services will ban you if you do this.
+        '';
+        default = true;
+      };
+
       serviceFdlimit = mkOption {
         type = types.nullOr types.int;
         default = null;
@@ -204,7 +212,7 @@ in {
       user_allow_other
     ''; };
 
-    users.extraUsers = mkIf (cfg.user == "ipfs") {
+    users.users = mkIf (cfg.user == "ipfs") {
       ipfs = {
         group = cfg.group;
         home = cfg.dataDir;
@@ -214,7 +222,7 @@ in {
       };
     };
 
-    users.extraGroups = mkIf (cfg.group == "ipfs") {
+    users.groups = mkIf (cfg.group == "ipfs") {
       ipfs.gid = config.ids.gids.ipfs;
     };
 
@@ -232,7 +240,13 @@ in {
       '';
       script = ''
         if [[ ! -f ${cfg.dataDir}/config ]]; then
-          ipfs init ${optionalString cfg.emptyRepo "-e"}
+          ipfs init ${optionalString cfg.emptyRepo "-e"} \
+            ${optionalString (! cfg.localDiscovery) "--profile=server"}
+        else
+          ${if cfg.localDiscovery
+            then "ipfs config profile apply local-discovery"
+            else "ipfs config profile apply server"
+          }
         fi
       '';
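A sketch of opting out of local discovery, e.g. on a hosted server; per the script above this applies the upstream "server" profile to both fresh and existing repositories:

    services.ipfs = {
      enable = true;
      localDiscovery = false;
    };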
 
diff --git a/nixos/modules/services/network-filesystems/openafs/client.nix b/nixos/modules/services/network-filesystems/openafs/client.nix
index 3826fe3edfd0..52c0966e05bc 100644
--- a/nixos/modules/services/network-filesystems/openafs/client.nix
+++ b/nixos/modules/services/network-filesystems/openafs/client.nix
@@ -1,6 +1,7 @@
-{ config, pkgs, lib, ... }:
+{ config, lib, pkgs, ... }:
 
-with import ./lib.nix { inherit lib; };
+# openafsMod, openafsBin, mkCellServDB
+with import ./lib.nix { inherit config lib pkgs; };
 
 let
   inherit (lib) getBin mkOption mkIf optionalString singleton types;
@@ -8,8 +9,8 @@ let
   cfg = config.services.openafsClient;
 
   cellServDB = pkgs.fetchurl {
-    url = http://dl.central.org/dl/cellservdb/CellServDB.2017-03-14;
-    sha256 = "1197z6c5xrijgf66rhaymnm5cvyg2yiy1i20y4ah4mrzmjx0m7sc";
+    url = http://dl.central.org/dl/cellservdb/CellServDB.2018-05-14;
+    sha256 = "1wmjn6mmyy2r8p10nlbdzs4nrqxy8a9pjyrdciy5nmppg4053rk2";
   };
 
   clientServDB = pkgs.writeText "client-cellServDB-${cfg.cellName}" (mkCellServDB cfg.cellName cfg.cellServDB);
@@ -21,8 +22,6 @@ let
     echo "${cfg.mountPoint}:${cfg.cache.directory}:${toString cfg.cache.blocks}" > $out/cacheinfo
   '';
 
-  openafsMod = config.boot.kernelPackages.openafs;
-  openafsBin = lib.getBin pkgs.openafs;
 in
 {
   ###### interface
@@ -147,6 +146,19 @@ in
         '';
       };
 
+      packages = {
+        module = mkOption {
+          default = config.boot.kernelPackages.openafs;
+          type = types.package;
+          description = "OpenAFS kernel module package. MUST match the userland package!";
+        };
+        programs = mkOption {
+          default = getBin pkgs.openafs;
+          type = types.package;
+          description = "OpenAFS programs package. MUST match the kernel module package!";
+        };
+      };
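A sketch of overriding the new package options; the values shown are the defaults, which already pair the kernel module with the matching userland:

    services.openafsClient.packages = {
      module = config.boot.kernelPackages.openafs;
      programs = lib.getBin pkgs.openafs;
    };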
+
       sparse = mkOption {
         default = true;
         type = types.bool;
@@ -180,7 +192,7 @@ in
       }
     ];
 
-    environment.systemPackages = [ pkgs.openafs ];
+    environment.systemPackages = [ openafsBin ];
 
     environment.etc = {
       clientCellServDB = {
diff --git a/nixos/modules/services/network-filesystems/openafs/lib.nix b/nixos/modules/services/network-filesystems/openafs/lib.nix
index ecfc72d2eaf9..255740ac65ef 100644
--- a/nixos/modules/services/network-filesystems/openafs/lib.nix
+++ b/nixos/modules/services/network-filesystems/openafs/lib.nix
@@ -1,14 +1,15 @@
-{ lib, ...}:
+{ config, lib, pkgs, ...}:
 
 let
-  inherit (lib) concatStringsSep mkOption types;
+  inherit (lib) concatStringsSep getBin mkOption types;
 
 in rec {
 
   mkCellServDB = cellName: db: ''
     >${cellName}
   '' + (concatStringsSep "\n" (map (dbm: if (dbm.ip != "" && dbm.dnsname != "") then dbm.ip + " #" + dbm.dnsname else "")
-                                   db));
+                                   db))
+     + "\n";
 
   # CellServDB configuration type
   cellServDBConfig = {
@@ -25,4 +26,8 @@ in rec {
       description = "DNS full-qualified domain name of a database server";
     };
   };
+
+  openafsMod = config.services.openafsClient.packages.module;
+  openafsBin = config.services.openafsClient.packages.programs;
+  openafsSrv = config.services.openafsServer.package;
 }
diff --git a/nixos/modules/services/network-filesystems/openafs/server.nix b/nixos/modules/services/network-filesystems/openafs/server.nix
index 429eb945ac9e..aa8640fd240e 100644
--- a/nixos/modules/services/network-filesystems/openafs/server.nix
+++ b/nixos/modules/services/network-filesystems/openafs/server.nix
@@ -1,6 +1,7 @@
-{ config, pkgs, lib, ... }:
+{ config, lib, pkgs, ... }:
 
-with import ./lib.nix { inherit lib; };
+# openafsBin, openafsSrv, mkCellServDB
+with import ./lib.nix { inherit config lib pkgs; };
 
 let
   inherit (lib) concatStringsSep intersperse mapAttrsToList mkForce mkIf mkMerge mkOption optionalString types;
@@ -11,21 +12,21 @@ let
     checkbintime 3 0 5 0 0
   '' + (optionalString cfg.roles.database.enable ''
     bnode simple vlserver 1
-    parm ${openafsBin}/libexec/openafs/vlserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.vlserverArgs}
+    parm ${openafsSrv}/libexec/openafs/vlserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.vlserverArgs}
     end
     bnode simple ptserver 1
-    parm ${openafsBin}/libexec/openafs/ptserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.ptserverArgs}
+    parm ${openafsSrv}/libexec/openafs/ptserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.ptserverArgs}
     end
   '') + (optionalString cfg.roles.fileserver.enable ''
     bnode dafs dafs 1
-    parm ${openafsBin}/libexec/openafs/dafileserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.fileserverArgs}
-    parm ${openafsBin}/libexec/openafs/davolserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.volserverArgs}
-    parm ${openafsBin}/libexec/openafs/salvageserver ${cfg.roles.fileserver.salvageserverArgs}
-    parm ${openafsBin}/libexec/openafs/dasalvager ${cfg.roles.fileserver.salvagerArgs}
+    parm ${openafsSrv}/libexec/openafs/dafileserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.fileserverArgs}
+    parm ${openafsSrv}/libexec/openafs/davolserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.volserverArgs}
+    parm ${openafsSrv}/libexec/openafs/salvageserver ${cfg.roles.fileserver.salvageserverArgs}
+    parm ${openafsSrv}/libexec/openafs/dasalvager ${cfg.roles.fileserver.salvagerArgs}
     end
   '') + (optionalString (cfg.roles.database.enable && cfg.roles.backup.enable) ''
     bnode simple buserver 1
-    parm ${openafsBin}/libexec/openafs/buserver ${cfg.roles.backup.buserverArgs} ${optionalString (cfg.roles.backup.cellServDB != []) "-cellservdb /etc/openafs/backup/"}
+    parm ${openafsSrv}/libexec/openafs/buserver ${cfg.roles.backup.buserverArgs} ${optionalString (cfg.roles.backup.cellServDB != []) "-cellservdb /etc/openafs/backup/"}
     end
   ''));
 
@@ -39,8 +40,6 @@ let
 
   udpSizeStr = toString cfg.udpPacketSize;
 
-  openafsBin = lib.getBin pkgs.openafs;
-
 in {
 
   options = {
@@ -79,6 +78,12 @@ in {
         description = "Definition of all cell-local database server machines.";
       };
 
+      package = mkOption {
+        default = pkgs.openafs.server or pkgs.openafs;
+        type = types.package;
+        description = "OpenAFS package for the server binaries";
+      };
+
       roles = {
         fileserver = {
           enable = mkOption {
@@ -213,7 +218,7 @@ in {
       }
     ];
 
-    environment.systemPackages = [ pkgs.openafs ];
+    environment.systemPackages = [ openafsBin ];
 
     environment.etc = {
       bosConfig = {
@@ -244,7 +249,10 @@ in {
         after = [ "syslog.target" "network.target" ];
         wantedBy = [ "multi-user.target" ];
         restartIfChanged = false;
-        unitConfig.ConditionPathExists = [ "/etc/openafs/server/rxkad.keytab" ];
+        unitConfig.ConditionPathExists = [
+          "|/etc/openafs/server/rxkad.keytab"
+          "|/etc/openafs/server/KeyFileExt"
+        ];
         preStart = ''
           mkdir -m 0755 -p /var/openafs
           ${optionalString (netInfo != null) "cp ${netInfo} /var/openafs/netInfo"}
diff --git a/nixos/modules/services/network-filesystems/tahoe.nix b/nixos/modules/services/network-filesystems/tahoe.nix
index 80b34c48f1d2..534862a3c9e4 100644
--- a/nixos/modules/services/network-filesystems/tahoe.nix
+++ b/nixos/modules/services/network-filesystems/tahoe.nix
@@ -255,7 +255,7 @@ in
               cp /etc/tahoe-lafs/introducer-"${node}".cfg ${lib.escapeShellArg nodedir}/tahoe.cfg
             '';
           });
-        users.extraUsers = flip mapAttrs' cfg.introducers (node: _:
+        users.users = flip mapAttrs' cfg.introducers (node: _:
           nameValuePair "tahoe.introducer-${node}" {
             description = "Tahoe node user for introducer ${node}";
             isSystemUser = true;
@@ -355,7 +355,7 @@ in
               cp /etc/tahoe-lafs/${lib.escapeShellArg node}.cfg ${lib.escapeShellArg nodedir}/tahoe.cfg
             '';
           });
-        users.extraUsers = flip mapAttrs' cfg.nodes (node: _:
+        users.users = flip mapAttrs' cfg.nodes (node: _:
           nameValuePair "tahoe.${node}" {
             description = "Tahoe node user for node ${node}";
             isSystemUser = true;
diff --git a/nixos/modules/services/network-filesystems/xtreemfs.nix b/nixos/modules/services/network-filesystems/xtreemfs.nix
index 95d7641e8b53..c93e201da56c 100644
--- a/nixos/modules/services/network-filesystems/xtreemfs.nix
+++ b/nixos/modules/services/network-filesystems/xtreemfs.nix
@@ -432,14 +432,14 @@ in
 
     environment.systemPackages = [ xtreemfs ];
 
-    users.extraUsers.xtreemfs =
+    users.users.xtreemfs =
       { uid = config.ids.uids.xtreemfs;
         description = "XtreemFS user";
         createHome = true;
         home = home;
       };
 
-    users.extraGroups.xtreemfs =
+    users.groups.xtreemfs =
       { gid = config.ids.gids.xtreemfs;
       };
 
diff --git a/nixos/modules/services/network-filesystems/yandex-disk.nix b/nixos/modules/services/network-filesystems/yandex-disk.nix
index 44b0edf62018..e93f45b49867 100644
--- a/nixos/modules/services/network-filesystems/yandex-disk.nix
+++ b/nixos/modules/services/network-filesystems/yandex-disk.nix
@@ -73,7 +73,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = mkIf (cfg.user == null) [ {
+    users.users = mkIf (cfg.user == null) [ {
       name = u;
       uid = config.ids.uids.yandexdisk;
       group = "nogroup";
diff --git a/nixos/modules/services/networking/amuled.nix b/nixos/modules/services/networking/amuled.nix
index 9898f164c5cf..57f02542eafd 100644
--- a/nixos/modules/services/networking/amuled.nix
+++ b/nixos/modules/services/networking/amuled.nix
@@ -45,14 +45,14 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = mkIf (cfg.user == null) [
+    users.users = mkIf (cfg.user == null) [
       { name = "amule";
         description = "AMule daemon";
         group = "amule";
         uid = config.ids.uids.amule;
       } ];
 
-    users.extraGroups = mkIf (cfg.user == null) [
+    users.groups = mkIf (cfg.user == null) [
       { name = "amule";
         gid = config.ids.gids.amule;
       } ];
diff --git a/nixos/modules/services/networking/aria2.nix b/nixos/modules/services/networking/aria2.nix
index df9c92db2e54..98eb00861016 100644
--- a/nixos/modules/services/networking/aria2.nix
+++ b/nixos/modules/services/networking/aria2.nix
@@ -92,7 +92,7 @@ in
       allowedTCPPorts = [ config.services.aria2.rpcListenPort ];
     };
 
-    users.extraUsers.aria2 = {
+    users.users.aria2 = {
       group = "aria2";
       uid = config.ids.uids.aria2;
       description = "aria2 user";
@@ -100,7 +100,7 @@ in
       createHome = false;
     };
 
-    users.extraGroups.aria2.gid = config.ids.gids.aria2;
+    users.groups.aria2.gid = config.ids.gids.aria2;
 
     systemd.services.aria2 = {
       description = "aria2 Service";
diff --git a/nixos/modules/services/networking/asterisk.nix b/nixos/modules/services/networking/asterisk.nix
index 514204db33fa..b8ec2b25a227 100644
--- a/nixos/modules/services/networking/asterisk.nix
+++ b/nixos/modules/services/networking/asterisk.nix
@@ -211,7 +211,7 @@ in
 
     environment.etc.asterisk.source = asteriskEtc;
 
-    users.extraUsers.asterisk =
+    users.users.asterisk =
       { name = asteriskUser;
         group = asteriskGroup;
         uid = config.ids.uids.asterisk;
@@ -219,7 +219,7 @@ in
         home = varlibdir;
       };
 
-    users.extraGroups.asterisk =
+    users.groups.asterisk =
       { name = asteriskGroup;
         gid = config.ids.gids.asterisk;
       };
diff --git a/nixos/modules/services/networking/avahi-daemon.nix b/nixos/modules/services/networking/avahi-daemon.nix
index 9ccdacb20e91..81e11db10409 100644
--- a/nixos/modules/services/networking/avahi-daemon.nix
+++ b/nixos/modules/services/networking/avahi-daemon.nix
@@ -187,14 +187,14 @@ in
 
     services.avahi.hostName = mkDefault config.networking.hostName;
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "avahi";
         uid = config.ids.uids.avahi;
         description = "`avahi-daemon' privilege separation user";
         home = "/var/empty";
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "avahi";
         gid = config.ids.gids.avahi;
       };
diff --git a/nixos/modules/services/networking/bind.nix b/nixos/modules/services/networking/bind.nix
index 763283dfe7a2..abcd1ef6ff5d 100644
--- a/nixos/modules/services/networking/bind.nix
+++ b/nixos/modules/services/networking/bind.nix
@@ -27,6 +27,7 @@ let
         forwarders { ${concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
         directory "/var/run/named";
         pid-file "/var/run/named/named.pid";
+        ${cfg.extraOptions}
       };
 
       ${cfg.extraConfig}
@@ -141,6 +142,15 @@ in
         ";
       };
 
+      extraOptions = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra lines to be added verbatim to the options section of the
+          generated named configuration file.
+        '';
+      };
+
       configFile = mkOption {
         type = types.path;
         default = confFile;
@@ -160,7 +170,7 @@ in
 
   config = mkIf config.services.bind.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = bindUser;
         uid = config.ids.uids.bind;
         description = "BIND daemon user";
diff --git a/nixos/modules/services/networking/bird.nix b/nixos/modules/services/networking/bird.nix
index c25bd0fdc541..9f08cc0a79ed 100644
--- a/nixos/modules/services/networking/bird.nix
+++ b/nixos/modules/services/networking/bird.nix
@@ -60,11 +60,11 @@ let
           };
         };
         users = {
-          extraUsers.${variant} = {
+          users.${variant} = {
             description = "BIRD Internet Routing Daemon user";
             group = variant;
           };
-          extraGroups.${variant} = {};
+          groups.${variant} = {};
         };
       };
     };
diff --git a/nixos/modules/services/networking/bitlbee.nix b/nixos/modules/services/networking/bitlbee.nix
index bd26804788f3..392a8d5c2e7c 100644
--- a/nixos/modules/services/networking/bitlbee.nix
+++ b/nixos/modules/services/networking/bitlbee.nix
@@ -149,7 +149,7 @@ in
 
   config = mkIf config.services.bitlbee.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "bitlbee";
         uid = bitlbeeUid;
         description = "BitlBee user";
@@ -157,7 +157,7 @@ in
         createHome = true;
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "bitlbee";
         gid = config.ids.gids.bitlbee;
       };
diff --git a/nixos/modules/services/networking/btsync.nix b/nixos/modules/services/networking/btsync.nix
index 6e479a5860ac..33e85ef58e6e 100644
--- a/nixos/modules/services/networking/btsync.nix
+++ b/nixos/modules/services/networking/btsync.nix
@@ -284,7 +284,7 @@ in
 
     services.btsync.package = mkOptionDefault pkgs.bittorrentSync14;
 
-    users.extraUsers.btsync = {
+    users.users.btsync = {
       description     = "Bittorrent Sync Service user";
       home            = cfg.storagePath;
       createHome      = true;
@@ -292,7 +292,7 @@ in
       group           = "btsync";
     };
 
-    users.extraGroups = [
+    users.groups = [
       { name = "btsync";
       }];
 
diff --git a/nixos/modules/services/networking/charybdis.nix b/nixos/modules/services/networking/charybdis.nix
index c354ec61fe23..6d57faa9ac2b 100644
--- a/nixos/modules/services/networking/charybdis.nix
+++ b/nixos/modules/services/networking/charybdis.nix
@@ -71,14 +71,14 @@ in
 
   config = mkIf cfg.enable (lib.mkMerge [
     {
-      users.extraUsers = singleton {
+      users.users = singleton {
         name = cfg.user;
         description = "Charybdis IRC daemon user";
         uid = config.ids.uids.ircd;
         group = cfg.group;
       };
 
-      users.extraGroups = singleton {
+      users.groups = singleton {
         name = cfg.group;
         gid = config.ids.gids.ircd;
       };
diff --git a/nixos/modules/services/networking/chrony.nix b/nixos/modules/services/networking/chrony.nix
index 9bf266b38054..6a89002b42de 100644
--- a/nixos/modules/services/networking/chrony.nix
+++ b/nixos/modules/services/networking/chrony.nix
@@ -96,12 +96,12 @@ in
     # Make chronyc available in the system path
     environment.systemPackages = [ pkgs.chrony ];
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "chrony";
         gid = config.ids.gids.chrony;
       };
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "chrony";
         uid = config.ids.uids.chrony;
         group = "chrony";
@@ -109,7 +109,7 @@ in
         home = stateDir;
       };
 
-    systemd.services.timesyncd.enable = mkForce false;
+    services.timesyncd.enable = mkForce false;
 
     systemd.services.chronyd =
       { description = "chrony NTP daemon";
diff --git a/nixos/modules/services/networking/cjdns.nix b/nixos/modules/services/networking/cjdns.nix
index 12c2677c3368..39b62bdc7094 100644
--- a/nixos/modules/services/networking/cjdns.nix
+++ b/nixos/modules/services/networking/cjdns.nix
@@ -260,7 +260,8 @@ in
         RestartSec = 1;
         CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW CAP_SETUID";
         ProtectSystem = true;
-        MemoryDenyWriteExecute = true;
+        # Doesn't work on i686, causing service to fail
+        MemoryDenyWriteExecute = !pkgs.stdenv.isi686;
         ProtectHome = true;
         PrivateTmp = true;
       };
diff --git a/nixos/modules/services/networking/cntlm.nix b/nixos/modules/services/networking/cntlm.nix
index 3978a1969ce9..4e4e3104c3a8 100644
--- a/nixos/modules/services/networking/cntlm.nix
+++ b/nixos/modules/services/networking/cntlm.nix
@@ -117,7 +117,7 @@ in
       };
     };
 
-    users.extraUsers.cntlm = {
+    users.users.cntlm = {
       name = "cntlm";
       description = "cntlm system-wide daemon";
       isSystemUser = true;
diff --git a/nixos/modules/services/networking/consul.nix b/nixos/modules/services/networking/consul.nix
index 6333970cb338..ab3f81037681 100644
--- a/nixos/modules/services/networking/consul.nix
+++ b/nixos/modules/services/networking/consul.nix
@@ -155,7 +155,7 @@ in
   config = mkIf cfg.enable (
     mkMerge [{
 
-      users.extraUsers."consul" = {
+      users.users."consul" = {
         description = "Consul agent daemon user";
         uid = config.ids.uids.consul;
         # The shell is needed for health checks
diff --git a/nixos/modules/services/networking/coturn.nix b/nixos/modules/services/networking/coturn.nix
index b3c64490d97e..c430ce5af92a 100644
--- a/nixos/modules/services/networking/coturn.nix
+++ b/nixos/modules/services/networking/coturn.nix
@@ -294,12 +294,12 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers = [
+    users.users = [
       { name = "turnserver";
         uid = config.ids.uids.turnserver;
         description = "coturn TURN server user";
       } ];
-    users.extraGroups = [
+    users.groups = [
       { name = "turnserver";
         gid = config.ids.gids.turnserver;
         members = [ "turnserver" ];
diff --git a/nixos/modules/services/networking/dhcpd.nix b/nixos/modules/services/networking/dhcpd.nix
index fd7e317eee95..0b2063bc4246 100644
--- a/nixos/modules/services/networking/dhcpd.nix
+++ b/nixos/modules/services/networking/dhcpd.nix
@@ -197,7 +197,7 @@ in
   config = mkIf (cfg4.enable || cfg6.enable) {
 
     users = {
-      extraUsers.dhcpd = {
+      users.dhcpd = {
         uid = config.ids.uids.dhcpd;
         description = "DHCP daemon user";
       };
diff --git a/nixos/modules/services/networking/dnscache.nix b/nixos/modules/services/networking/dnscache.nix
index ba5c8e2d5e53..fc30f50317fe 100644
--- a/nixos/modules/services/networking/dnscache.nix
+++ b/nixos/modules/services/networking/dnscache.nix
@@ -84,7 +84,7 @@ in {
 
   config = mkIf config.services.dnscache.enable {
     environment.systemPackages = [ pkgs.djbdns ];
-    users.extraUsers.dnscache = {};
+    users.users.dnscache = {};
 
     systemd.services.dnscache = {
       description = "djbdns dnscache server";
diff --git a/nixos/modules/services/networking/dnschain.nix b/nixos/modules/services/networking/dnschain.nix
index ee1cd3600039..0c2add424bac 100644
--- a/nixos/modules/services/networking/dnschain.nix
+++ b/nixos/modules/services/networking/dnschain.nix
@@ -141,7 +141,7 @@ in
         dns = "127.0.0.1:${toString cfg.dns.port}";
       };
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = username;
       description = "DNSChain daemon user";
       home = dataDir;
diff --git a/nixos/modules/services/networking/dnscrypt-proxy.nix b/nixos/modules/services/networking/dnscrypt-proxy.nix
index 857657eea4db..8edcf925dbfa 100644
--- a/nixos/modules/services/networking/dnscrypt-proxy.nix
+++ b/nixos/modules/services/networking/dnscrypt-proxy.nix
@@ -145,6 +145,9 @@ in
       }
     ];
 
+    # make man 8 dnscrypt-proxy work
+    environment.systemPackages = [ pkgs.dnscrypt-proxy ];
+
     users.users.dnscrypt-proxy = {
       description = "dnscrypt-proxy daemon user";
       isSystemUser = true;
@@ -192,6 +195,7 @@ in
     security.apparmor.profiles = singleton (pkgs.writeText "apparmor-dnscrypt-proxy" ''
       ${pkgs.dnscrypt-proxy}/bin/dnscrypt-proxy {
         /dev/null rw,
+        /dev/random r,
         /dev/urandom r,
 
         /etc/passwd r,
@@ -211,6 +215,9 @@ in
         ${getLib pkgs.gcc.cc}/lib/libssp.so.* mr,
         ${getLib pkgs.libsodium}/lib/libsodium.so.* mr,
         ${getLib pkgs.systemd}/lib/libsystemd.so.* mr,
+        ${getLib pkgs.utillinuxMinimal.out}/lib/libmount.so.* mr,
+        ${getLib pkgs.utillinuxMinimal.out}/lib/libblkid.so.* mr,
+        ${getLib pkgs.utillinuxMinimal.out}/lib/libuuid.so.* mr,
         ${getLib pkgs.xz}/lib/liblzma.so.* mr,
         ${getLib pkgs.libgcrypt}/lib/libgcrypt.so.* mr,
         ${getLib pkgs.libgpgerror}/lib/libgpg-error.so.* mr,
diff --git a/nixos/modules/services/networking/dnsdist.nix b/nixos/modules/services/networking/dnsdist.nix
new file mode 100644
index 000000000000..12eee136e639
--- /dev/null
+++ b/nixos/modules/services/networking/dnsdist.nix
@@ -0,0 +1,61 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dnsdist;
+  configFile = pkgs.writeText "dnsdist.conf" ''
+    setLocal('${cfg.listenAddress}:${toString cfg.listenPort}')
+    ${cfg.extraConfig}
+  '';
+in {
+  options = {
+    services.dnsdist = {
+      enable = mkEnableOption "dnsdist domain name server";
+
+      listenAddress = mkOption {
+        type = types.str;
+        description = "IP address to listen on.";
+        default = "0.0.0.0";
+      };
+      listenPort = mkOption {
+        type = types.int;
+        description = "Port to listen on.";
+        default = 53;
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra lines to be added verbatim to dnsdist.conf.
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.dnsdist.enable {
+    systemd.services.dnsdist = {
+      description = "dnsdist load balancer";
+      wantedBy = [ "multi-user.target" ];
+      after = ["network.target"];
+
+      serviceConfig = {
+        Restart = "on-failure";
+        RestartSec = "1";
+        DynamicUser = true;
+        StartLimitInterval = "0";
+        PrivateTmp = true;
+        PrivateDevices = true;
+        CapabilityBoundingSet = "CAP_NET_BIND_SERVICE CAP_SETGID CAP_SETUID";
+        ExecStart = "${pkgs.dnsdist}/bin/dnsdist --supervised --disable-syslog --config ${configFile}";
+        ProtectSystem = "full";
+        ProtectHome = true;
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
+        LimitNOFILE = "16384";
+        TasksMax = "8192";
+      };
+    };
+  };
+}
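A minimal sketch of the new module; newServer is standard dnsdist Lua configuration and the backend address is illustrative:

    services.dnsdist = {
      enable = true;
      listenAddress = "127.0.0.1";
      listenPort = 5353;
      extraConfig = ''
        newServer({address = "127.0.0.1:5300"})
      '';
    };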
diff --git a/nixos/modules/services/networking/dnsmasq.nix b/nixos/modules/services/networking/dnsmasq.nix
index 91a3e54474ac..24d16046c63e 100644
--- a/nixos/modules/services/networking/dnsmasq.nix
+++ b/nixos/modules/services/networking/dnsmasq.nix
@@ -86,7 +86,7 @@ in
 
     services.dbus.packages = [ dnsmasq ];
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "dnsmasq";
       uid = config.ids.uids.dnsmasq;
       description = "Dnsmasq daemon user";
diff --git a/nixos/modules/services/networking/ejabberd.nix b/nixos/modules/services/networking/ejabberd.nix
index 82ed7fc4a837..ef5e2cee6f20 100644
--- a/nixos/modules/services/networking/ejabberd.nix
+++ b/nixos/modules/services/networking/ejabberd.nix
@@ -94,7 +94,7 @@ in {
   config = mkIf cfg.enable {
     environment.systemPackages = [ cfg.package ];
 
-    users.extraUsers = optionalAttrs (cfg.user == "ejabberd") (singleton
+    users.users = optionalAttrs (cfg.user == "ejabberd") (singleton
       { name = "ejabberd";
         group = cfg.group;
         home = cfg.spoolDir;
@@ -102,7 +102,7 @@ in {
         uid = config.ids.uids.ejabberd;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "ejabberd") (singleton
+    users.groups = optionalAttrs (cfg.group == "ejabberd") (singleton
       { name = "ejabberd";
         gid = config.ids.gids.ejabberd;
       });
diff --git a/nixos/modules/services/networking/firewall.nix b/nixos/modules/services/networking/firewall.nix
index c4bd0e7f9eef..36f1dd8d2479 100644
--- a/nixos/modules/services/networking/firewall.nix
+++ b/nixos/modules/services/networking/firewall.nix
@@ -148,38 +148,42 @@ let
     ip46tables -A nixos-fw -m conntrack --ctstate ESTABLISHED,RELATED -j nixos-fw-accept
 
     # Accept connections to the allowed TCP ports.
-    ${concatMapStrings (port:
+    ${concatStrings (mapAttrsToList (iface: cfg:
+      concatMapStrings (port:
         ''
-          ip46tables -A nixos-fw -p tcp --dport ${toString port} -j nixos-fw-accept
+          ip46tables -A nixos-fw -p tcp --dport ${toString port} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
         ''
       ) cfg.allowedTCPPorts
-    }
+    ) cfg.interfaces)}
 
     # Accept connections to the allowed TCP port ranges.
-    ${concatMapStrings (rangeAttr:
+    ${concatStrings (mapAttrsToList (iface: cfg:
+      concatMapStrings (rangeAttr:
         let range = toString rangeAttr.from + ":" + toString rangeAttr.to; in
         ''
-          ip46tables -A nixos-fw -p tcp --dport ${range} -j nixos-fw-accept
+          ip46tables -A nixos-fw -p tcp --dport ${range} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
         ''
       ) cfg.allowedTCPPortRanges
-    }
+    ) cfg.interfaces)}
 
     # Accept packets on the allowed UDP ports.
-    ${concatMapStrings (port:
+    ${concatStrings (mapAttrsToList (iface: cfg:
+      concatMapStrings (port:
         ''
-          ip46tables -A nixos-fw -p udp --dport ${toString port} -j nixos-fw-accept
+          ip46tables -A nixos-fw -p udp --dport ${toString port} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
         ''
       ) cfg.allowedUDPPorts
-    }
+    ) cfg.interfaces)}
 
     # Accept packets on the allowed UDP port ranges.
-    ${concatMapStrings (rangeAttr:
+    ${concatStrings (mapAttrsToList (iface: cfg:
+      concatMapStrings (rangeAttr:
         let range = toString rangeAttr.from + ":" + toString rangeAttr.to; in
         ''
-          ip46tables -A nixos-fw -p udp --dport ${range} -j nixos-fw-accept
+          ip46tables -A nixos-fw -p udp --dport ${range} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
         ''
       ) cfg.allowedUDPPortRanges
-    }
+    ) cfg.interfaces)}
 
     # Accept IPv4 multicast.  Not a big security risk since
     # probably nobody is listening anyway.
@@ -254,106 +258,30 @@ let
     fi
   '';
 
-in
-
-{
-
-  ###### interface
-
-  options = {
-
-    networking.firewall.enable = mkOption {
-      type = types.bool;
-      default = true;
-      description =
-        ''
-          Whether to enable the firewall.  This is a simple stateful
-          firewall that blocks connection attempts to unauthorised TCP
-          or UDP ports on this machine.  It does not affect packet
-          forwarding.
-        '';
-    };
-
-    networking.firewall.logRefusedConnections = mkOption {
-      type = types.bool;
-      default = true;
-      description =
-        ''
-          Whether to log rejected or dropped incoming connections.
-        '';
-    };
-
-    networking.firewall.logRefusedPackets = mkOption {
-      type = types.bool;
-      default = false;
-      description =
-        ''
-          Whether to log all rejected or dropped incoming packets.
-          This tends to give a lot of log messages, so it's mostly
-          useful for debugging.
-        '';
-    };
-
-    networking.firewall.logRefusedUnicastsOnly = mkOption {
-      type = types.bool;
-      default = true;
-      description =
-        ''
-          If <option>networking.firewall.logRefusedPackets</option>
-          and this option are enabled, then only log packets
-          specifically directed at this machine, i.e., not broadcasts
-          or multicasts.
-        '';
-    };
-
-    networking.firewall.rejectPackets = mkOption {
-      type = types.bool;
-      default = false;
-      description =
-        ''
-          If set, refused packets are rejected rather than dropped
-          (ignored).  This means that an ICMP "port unreachable" error
-          message is sent back to the client (or a TCP RST packet in
-          case of an existing connection).  Rejecting packets makes
-          port scanning somewhat easier.
-        '';
-    };
-
-    networking.firewall.trustedInterfaces = mkOption {
-      type = types.listOf types.str;
-      default = [ ];
-      example = [ "enp0s2" ];
-      description =
-        ''
-          Traffic coming in from these interfaces will be accepted
-          unconditionally.  Traffic from the loopback (lo) interface
-          will always be accepted.
-        '';
-    };
-
-    networking.firewall.allowedTCPPorts = mkOption {
+  commonOptions = {
+    allowedTCPPorts = mkOption {
       type = types.listOf types.int;
       default = [ ];
       example = [ 22 80 ];
       description =
-        ''
+        ''
           List of TCP ports on which incoming connections are
           accepted.
         '';
     };
 
-    networking.firewall.allowedTCPPortRanges = mkOption {
+    allowedTCPPortRanges = mkOption {
       type = types.listOf (types.attrsOf types.int);
       default = [ ];
       example = [ { from = 8999; to = 9003; } ];
       description =
-        ''
+        ''
           A range of TCP ports on which incoming connections are
           accepted.
         '';
     };
 
-    networking.firewall.allowedUDPPorts = mkOption {
+    allowedUDPPorts = mkOption {
       type = types.listOf types.int;
       default = [ ];
       example = [ 53 ];
@@ -363,7 +291,7 @@ in
         '';
     };
 
-    networking.firewall.allowedUDPPortRanges = mkOption {
+    allowedUDPPortRanges = mkOption {
       type = types.listOf (types.attrsOf types.int);
       default = [ ];
       example = [ { from = 60000; to = 61000; } ];
@@ -372,133 +300,226 @@ in
           Range of open UDP ports.
         '';
     };
+  };
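The factored-out commonOptions are reused at the top level and per interface (the rule generators above iterate cfg.interfaces), so ports can be opened selectively; a sketch, assuming the interfaces option keeps the shape used in the generators ("eth0" is illustrative):

    networking.firewall.allowedTCPPorts = [ 22 ];  # all interfaces
    networking.firewall.interfaces."eth0".allowedTCPPorts = [ 80 443 ];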
 
-    networking.firewall.allowPing = mkOption {
-      type = types.bool;
-      default = true;
-      description =
-        ''
-          Whether to respond to incoming ICMPv4 echo requests
-          ("pings").  ICMPv6 pings are always allowed because the
-          larger address space of IPv6 makes network scanning much
-          less effective.
-        '';
-    };
+in
 
-    networking.firewall.pingLimit = mkOption {
-      type = types.nullOr (types.separatedString " ");
-      default = null;
-      example = "--limit 1/minute --limit-burst 5";
-      description =
-        ''
-          If pings are allowed, this allows setting rate limits
-          on them.  If non-null, this option should be in the form of
-          flags like "--limit 1/minute --limit-burst 5"
-        '';
-    };
+{
 
-    networking.firewall.checkReversePath = mkOption {
-      type = types.either types.bool (types.enum ["strict" "loose"]);
-      default = kernelHasRPFilter;
-      example = "loose";
-      description =
-        ''
-          Performs a reverse path filter test on a packet.  If a reply
-          to the packet would not be sent via the same interface that
-          the packet arrived on, it is refused.
+  ###### interface
 
-          If using asymmetric routing or other complicated routing, set
-          this option to loose mode or disable it and setup your own
-          counter-measures.
+  options = {
 
-          This option can be either true (or "strict"), "loose" (only
-          drop the packet if the source address is not reachable via any
-          interface) or false.  Defaults to the value of
-          kernelHasRPFilter.
+    networking.firewall = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            Whether to enable the firewall.  This is a simple stateful
+            firewall that blocks connection attempts to unauthorised TCP
+            or UDP ports on this machine.  It does not affect packet
+            forwarding.
+          '';
+      };
 
-          (needs kernel 3.3+)
-        '';
-    };
+      logRefusedConnections = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            Whether to log rejected or dropped incoming connections.
+          '';
+      };
 
-    networking.firewall.logReversePathDrops = mkOption {
-      type = types.bool;
-      default = false;
-      description =
-        ''
-          Logs dropped packets failing the reverse path filter test if
-          the option networking.firewall.checkReversePath is enabled.
-        '';
-    };
+      logRefusedPackets = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            Whether to log all rejected or dropped incoming packets.
+            This tends to give a lot of log messages, so it's mostly
+            useful for debugging.
+          '';
+      };
 
-    networking.firewall.connectionTrackingModules = mkOption {
-      type = types.listOf types.str;
-      default = [ ];
-      example = [ "ftp" "irc" "sane" "sip" "tftp" "amanda" "h323" "netbios_sn" "pptp" "snmp" ];
-      description =
-        ''
-          List of connection-tracking helpers that are auto-loaded.
-          The complete list of possible values is given in the example.
-
-          As helpers can pose as a security risk, it is advised to
-          set this to an empty list and disable the setting
-          networking.firewall.autoLoadConntrackHelpers unless you
-          know what you are doing. Connection tracking is disabled
-          by default.
-
-          Loading of helpers is recommended to be done through the
-          CT target.  More info:
-          https://home.regit.org/netfilter-en/secure-use-of-helpers/
-        '';
-    };
+      logRefusedUnicastsOnly = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            If <option>networking.firewall.logRefusedPackets</option>
+            and this option are enabled, then only log packets
+            specifically directed at this machine, i.e., not broadcasts
+            or multicasts.
+          '';
+      };
 
-    networking.firewall.autoLoadConntrackHelpers = mkOption {
-      type = types.bool;
-      default = false;
-      description =
-        ''
-          Whether to auto-load connection-tracking helpers.
-          See the description at networking.firewall.connectionTrackingModules
+      rejectPackets = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            If set, refused packets are rejected rather than dropped
+            (ignored).  This means that an ICMP "port unreachable" error
+            message is sent back to the client (or a TCP RST packet in
+            case of an existing connection).  Rejecting packets makes
+            port scanning somewhat easier.
+          '';
+      };
 
-          (needs kernel 3.5+)
-        '';
-    };
+      trustedInterfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "enp0s2" ];
+        description =
+          ''
+            Traffic coming in from these interfaces will be accepted
+            unconditionally.  Traffic from the loopback (lo) interface
+            will always be accepted.
+          '';
+      };
 
-    networking.firewall.extraCommands = mkOption {
-      type = types.lines;
-      default = "";
-      example = "iptables -A INPUT -p icmp -j ACCEPT";
-      description =
-        ''
-          Additional shell commands executed as part of the firewall
-          initialisation script.  These are executed just before the
-          final "reject" firewall rule is added, so they can be used
-          to allow packets that would otherwise be refused.
-        '';
-    };
+      allowPing = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            Whether to respond to incoming ICMPv4 echo requests
+            ("pings").  ICMPv6 pings are always allowed because the
+            larger address space of IPv6 makes network scanning much
+            less effective.
+          '';
+      };
 
-    networking.firewall.extraPackages = mkOption {
-      type = types.listOf types.package;
-      default = [ ];
-      example = literalExample "[ pkgs.ipset ]";
-      description =
-        ''
-          Additional packages to be included in the environment of the system
-          as well as the path of networking.firewall.extraCommands.
-        '';
-    };
+      pingLimit = mkOption {
+        type = types.nullOr (types.separatedString " ");
+        default = null;
+        example = "--limit 1/minute --limit-burst 5";
+        description =
+          ''
+            If pings are allowed, this option allows rate-limiting them.
+            If non-null, it should be a string of iptables flags, such as
+            "--limit 1/minute --limit-burst 5".
+          '';
+      };
 
-    networking.firewall.extraStopCommands = mkOption {
-      type = types.lines;
-      default = "";
-      example = "iptables -P INPUT ACCEPT";
-      description =
-        ''
-          Additional shell commands executed as part of the firewall
-          shutdown script.  These are executed just after the removal
-          of the NixOS input rule, or if the service enters a failed
-          state.
-        '';
-    };
+      checkReversePath = mkOption {
+        type = types.either types.bool (types.enum ["strict" "loose"]);
+        default = kernelHasRPFilter;
+        example = "loose";
+        description =
+          ''
+            Performs a reverse path filter test on a packet.  If a reply
+            to the packet would not be sent via the same interface that
+            the packet arrived on, it is refused.
+
+            If using asymmetric routing or other complicated routing, set
+            this option to loose mode, or disable it and set up your own
+            counter-measures.
+
+            This option can be either true (or "strict"), "loose" (only
+            drop the packet if the source address is not reachable via any
+            interface) or false.  Defaults to the value of
+            kernelHasRPFilter.
+
+            (needs kernel 3.3+)
+          '';
+      };
+
+      logReversePathDrops = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            Logs dropped packets failing the reverse path filter test if
+            the option networking.firewall.checkReversePath is enabled.
+          '';
+      };
+
+      connectionTrackingModules = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "ftp" "irc" "sane" "sip" "tftp" "amanda" "h323" "netbios_sn" "pptp" "snmp" ];
+        description =
+          ''
+            List of connection-tracking helpers that are auto-loaded.
+            The complete list of possible values is given in the example.
+
+            As helpers can pose a security risk, it is advised to
+            set this to an empty list and disable the setting
+            networking.firewall.autoLoadConntrackHelpers unless you
+            know what you are doing. Connection tracking is disabled
+            by default.
+
+            It is recommended to load helpers through the CT target.
+            More info:
+            https://home.regit.org/netfilter-en/secure-use-of-helpers/
+          '';
+      };
+
+      autoLoadConntrackHelpers = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            Whether to auto-load connection-tracking helpers.
+            See the description of networking.firewall.connectionTrackingModules.
+
+            (needs kernel 3.5+)
+          '';
+      };
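
For illustration, a minimal sketch of loading a single helper through the CT target instead of enabling autoLoadConntrackHelpers; the FTP helper, module name, and port are just examples:

  networking.firewall.extraCommands = ''
    # Attach the FTP conntrack helper only to FTP control traffic,
    # rather than auto-loading helpers for every connection.
    iptables -w -t raw -A PREROUTING -p tcp --dport 21 -j CT --helper ftp
  '';
  boot.kernelModules = [ "nf_conntrack_ftp" ];
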
+
+      extraCommands = mkOption {
+        type = types.lines;
+        default = "";
+        example = "iptables -A INPUT -p icmp -j ACCEPT";
+        description =
+          ''
+            Additional shell commands executed as part of the firewall
+            initialisation script.  These are executed just before the
+            final "reject" firewall rule is added, so they can be used
+            to allow packets that would otherwise be refused.
+          '';
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        example = literalExample "[ pkgs.ipset ]";
+        description =
+          ''
+            Additional packages to be included in the system environment
+            and in the PATH of networking.firewall.extraCommands.
+          '';
+      };
+
+      extraStopCommands = mkOption {
+        type = types.lines;
+        default = "";
+        example = "iptables -P INPUT ACCEPT";
+        description =
+          ''
+            Additional shell commands executed as part of the firewall
+            shutdown script.  These are executed just after the removal
+            of the NixOS input rule, or if the service enters a failed
+            state.
+          '';
+      };
+
+      interfaces = mkOption {
+        default = {
+          default = mapAttrs (name: value: cfg."${name}") commonOptions;
+        };
+        type = with types; attrsOf (submodule [ { options = commonOptions; } ]);
+        description =
+          ''
+            Interface-specific open ports. Setting this value will override
+            all values of the <literal>networking.firewall.allowed*</literal>
+            options.
+          '';
+      };
+    } // commonOptions;
 
   };
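
For illustration, a hedged configuration.nix sketch exercising several of the firewall options above (interface names are hypothetical):

  networking.firewall = {
    enable = true;
    allowPing = true;
    pingLimit = "--limit 1/minute --limit-burst 5";
    checkReversePath = "loose";        # e.g. with asymmetric routing
    trustedInterfaces = [ "enp0s2" ];  # accepted unconditionally
    # Per-interface rules; these override the global allowed* options.
    interfaces."enp3s0".allowedTCPPorts = [ 22 80 ];
  };
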
 
diff --git a/nixos/modules/services/networking/freenet.nix b/nixos/modules/services/networking/freenet.nix
index 3903a2c708cb..3da3ab0c7df4 100644
--- a/nixos/modules/services/networking/freenet.nix
+++ b/nixos/modules/services/networking/freenet.nix
@@ -50,7 +50,7 @@ in
       serviceConfig.Nice = cfg.nice;
     };
 
-    users.extraUsers.freenet = {
+    users.users.freenet = {
       group = "freenet";
       description = "Freenet daemon user";
       home = varDir;
@@ -58,7 +58,7 @@ in
       uid = config.ids.uids.freenet;
     };
 
-    users.extraGroups.freenet.gid = config.ids.gids.freenet;
+    users.groups.freenet.gid = config.ids.gids.freenet;
   };
 
 }
diff --git a/nixos/modules/services/networking/freeradius.nix b/nixos/modules/services/networking/freeradius.nix
index 45cba1ce2770..e192b70c129c 100644
--- a/nixos/modules/services/networking/freeradius.nix
+++ b/nixos/modules/services/networking/freeradius.nix
@@ -59,7 +59,7 @@ in
   config = mkIf (cfg.enable) {
 
     users = {
-      extraUsers.radius = {
+      users.radius = {
         /*uid = config.ids.uids.radius;*/
         description = "Radius daemon user";
       };
diff --git a/nixos/modules/services/networking/gale.nix b/nixos/modules/services/networking/gale.nix
index fd83f9e3c1b7..7083d87c4073 100644
--- a/nixos/modules/services/networking/gale.nix
+++ b/nixos/modules/services/networking/gale.nix
@@ -104,7 +104,7 @@ in
          systemPackages = [ pkgs.gale ];
        };
 
-       users.extraUsers = [{
+       users.users = [{
          name = cfg.user;
          description = "Gale daemon";
          uid = config.ids.uids.gale;
@@ -113,7 +113,7 @@ in
          createHome = true;
        }];
 
-       users.extraGroups = [{
+       users.groups = [{
          name = cfg.group;
          gid = config.ids.gids.gale;
        }];
diff --git a/nixos/modules/services/networking/gateone.nix b/nixos/modules/services/networking/gateone.nix
index 78ff0b76198c..4456a95402ed 100644
--- a/nixos/modules/services/networking/gateone.nix
+++ b/nixos/modules/services/networking/gateone.nix
@@ -23,12 +23,12 @@ config = mkIf cfg.enable {
   environment.systemPackages = with pkgs.pythonPackages; [
     gateone pkgs.openssh pkgs.procps pkgs.coreutils pkgs.cacert];
 
-  users.extraUsers.gateone = {
+  users.users.gateone = {
     description = "GateOne privilege separation user";
     uid = config.ids.uids.gateone;
     home = cfg.settingsDir;
   };
-  users.extraGroups.gateone.gid = config.ids.gids.gateone;
+  users.groups.gateone.gid = config.ids.gids.gateone;
 
   systemd.services.gateone = with pkgs; {
     description = "GateOne web-based terminal";
diff --git a/nixos/modules/services/networking/gdomap.nix b/nixos/modules/services/networking/gdomap.nix
index b3fd91d037fa..3d829cb69135 100644
--- a/nixos/modules/services/networking/gdomap.nix
+++ b/nixos/modules/services/networking/gdomap.nix
@@ -2,9 +2,6 @@
 
 with lib;
 
-let
-  cfg = config.services.gdomap;
-in
 {
   #
   # interface
diff --git a/nixos/modules/services/networking/git-daemon.nix b/nixos/modules/services/networking/git-daemon.nix
index cd3fcd0f8f66..c0020349ec74 100644
--- a/nixos/modules/services/networking/git-daemon.nix
+++ b/nixos/modules/services/networking/git-daemon.nix
@@ -104,13 +104,13 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = if cfg.user != "git" then {} else singleton
+    users.users = if cfg.user != "git" then {} else singleton
       { name = "git";
         uid = config.ids.uids.git;
         description = "Git daemon user";
       };
 
-    users.extraGroups = if cfg.group != "git" then {} else singleton
+    users.groups = if cfg.group != "git" then {} else singleton
       { name = "git";
         gid = config.ids.gids.git;
       };
diff --git a/nixos/modules/services/networking/gnunet.nix b/nixos/modules/services/networking/gnunet.nix
index 008b09e81a57..6a1db81413c5 100644
--- a/nixos/modules/services/networking/gnunet.nix
+++ b/nixos/modules/services/networking/gnunet.nix
@@ -126,7 +126,7 @@ in
 
   config = mkIf config.services.gnunet.enable {
 
-    users.extraUsers.gnunet = {
+    users.users.gnunet = {
       group = "gnunet";
       description = "GNUnet User";
       home = homeDir;
@@ -134,7 +134,7 @@ in
       uid = config.ids.uids.gnunet;
     };
 
-    users.extraGroups.gnunet.gid = config.ids.gids.gnunet;
+    users.groups.gnunet.gid = config.ids.gids.gnunet;
 
     # The user tools that talk to `gnunetd' should come from the same source,
     # so install them globally.
diff --git a/nixos/modules/services/networking/hans.nix b/nixos/modules/services/networking/hans.nix
index dd34ef8d4ca1..20e57e4626ef 100644
--- a/nixos/modules/services/networking/hans.nix
+++ b/nixos/modules/services/networking/hans.nix
@@ -135,7 +135,7 @@ in
       };
     };
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = hansUser;
       description = "Hans daemon user";
     };
diff --git a/nixos/modules/services/networking/haproxy.nix b/nixos/modules/services/networking/haproxy.nix
index 09e48ec4bff0..0438d0bf8d86 100644
--- a/nixos/modules/services/networking/haproxy.nix
+++ b/nixos/modules/services/networking/haproxy.nix
@@ -52,11 +52,11 @@ with lib;
 
     environment.systemPackages = [ pkgs.haproxy ];
 
-    users.extraUsers.haproxy = {
+    users.users.haproxy = {
       group = "haproxy";
       uid = config.ids.uids.haproxy;
     };
 
-    users.extraGroups.haproxy.gid = config.ids.uids.haproxy;
+    users.groups.haproxy.gid = config.ids.gids.haproxy;
   };
 }
diff --git a/nixos/modules/services/networking/hostapd.nix b/nixos/modules/services/networking/hostapd.nix
index 63f56437d1c8..3af0441a89d8 100644
--- a/nixos/modules/services/networking/hostapd.nix
+++ b/nixos/modules/services/networking/hostapd.nix
@@ -29,7 +29,7 @@ let
     ctrl_interface_group=${cfg.group}
 
     ${if cfg.wpa then ''
-      wpa=1
+      wpa=2
       wpa_passphrase=${cfg.wpaPassphrase}
       '' else ""}
 
diff --git a/nixos/modules/services/networking/i2p.nix b/nixos/modules/services/networking/i2p.nix
index e6ee5fd1f957..3b6010531f13 100644
--- a/nixos/modules/services/networking/i2p.nix
+++ b/nixos/modules/services/networking/i2p.nix
@@ -11,14 +11,14 @@ in {
 
   ###### implementation
   config = mkIf cfg.enable {
-    users.extraUsers.i2p = {
+    users.users.i2p = {
       group = "i2p";
       description = "i2p User";
       home = homeDir;
       createHome = true;
       uid = config.ids.uids.i2p;
     };
-    users.extraGroups.i2p.gid = config.ids.gids.i2p;
+    users.groups.i2p.gid = config.ids.gids.i2p;
     systemd.services.i2p = {
       description = "I2P router with administration interface for hidden services";
       after = [ "network.target" ];
diff --git a/nixos/modules/services/networking/i2pd.nix b/nixos/modules/services/networking/i2pd.nix
index 8f5aeee4a16b..4f219fe56b4c 100644
--- a/nixos/modules/services/networking/i2pd.nix
+++ b/nixos/modules/services/networking/i2pd.nix
@@ -103,7 +103,7 @@ let
 
     ${flip concatMapStrings
       (collect (proto: proto ? port && proto ? address && proto ? name) cfg.proto)
-      (proto: let portStr = toString proto.port; in ''
+      (proto: ''
         [${proto.name}]
         enabled = ${boolToString proto.enable}
         address = ${proto.address}
@@ -122,7 +122,7 @@ let
     # DO NOT EDIT -- this file has been generated automatically.
     ${flip concatMapStrings
       (collect (tun: tun ? port && tun ? destination) cfg.outTunnels)
-      (tun: let portStr = toString tun.port; in ''
+      (tun: ''
         [${tun.name}]
         type = client
         destination = ${tun.destination}
@@ -456,7 +456,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.i2pd = {
+    users.users.i2pd = {
       group = "i2pd";
       description = "I2Pd User";
       home = homeDir;
@@ -464,7 +464,7 @@ in
       uid = config.ids.uids.i2pd;
     };
 
-    users.extraGroups.i2pd.gid = config.ids.gids.i2pd;
+    users.groups.i2pd.gid = config.ids.gids.i2pd;
 
     systemd.services.i2pd = {
       description = "Minimal I2P router";
diff --git a/nixos/modules/services/networking/iodine.nix b/nixos/modules/services/networking/iodine.nix
index 3f41421d27f7..58ad0df4ff20 100644
--- a/nixos/modules/services/networking/iodine.nix
+++ b/nixos/modules/services/networking/iodine.nix
@@ -140,11 +140,11 @@ in
       };
     };
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = iodinedUser;
       uid = config.ids.uids.iodined;
       description = "Iodine daemon user";
     };
-    users.extraGroups.iodined.gid = config.ids.gids.iodined;
+    users.groups.iodined.gid = config.ids.gids.iodined;
   };
 }
diff --git a/nixos/modules/services/networking/ircd-hybrid/default.nix b/nixos/modules/services/networking/ircd-hybrid/default.nix
index bd583fb020ec..2bd898edf897 100644
--- a/nixos/modules/services/networking/ircd-hybrid/default.nix
+++ b/nixos/modules/services/networking/ircd-hybrid/default.nix
@@ -112,14 +112,14 @@ in
 
   config = mkIf config.services.ircdHybrid.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "ircd";
         description = "IRCD owner";
         group = "ircd";
         uid = config.ids.uids.ircd;
       };
 
-    users.extraGroups.ircd.gid = config.ids.gids.ircd;
+    users.groups.ircd.gid = config.ids.gids.ircd;
 
     systemd.services."ircd-hybrid" = {
       description = "IRCD Hybrid server";
diff --git a/nixos/modules/services/networking/iwd.nix b/nixos/modules/services/networking/iwd.nix
index 344212ad8329..cfc536fc5b5f 100644
--- a/nixos/modules/services/networking/iwd.nix
+++ b/nixos/modules/services/networking/iwd.nix
@@ -28,6 +28,10 @@ in {
 
       serviceConfig.ExecStart = "${pkgs.iwd}/libexec/iwd";
     };
+
+    systemd.tmpfiles.rules = [
+      "d /var/lib/iwd 0700 root root -"
+    ];
   };
 
   meta.maintainers = with lib.maintainers; [ mic92 ];
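
As an aside, entries in systemd.tmpfiles.rules follow tmpfiles.d(5) syntax: type, path, mode, user, group, age. A hedged example with an illustrative path:

  systemd.tmpfiles.rules = [
    # type  path              mode  user  group  age
    "d      /var/lib/example  0750  root  root   -"
  ];
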
diff --git a/nixos/modules/services/networking/kippo.nix b/nixos/modules/services/networking/kippo.nix
index 834de4fdc09f..40c38254a57c 100644
--- a/nixos/modules/services/networking/kippo.nix
+++ b/nixos/modules/services/networking/kippo.nix
@@ -73,12 +73,12 @@ rec {
         ${cfg.extraConfig}
     '';
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "kippo";
       description = "kippo web server privilege separation user";
       uid = 108; # why does config.ids.uids.kippo give an error?
     };
-    users.extraGroups = singleton { name = "kippo";gid=108; };
+    users.groups = singleton { name = "kippo"; gid = 108; };
 
     systemd.services.kippo = with pkgs; {
       description = "Kippo Web Server";
diff --git a/nixos/modules/services/networking/kresd.nix b/nixos/modules/services/networking/kresd.nix
index aac02b811d71..ca34ff9df4ef 100644
--- a/nixos/modules/services/networking/kresd.nix
+++ b/nixos/modules/services/networking/kresd.nix
@@ -62,13 +62,13 @@ in
   config = mkIf cfg.enable {
     environment.etc."kresd.conf".source = configFile; # not required
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "kresd";
         uid = config.ids.uids.kresd;
         group = "kresd";
         description = "Knot-resolver daemon user";
       };
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "kresd";
         gid = config.ids.gids.kresd;
       };
diff --git a/nixos/modules/services/networking/lambdabot.nix b/nixos/modules/services/networking/lambdabot.nix
index 5a61a9f96782..b7c8bd008fe1 100644
--- a/nixos/modules/services/networking/lambdabot.nix
+++ b/nixos/modules/services/networking/lambdabot.nix
@@ -67,7 +67,7 @@ in
       };
     };
 
-    users.extraUsers.lambdabot = {
+    users.users.lambdabot = {
       group = "lambdabot";
       description = "Lambdabot daemon user";
       home = "/var/lib/lambdabot";
@@ -75,7 +75,7 @@ in
       uid = config.ids.uids.lambdabot;
     };
 
-    users.extraGroups.lambdabot.gid = config.ids.gids.lambdabot;
+    users.groups.lambdabot.gid = config.ids.gids.lambdabot;
 
   };
 
diff --git a/nixos/modules/services/networking/lldpd.nix b/nixos/modules/services/networking/lldpd.nix
index db1534edfd7c..dec30cc92f6a 100644
--- a/nixos/modules/services/networking/lldpd.nix
+++ b/nixos/modules/services/networking/lldpd.nix
@@ -20,13 +20,13 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers._lldpd = {
+    users.users._lldpd = {
       description = "lldpd user";
       group = "_lldpd";
       home = "/var/run/lldpd";
       isSystemUser = true;
     };
-    users.extraGroups._lldpd = {};
+    users.groups._lldpd = {};
 
     environment.systemPackages = [ pkgs.lldpd ];
     systemd.packages = [ pkgs.lldpd ];
diff --git a/nixos/modules/services/networking/mailpile.nix b/nixos/modules/services/networking/mailpile.nix
index e164d41483c7..c42d3d5a44cb 100644
--- a/nixos/modules/services/networking/mailpile.nix
+++ b/nixos/modules/services/networking/mailpile.nix
@@ -41,14 +41,14 @@ in
 
   config = mkIf config.services.mailpile.enable {
 
-    users.extraUsers.mailpile =
+    users.users.mailpile =
       { uid = config.ids.uids.mailpile;
         description = "Mailpile user";
         createHome = true;
         home = "/var/lib/mailpile";
       };
 
-    users.extraGroups.mailpile =
+    users.groups.mailpile =
       { gid = config.ids.gids.mailpile;
       };
 
diff --git a/nixos/modules/services/networking/matterbridge.nix b/nixos/modules/services/networking/matterbridge.nix
index e2f478405953..1fd63348c16c 100644
--- a/nixos/modules/services/networking/matterbridge.nix
+++ b/nixos/modules/services/networking/matterbridge.nix
@@ -92,12 +92,12 @@ in
     warnings = optional options.services.matterbridge.configFile.isDefined
       "The option services.matterbridge.configFile is insecure and should be replaced with services.matterbridge.configPath";
 
-    users.extraUsers = optional (cfg.user == "matterbridge")
+    users.users = optional (cfg.user == "matterbridge")
       { name = "matterbridge";
         group = "matterbridge";
       };
 
-    users.extraGroups = optional (cfg.group == "matterbridge")
+    users.groups = optional (cfg.group == "matterbridge")
       { name = "matterbridge";
       };
 
diff --git a/nixos/modules/services/networking/minidlna.nix b/nixos/modules/services/networking/minidlna.nix
index 6401631bf620..1858f03cac1f 100644
--- a/nixos/modules/services/networking/minidlna.nix
+++ b/nixos/modules/services/networking/minidlna.nix
@@ -84,13 +84,13 @@ in
         '') cfg.mediaDirs}
       '';
 
-    users.extraUsers.minidlna = {
+    users.users.minidlna = {
       description = "MiniDLNA daemon user";
       group = "minidlna";
       uid = config.ids.uids.minidlna;
     };
 
-    users.extraGroups.minidlna.gid = config.ids.gids.minidlna;
+    users.groups.minidlna.gid = config.ids.gids.minidlna;
 
     systemd.services.minidlna =
       { description = "MiniDLNA Server";
diff --git a/nixos/modules/services/networking/mjpg-streamer.nix b/nixos/modules/services/networking/mjpg-streamer.nix
index 1286b0c7ef6c..e0a6c112e3cb 100644
--- a/nixos/modules/services/networking/mjpg-streamer.nix
+++ b/nixos/modules/services/networking/mjpg-streamer.nix
@@ -49,7 +49,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = optional (cfg.user == "mjpg-streamer") {
+    users.users = optional (cfg.user == "mjpg-streamer") {
       name = "mjpg-streamer";
       uid = config.ids.uids.mjpg-streamer;
       group = cfg.group;
diff --git a/nixos/modules/services/networking/monero.nix b/nixos/modules/services/networking/monero.nix
index 31379189f5de..8241c32bad07 100644
--- a/nixos/modules/services/networking/monero.nix
+++ b/nixos/modules/services/networking/monero.nix
@@ -197,7 +197,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "monero";
       uid  = config.ids.uids.monero;
       description = "Monero daemon user";
@@ -205,7 +205,7 @@ in
       createHome = true;
     };
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = "monero";
       gid  = config.ids.gids.monero;
     };
diff --git a/nixos/modules/services/networking/morty.nix b/nixos/modules/services/networking/morty.nix
new file mode 100644
index 000000000000..cc81e27e9399
--- /dev/null
+++ b/nixos/modules/services/networking/morty.nix
@@ -0,0 +1,96 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.morty;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.morty = {
+
+      enable = mkEnableOption
+        "Morty proxy server. See https://github.com/asciimoo/morty";
+
+      ipv6 = mkOption {
+        type = types.bool;
+        default = true;
+        description = "Whether to allow IPv6 HTTP requests.";
+      };
+
+      key = mkOption {
+        type = types.string;
+        default = "";
+        description = ''
+          HMAC URL validation key (hexadecimal encoded). Leave blank to
+          disable validation; without a key, anyone can submit proxy
+          requests. A key can be generated with, for example:
+          echo -n somevalue | openssl dgst -sha1 -hmac somekey
+        '';
+      };
+
+      timeout = mkOption {
+        type = types.int;
+        default = 2;
+        description = "Request timeout in seconds.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.morty;
+        defaultText = "pkgs.morty";
+        description = "morty package to use.";
+      };
+
+      port = mkOption {
+        type = types.int;
+        default = 3000;
+        description = "Listening port.";
+      };
+
+      listenAddress = mkOption {
+        type = types.string;
+        default = "127.0.0.1";
+        description = "The address on which the service listens.";
+      };
+
+    };
+
+  };
+
+  ###### Service definition
+
+  config = mkIf config.services.morty.enable {
+
+    users.users.morty =
+      { description = "Morty user";
+        createHome = true;
+        home = "/var/lib/morty";
+      };
+
+    systemd.services.morty =
+      {
+        description = "Morty sanitizing proxy server.";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = "morty";
+          ExecStart = ''${cfg.package}/bin/morty \
+            -listen ${cfg.listenAddress}:${toString cfg.port} \
+            ${optionalString cfg.ipv6 "-ipv6"} \
+            ${optionalString (cfg.key != "") "-key ${cfg.key}"}
+          '';
+        };
+      };
+    environment.systemPackages = [ cfg.package ];
+
+  };
+}
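
A hedged usage sketch for this new module; the key below is an illustrative placeholder, not a real digest:

  services.morty = {
    enable = true;
    listenAddress = "127.0.0.1";
    port = 3000;
    # e.g. generated with: echo -n somevalue | openssl dgst -sha1 -hmac somekey
    key = "deadbeef";  # hypothetical placeholder
  };
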
diff --git a/nixos/modules/services/networking/mosquitto.nix b/nixos/modules/services/networking/mosquitto.nix
index d8135f4d0ffa..332dc541345e 100644
--- a/nixos/modules/services/networking/mosquitto.nix
+++ b/nixos/modules/services/networking/mosquitto.nix
@@ -218,7 +218,7 @@ in
         ) cfg.users);
     };
 
-    users.extraUsers.mosquitto = {
+    users.users.mosquitto = {
       description = "Mosquitto MQTT Broker Daemon owner";
       group = "mosquitto";
       uid = config.ids.uids.mosquitto;
@@ -226,7 +226,7 @@ in
       createHome = true;
     };
 
-    users.extraGroups.mosquitto.gid = config.ids.gids.mosquitto;
+    users.groups.mosquitto.gid = config.ids.gids.mosquitto;
 
   };
 }
diff --git a/nixos/modules/services/networking/murmur.nix b/nixos/modules/services/networking/murmur.nix
index 873d62dbf341..fcc813e6898f 100644
--- a/nixos/modules/services/networking/murmur.nix
+++ b/nixos/modules/services/networking/murmur.nix
@@ -238,7 +238,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers.murmur = {
+    users.users.murmur = {
       description     = "Murmur Service user";
       home            = "/var/lib/murmur";
       createHome      = true;
@@ -248,7 +248,7 @@ in
     systemd.services.murmur = {
       description = "Murmur Chat Service";
       wantedBy    = [ "multi-user.target" ];
-      after       = [ "network.target "];
+      after       = [ "network-online.target" ];
 
       serviceConfig = {
         Type      = "forking";
diff --git a/nixos/modules/services/networking/namecoind.nix b/nixos/modules/services/networking/namecoind.nix
index 11f7d7e5caef..8de23b442f93 100644
--- a/nixos/modules/services/networking/namecoind.nix
+++ b/nixos/modules/services/networking/namecoind.nix
@@ -153,7 +153,7 @@ in
       config = ${configFile}
     '';
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "namecoin";
       uid  = config.ids.uids.namecoin;
       description = "Namecoin daemon user";
@@ -161,7 +161,7 @@ in
       createHome = true;
     };
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = "namecoin";
       gid  = config.ids.gids.namecoin;
     };
diff --git a/nixos/modules/services/networking/nat.nix b/nixos/modules/services/networking/nat.nix
index da3827c35e63..89d8590093dd 100644
--- a/nixos/modules/services/networking/nat.nix
+++ b/nixos/modules/services/networking/nat.nix
@@ -38,19 +38,19 @@ let
     # NAT the marked packets.
     ${optionalString (cfg.internalInterfaces != []) ''
       iptables -w -t nat -A nixos-nat-post -m mark --mark 1 \
-        -o ${cfg.externalInterface} ${dest}
+        ${optionalString (cfg.externalInterface != null) "-o ${cfg.externalInterface}"} ${dest}
     ''}
 
     # NAT packets coming from the internal IPs.
     ${concatMapStrings (range: ''
       iptables -w -t nat -A nixos-nat-post \
-        -s '${range}' -o ${cfg.externalInterface} ${dest}
+        -s '${range}' ${optionalString (cfg.externalInterface != null) "-o ${cfg.externalInterface}"} ${dest}
     '') cfg.internalIPs}
 
     # NAT from external ports to internal ports.
     ${concatMapStrings (fwd: ''
       iptables -w -t nat -A nixos-nat-pre \
-        -i ${cfg.externalInterface} -p ${fwd.proto} \
+        -i ${toString cfg.externalInterface} -p ${fwd.proto} \
         --dport ${builtins.toString fwd.sourcePort} \
         -j DNAT --to-destination ${fwd.destination}
 
@@ -81,7 +81,7 @@ let
 
     ${optionalString (cfg.dmzHost != null) ''
       iptables -w -t nat -A nixos-nat-pre \
-        -i ${cfg.externalInterface} -j DNAT \
+        -i ${toString cfg.externalInterface} -j DNAT \
         --to-destination ${cfg.dmzHost}
     ''}
 
@@ -134,7 +134,8 @@ in
     };
 
     networking.nat.externalInterface = mkOption {
-      type = types.str;
+      type = types.nullOr types.str;
+      default = null;
       example = "eth1";
       description =
         ''
@@ -236,6 +237,15 @@ in
     { networking.firewall.extraCommands = mkBefore flushNat; }
     (mkIf config.networking.nat.enable {
 
+      assertions = [
+        { assertion = (cfg.dmzHost != null)    -> (cfg.externalInterface != null);
+          message = "networking.nat.dmzHost requires networking.nat.externalInterface";
+        }
+        { assertion = (cfg.forwardPorts != []) -> (cfg.externalInterface != null);
+          message = "networking.nat.forwardPorts requires networking.nat.externalInterface";
+        }
+      ];
+
       environment.systemPackages = [ pkgs.iptables ];
 
       boot = {
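
Given the new assertions, a hedged sketch of a NAT configuration that satisfies them (interfaces and addresses are illustrative):

  networking.nat = {
    enable = true;
    externalInterface = "eth1";        # required by forwardPorts/dmzHost
    internalInterfaces = [ "eth0" ];
    forwardPorts = [
      { sourcePort = 8080; destination = "192.168.1.10:80"; proto = "tcp"; }
    ];
  };
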
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index f4c4adcaaeb8..b0bc1c83d6b7 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -6,21 +6,17 @@ with lib;
 let
   cfg = config.networking.networkmanager;
 
+  dynamicHostsEnabled =
+    cfg.dynamicHosts.enable && cfg.dynamicHosts.hostsDirs != {};
+
   # /var/lib/misc is for dnsmasq.leases.
   stateDirs = "/var/lib/NetworkManager /var/lib/dhclient /var/lib/misc";
 
-  dns =
-    if cfg.dns == "none" then "none"
-    else if cfg.dns == "dnsmasq" then "dnsmasq"
-    else if config.services.resolved.enable then "systemd-resolved"
-    else if config.services.unbound.enable then "unbound"
-    else "default";
-
   configFile = writeText "NetworkManager.conf" ''
     [main]
     plugins=keyfile
     dhcp=${cfg.dhcp}
-    dns=${dns}
+    dns=${cfg.dns}
 
     [keyfile]
     ${optionalString (cfg.unmanaged != [])
@@ -38,6 +34,8 @@ let
 
     [device]
     wifi.scan-rand-mac-address=${if cfg.wifi.scanRandMacAddress then "yes" else "no"}
+
+    ${cfg.extraConfig}
   '';
 
   /*
@@ -120,6 +118,14 @@ in {
         '';
       };
 
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Configuration appended to the generated NetworkManager.conf.
+        '';
+      };
+
       unmanaged = mkOption {
         type = types.listOf types.string;
         default = [];
@@ -207,19 +213,73 @@ in {
       };
 
       dns = mkOption {
-        type = types.enum [ "auto" "dnsmasq" "none" ];
-        default = "auto";
+        type = types.enum [ "default" "dnsmasq" "unbound" "systemd-resolved" "none" ];
+        default = "default";
         description = ''
+          Set the DNS (<literal>resolv.conf</literal>) processing mode.
+          </para>
+          <para>
           Options:
-            - auto: Check for systemd-resolved, unbound, or use default.
-            - dnsmasq:
-              Enable NetworkManager's dnsmasq integration. NetworkManager will run
-              dnsmasq as a local caching nameserver, using a "split DNS"
-              configuration if you are connected to a VPN, and then update
-              resolv.conf to point to the local nameserver.
-            - none:
-              Disable NetworkManager's DNS integration completely.
-              It will not touch your /etc/resolv.conf.
+          <variablelist>
+          <varlistentry>
+            <term><literal>"default"</literal></term>
+            <listitem><para>
+              NetworkManager will update <literal>/etc/resolv.conf</literal> to
+              reflect the nameservers provided by currently active connections.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><literal>"dnsmasq"</literal></term>
+            <listitem>
+              <para>
+                Enable NetworkManager's dnsmasq integration. NetworkManager will
+                run dnsmasq as a local caching nameserver, using a "split DNS"
+                configuration if you are connected to a VPN, and then update
+                <literal>resolv.conf</literal> to point to the local nameserver.
+              </para>
+              <para>
+                It is possible to pass custom options to the dnsmasq instance by
+                adding them to files in the
+                <literal>/etc/NetworkManager/dnsmasq.d/</literal> directory.
+              </para>
+              <para>
+                When multiple upstream servers are available, dnsmasq will
+                initially contact them in parallel and then use the fastest to
+                respond, probing again other servers after some time.  This
+                behavior can be modified passing the
+                <literal>all-servers</literal> or <literal>strict-order</literal>
+                options to dnsmasq (see the manual page for more details).
+              </para>
+              <para>
+                Note that this option causes NetworkManager to launch and manage
+                its own instance of the dnsmasq daemon, which is
+                <emphasis>not</emphasis> the same as setting
+                <literal>services.dnsmasq.enable = true;</literal>.
+              </para>
+            </listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><literal>"unbound"</literal></term>
+            <listitem><para>
+              NetworkManager will talk to unbound and dnssec-triggerd,
+              providing a "split DNS" configuration with DNSSEC support.
+              <literal>/etc/resolv.conf</literal> will be managed by
+              dnssec-trigger daemon.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><literal>"systemd-resolved"</literal></term>
+            <listitem><para>
+              NetworkManager will push the DNS configuration to systemd-resolved.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><literal>"none"</literal></term>
+            <listitem><para>
+              NetworkManager will not modify resolv.conf.
+            </para></listitem>
+          </varlistentry>
+          </variablelist>
         '';
       };
 
@@ -260,6 +320,52 @@ in {
          so you don't need to do that yourself.
         '';
       };
+
+      dynamicHosts = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            Enabling this option requires the
+            <option>networking.networkmanager.dns</option> option to be
+            set to <literal>dnsmasq</literal>. If enabled, the directories
+            defined by the
+            <option>networking.networkmanager.dynamicHosts.hostsDirs</option>
+            option will be set up when the service starts. The dnsmasq instance
+            managed by NetworkManager will then watch those directories for
+            hosts files (see the <literal>--hostsdir</literal> option of
+            dnsmasq). This way a non-privileged user can add or override DNS
+            entries on the local system (depending on which hosts
+            directories are configured).
+          '';
+        };
+        hostsDirs = mkOption {
+          type = with types; attrsOf (submodule {
+            options = {
+              user = mkOption {
+                type = types.str;
+                default = "root";
+                description = ''
+                  The user that will own the hosts directory.
+                '';
+              };
+              group = mkOption {
+                type = types.str;
+                default = "root";
+                description = ''
+                  The group that will own the hosts directory.
+                '';
+              };
+            };
+          });
+          default = {};
+          description = ''
+            Defines a set of directories (relative to
+            <literal>/run/NetworkManager/hostsdirs</literal>) that dnsmasq will
+            watch for hosts files.
+          '';
+        };
+      };
     };
   };
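
A hedged sketch combining the new dns and dynamicHosts options; the directory name is illustrative:

  networking.networkmanager = {
    enable = true;
    dns = "dnsmasq";            # dynamicHosts requires the dnsmasq mode
    dynamicHosts = {
      enable = true;
      # Watched at /run/NetworkManager/hostsdirs/local
      hostsDirs.local = { user = "root"; group = "users"; };
    };
  };
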
 
@@ -268,10 +374,17 @@ in {
 
   config = mkIf cfg.enable {
 
-    assertions = [{
-      assertion = config.networking.wireless.enable == false;
-      message = "You can not use networking.networkmanager with networking.wireless";
-    }];
+    assertions = [
+      { assertion = config.networking.wireless.enable == false;
+        message = "You cannot use networking.networkmanager with networking.wireless";
+      }
+      { assertion = !dynamicHostsEnabled || cfg.dns == "dnsmasq";
+        message = ''
+          To use networking.networkmanager.dynamicHosts you also need to set
+          networking.networkmanager.dns = "dnsmasq"
+        '';
+      }
+    ];
 
     environment.etc = with cfg.basePackages; [
       { source = configFile;
@@ -305,11 +418,17 @@ in {
       ++ lib.imap1 (i: s: {
         inherit (s) source;
         target = "NetworkManager/dispatcher.d/${dispatcherTypesSubdirMap.${s.type}}03userscript${lib.fixedWidthNumber 4 i}";
-      }) cfg.dispatcherScripts;
+      }) cfg.dispatcherScripts
+      ++ optional (dynamicHostsEnabled)
+           { target = "NetworkManager/dnsmasq.d/dyndns.conf";
+             text = concatMapStrings (n: ''
+               hostsdir=/run/NetworkManager/hostsdirs/${n}
+             '') (attrNames cfg.dynamicHosts.hostsDirs);
+           };
 
     environment.systemPackages = cfg.packages;
 
-    users.extraGroups = [{
+    users.groups = [{
       name = "networkmanager";
       gid = config.ids.gids.networkmanager;
     }
@@ -317,7 +436,7 @@ in {
       name = "nm-openvpn";
       gid = config.ids.gids.nm-openvpn;
     }];
-    users.extraUsers = [{
+    users.users = [{
       name = "nm-openvpn";
       uid = config.ids.uids.nm-openvpn;
       extraGroups = [ "networkmanager" ];
@@ -341,6 +460,21 @@ in {
       '';
     };
 
+    systemd.services.nm-setup-hostsdirs = mkIf dynamicHostsEnabled {
+      wantedBy = [ "network-manager.service" ];
+      before = [ "network-manager.service" ];
+      partOf = [ "network-manager.service" ];
+      script = concatStrings (mapAttrsToList (n: d: ''
+        mkdir -p "/run/NetworkManager/hostsdirs/${n}"
+        chown "${d.user}:${d.group}" "/run/NetworkManager/hostsdirs/${n}"
+        chmod 0775 "/run/NetworkManager/hostsdirs/${n}"
+      '') cfg.dynamicHosts.hostsDirs);
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+
     # Turn off NixOS' network management
     networking = {
       useDHCP = false;
diff --git a/nixos/modules/services/networking/ngircd.nix b/nixos/modules/services/networking/ngircd.nix
index 6a5290ffdee2..4b2fa7795922 100644
--- a/nixos/modules/services/networking/ngircd.nix
+++ b/nixos/modules/services/networking/ngircd.nix
@@ -51,7 +51,7 @@ in {
       serviceConfig.User = "ngircd";
     };
 
-    users.extraUsers.ngircd = {
+    users.users.ngircd = {
       uid = config.ids.uids.ngircd;
       description = "ngircd user.";
     };
diff --git a/nixos/modules/services/networking/nix-serve.nix b/nixos/modules/services/networking/nix-serve.nix
index 8499e7c0f7c4..e83cad949ae8 100644
--- a/nixos/modules/services/networking/nix-serve.nix
+++ b/nixos/modules/services/networking/nix-serve.nix
@@ -64,7 +64,7 @@ in
       };
     };
 
-    users.extraUsers.nix-serve = {
+    users.users.nix-serve = {
       description = "Nix-serve user";
       uid = config.ids.uids.nix-serve;
     };
diff --git a/nixos/modules/services/networking/nntp-proxy.nix b/nixos/modules/services/networking/nntp-proxy.nix
index 7eebecb23b00..d24d6f77a491 100644
--- a/nixos/modules/services/networking/nntp-proxy.nix
+++ b/nixos/modules/services/networking/nntp-proxy.nix
@@ -210,7 +210,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = proxyUser;
         uid = config.ids.uids.nntp-proxy;
         description = "NNTP-Proxy daemon user";
diff --git a/nixos/modules/services/networking/nsd.nix b/nixos/modules/services/networking/nsd.nix
index fc910e59c323..cde47bf23eae 100644
--- a/nixos/modules/services/networking/nsd.nix
+++ b/nixos/modules/services/networking/nsd.nix
@@ -897,12 +897,12 @@ in
 
     environment.systemPackages = [ nsdPkg ];
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = username;
       gid = config.ids.gids.nsd;
     };
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = username;
       description = "NSD service user";
       home = stateDir;
diff --git a/nixos/modules/services/networking/ntpd.nix b/nixos/modules/services/networking/ntpd.nix
index 88e6dbf22b9e..342350d49ab3 100644
--- a/nixos/modules/services/networking/ntpd.nix
+++ b/nixos/modules/services/networking/ntpd.nix
@@ -67,7 +67,7 @@ in
     environment.systemPackages = [ pkgs.ntp ];
     services.timesyncd.enable = mkForce false;
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = ntpUser;
         uid = config.ids.uids.ntp;
         description = "NTP daemon user";
diff --git a/nixos/modules/services/networking/nylon.nix b/nixos/modules/services/networking/nylon.nix
index 4864ecf3f92f..b7b59d95bf02 100644
--- a/nixos/modules/services/networking/nylon.nix
+++ b/nixos/modules/services/networking/nylon.nix
@@ -151,7 +151,7 @@ in
 
   config = mkIf (length(enabledNylons) > 0) {
 
-    users.extraUsers.nylon = {
+    users.users.nylon = {
       group = "nylon";
       description = "Nylon SOCKS Proxy";
       home = homeDir;
@@ -159,7 +159,7 @@ in
       uid = config.ids.uids.nylon;
     };
 
-    users.extraGroups.nylon.gid = config.ids.gids.nylon;
+    users.groups.nylon.gid = config.ids.gids.nylon;
 
     systemd.services = fold (a: b: a // b) {} nylonUnits;
 
diff --git a/nixos/modules/services/networking/oidentd.nix b/nixos/modules/services/networking/oidentd.nix
index ba7acd879546..8cf34623ab5e 100644
--- a/nixos/modules/services/networking/oidentd.nix
+++ b/nixos/modules/services/networking/oidentd.nix
@@ -32,13 +32,13 @@ with lib;
           optionalString config.networking.enableIPv6 " -a ::";
     };
 
-    users.extraUsers.oidentd = {
+    users.users.oidentd = {
       description = "Ident Protocol daemon user";
       group = "oidentd";
       uid = config.ids.uids.oidentd;
     };
 
-    users.extraGroups.oidentd.gid = config.ids.gids.oidentd;
+    users.groups.oidentd.gid = config.ids.gids.oidentd;
 
   };
 
diff --git a/nixos/modules/services/networking/openntpd.nix b/nixos/modules/services/networking/openntpd.nix
index 4bb9da54fe09..57638ebc9c01 100644
--- a/nixos/modules/services/networking/openntpd.nix
+++ b/nixos/modules/services/networking/openntpd.nix
@@ -7,7 +7,7 @@ let
 
   package = pkgs.openntpd_nixos;
 
-  cfgFile = pkgs.writeText "openntpd.conf" ''
+  configFile = ''
     ${concatStringsSep "\n" (map (s: "server ${s}") cfg.servers)}
     ${cfg.extraConfig}
   '';
@@ -31,8 +31,8 @@ in
       type = with types; lines;
       default = "";
       example = ''
-        listen on 127.0.0.1 
-        listen on ::1 
+        listen on 127.0.0.1
+        listen on ::1
       '';
       description = ''
         Additional text appended to <filename>openntpd.conf</filename>.
@@ -57,7 +57,9 @@ in
     # Add ntpctl to the environment for status checking
     environment.systemPackages = [ package ];
 
-    users.extraUsers = singleton {
+    environment.etc."ntpd.conf".text = configFile;
+
+    users.users = singleton {
       name = "ntp";
       uid = config.ids.uids.ntp;
       description = "OpenNTP daemon user";
@@ -71,7 +73,7 @@ in
       before = [ "time-sync.target" ];
       after = [ "dnsmasq.service" "bind.service" "network-online.target" ];
       serviceConfig = {
-        ExecStart = "${package}/sbin/ntpd -f ${cfgFile} -p ${pidFile} ${cfg.extraOptions}";
+        ExecStart = "${package}/sbin/ntpd -p ${pidFile} ${cfg.extraOptions}";
         Type = "forking";
         PIDFile = pidFile;
       };
diff --git a/nixos/modules/services/networking/openvpn.nix b/nixos/modules/services/networking/openvpn.nix
index a418839d22b8..b94b4026fd91 100644
--- a/nixos/modules/services/networking/openvpn.nix
+++ b/nixos/modules/services/networking/openvpn.nix
@@ -131,6 +131,9 @@ in
               Configuration of this OpenVPN instance.  See
               <citerefentry><refentrytitle>openvpn</refentrytitle><manvolnum>8</manvolnum></citerefentry>
               for details.
+
+              To import an external config file, use the following definition:
+              <literal>config = "config /path/to/config.ovpn"</literal>
             '';
           };
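
Expanding on the note above, a hedged sketch of importing an external config file (instance name and path are illustrative):

  services.openvpn.servers.officeVPN = {
    config = "config /root/nixos/openvpn/officeVPN.conf";
  };
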
 
diff --git a/nixos/modules/services/networking/owamp.nix b/nixos/modules/services/networking/owamp.nix
new file mode 100644
index 000000000000..821a0258f4be
--- /dev/null
+++ b/nixos/modules/services/networking/owamp.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.owamp;
+in
+{
+
+  ###### interface
+
+  options = {
+    services.owamp.enable = mkEnableOption "OWAMP server";
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    users.users = singleton {
+      name = "owamp";
+      group = "owamp";
+      description = "Owamp daemon";
+    };
+
+    users.groups = singleton {
+      name = "owamp";
+    };
+
+    systemd.services.owamp = {
+      description = "Owamp server";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.owamp}/bin/owampd -R /run/owamp -d /run/owamp -v -Z";
+        PrivateTmp = true;
+        Restart = "always";
+        Type = "simple";
+        User = "owamp";
+        Group = "owamp";
+        RuntimeDirectory = "owamp";
+        StateDirectory = "owamp";
+        AmbientCapabilities = "cap_net_bind_service";
+      };
+    };
+  };
+}
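
A hedged usage sketch; the module creates its own user, group, and directories (via RuntimeDirectory and StateDirectory), so enabling it is a one-liner:

  services.owamp.enable = true;
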
diff --git a/nixos/modules/services/networking/pdns-recursor.nix b/nixos/modules/services/networking/pdns-recursor.nix
index 26be72d2a61e..d07deb9dcc67 100644
--- a/nixos/modules/services/networking/pdns-recursor.nix
+++ b/nixos/modules/services/networking/pdns-recursor.nix
@@ -128,7 +128,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.extraUsers."${username}" = {
+    users.users."${username}" = {
       home = dataDir;
       createHome = true;
       uid = config.ids.uids.pdns-recursor;
diff --git a/nixos/modules/services/networking/pdnsd.nix b/nixos/modules/services/networking/pdnsd.nix
index f4467b818958..f5b174dd7b7b 100644
--- a/nixos/modules/services/networking/pdnsd.nix
+++ b/nixos/modules/services/networking/pdnsd.nix
@@ -62,14 +62,14 @@ in
     };
 
   config = mkIf cfg.enable {
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = pdnsdUser;
       uid = config.ids.uids.pdnsd;
       group = pdnsdGroup;
       description = "pdnsd user";
     };
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = pdnsdGroup;
       gid = config.ids.gids.pdnsd;
     };
diff --git a/nixos/modules/services/networking/polipo.nix b/nixos/modules/services/networking/polipo.nix
index 847fc88ead4c..529115a1c6e1 100644
--- a/nixos/modules/services/networking/polipo.nix
+++ b/nixos/modules/services/networking/polipo.nix
@@ -85,7 +85,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "polipo";
         uid = config.ids.uids.polipo;
         description = "Polipo caching proxy user";
@@ -93,7 +93,7 @@ in
         createHome = true;
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "polipo";
         gid = config.ids.gids.polipo;
         members = [ "polipo" ];
diff --git a/nixos/modules/services/networking/prayer.nix b/nixos/modules/services/networking/prayer.nix
index 8cd4a0823534..f63f86496bee 100644
--- a/nixos/modules/services/networking/prayer.nix
+++ b/nixos/modules/services/networking/prayer.nix
@@ -72,14 +72,14 @@ in
   config = mkIf config.services.prayer.enable {
     environment.systemPackages = [ prayer ];
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = prayerUser;
         uid = config.ids.uids.prayer;
         description = "Prayer daemon user";
         home = stateDir;
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = prayerGroup;
         gid = config.ids.gids.prayer;
       };
diff --git a/nixos/modules/services/networking/prosody.nix b/nixos/modules/services/networking/prosody.nix
index 1b4f81f6b56e..a37ef98caec6 100644
--- a/nixos/modules/services/networking/prosody.nix
+++ b/nixos/modules/services/networking/prosody.nix
@@ -487,7 +487,7 @@ in
         '') cfg.virtualHosts) }
     '';
 
-    users.extraUsers.prosody = mkIf (cfg.user == "prosody") {
+    users.users.prosody = mkIf (cfg.user == "prosody") {
       uid = config.ids.uids.prosody;
       description = "Prosody user";
       createHome = true;
@@ -495,7 +495,7 @@ in
       home = "${cfg.dataDir}";
     };
 
-    users.extraGroups.prosody = mkIf (cfg.group == "prosody") {
+    users.groups.prosody = mkIf (cfg.group == "prosody") {
       gid = config.ids.gids.prosody;
     };
 
diff --git a/nixos/modules/services/networking/quagga.nix b/nixos/modules/services/networking/quagga.nix
index 22204e53203c..5acdd5af8f8f 100644
--- a/nixos/modules/services/networking/quagga.nix
+++ b/nixos/modules/services/networking/quagga.nix
@@ -95,26 +95,25 @@ in
 {
 
   ###### interface
-
-  options.services.quagga =
+  imports = [
     {
-
-      zebra = (serviceOptions "zebra") // {
-
-        enable = mkOption {
-          type = types.bool;
-          default = any isEnabled services;
-          description = ''
-            Whether to enable the Zebra routing manager.
-
-            The Zebra routing manager is automatically enabled
-            if any routing protocols are configured.
-          '';
+      options.services.quagga = {
+        zebra = (serviceOptions "zebra") // {
+          enable = mkOption {
+            type = types.bool;
+            default = any isEnabled services;
+            description = ''
+              Whether to enable the Zebra routing manager.
+
+              The Zebra routing manager is automatically enabled
+              if any routing protocols are configured.
+            '';
+          };
         };
-
       };
-
-    } // (genAttrs services serviceOptions);
+    }
+    { options.services.quagga = (genAttrs services serviceOptions); }
+  ];
 
   ###### implementation
 
diff --git a/nixos/modules/services/networking/quassel.nix b/nixos/modules/services/networking/quassel.nix
index bc7d6912b5ce..d850bb8b1305 100644
--- a/nixos/modules/services/networking/quassel.nix
+++ b/nixos/modules/services/networking/quassel.nix
@@ -72,14 +72,14 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = mkIf (cfg.user == null) [
+    users.users = mkIf (cfg.user == null) [
       { name = "quassel";
         description = "Quassel IRC client daemon";
         group = "quassel";
         uid = config.ids.uids.quassel;
       }];
 
-    users.extraGroups = mkIf (cfg.user == null) [
+    users.groups = mkIf (cfg.user == null) [
       { name = "quassel";
         gid = config.ids.gids.quassel;
       }];
diff --git a/nixos/modules/services/networking/radicale.nix b/nixos/modules/services/networking/radicale.nix
index 97ee05046ff0..2afab5ee3b3e 100644
--- a/nixos/modules/services/networking/radicale.nix
+++ b/nixos/modules/services/networking/radicale.nix
@@ -59,7 +59,7 @@ in
   config = mkIf cfg.enable {
     environment.systemPackages = [ cfg.package ];
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "radicale";
         uid = config.ids.uids.radicale;
         description = "radicale user";
@@ -67,7 +67,7 @@ in
         createHome = true;
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "radicale";
         gid = config.ids.gids.radicale;
       };
diff --git a/nixos/modules/services/networking/radvd.nix b/nixos/modules/services/networking/radvd.nix
index 85d7f9e4a41b..020faa34922a 100644
--- a/nixos/modules/services/networking/radvd.nix
+++ b/nixos/modules/services/networking/radvd.nix
@@ -52,7 +52,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.radvd =
+    users.users.radvd =
       { uid = config.ids.uids.radvd;
         description = "Router Advertisement Daemon User";
       };
diff --git a/nixos/modules/services/networking/rdnssd.nix b/nixos/modules/services/networking/rdnssd.nix
index a102242eae71..887772f6e5f0 100644
--- a/nixos/modules/services/networking/rdnssd.nix
+++ b/nixos/modules/services/networking/rdnssd.nix
@@ -64,7 +64,7 @@ in
       };
     };
 
-    users.extraUsers.rdnssd = {
+    users.users.rdnssd = {
       description = "RDNSSD Daemon User";
       uid = config.ids.uids.rdnssd;
     };
diff --git a/nixos/modules/services/networking/resilio.nix b/nixos/modules/services/networking/resilio.nix
index 2956a5ecbc04..ee7f82ac7bee 100644
--- a/nixos/modules/services/networking/resilio.nix
+++ b/nixos/modules/services/networking/resilio.nix
@@ -236,7 +236,7 @@ in
         }
       ];
 
-    users.extraUsers.rslsync = {
+    users.users.rslsync = {
       description     = "Resilio Sync Service user";
       home            = cfg.storagePath;
       createHome      = true;
@@ -244,7 +244,7 @@ in
       group           = "rslsync";
     };
 
-    users.extraGroups = [ { name = "rslsync"; } ];
+    users.groups = [ { name = "rslsync"; } ];
 
     systemd.services.resilio = with pkgs; {
       description = "Resilio Sync Service";
diff --git a/nixos/modules/services/networking/rpcbind.nix b/nixos/modules/services/networking/rpcbind.nix
index cddcb09054e0..0a5df6987092 100644
--- a/nixos/modules/services/networking/rpcbind.nix
+++ b/nixos/modules/services/networking/rpcbind.nix
@@ -37,7 +37,7 @@ with lib;
       wantedBy = [ "multi-user.target" ];
     };
 
-    users.extraUsers.rpc = {
+    users.users.rpc = {
       group = "nogroup";
       uid = config.ids.uids.rpc;
     };
diff --git a/nixos/modules/services/networking/sabnzbd.nix b/nixos/modules/services/networking/sabnzbd.nix
index cacf753fdcd7..62b24d4377f8 100644
--- a/nixos/modules/services/networking/sabnzbd.nix
+++ b/nixos/modules/services/networking/sabnzbd.nix
@@ -41,7 +41,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.sabnzbd = {
+    users.users.sabnzbd = {
           uid = config.ids.uids.sabnzbd;
           group = "sabnzbd";
           description = "sabnzbd user";
@@ -49,7 +49,7 @@ in
           createHome = true;
     };
 
-    users.extraGroups.sabnzbd = {
+    users.groups.sabnzbd = {
       gid = config.ids.gids.sabnzbd;
     };
 
diff --git a/nixos/modules/services/networking/searx.nix b/nixos/modules/services/networking/searx.nix
index c7a128ae212d..9412d0ef8a62 100644
--- a/nixos/modules/services/networking/searx.nix
+++ b/nixos/modules/services/networking/searx.nix
@@ -47,14 +47,14 @@ in
 
   config = mkIf config.services.searx.enable {
 
-    users.extraUsers.searx =
+    users.users.searx =
       { uid = config.ids.uids.searx;
         description = "Searx user";
         createHome = true;
         home = "/var/lib/searx";
       };
 
-    users.extraGroups.searx =
+    users.groups.searx =
       { gid = config.ids.gids.searx;
       };
 
diff --git a/nixos/modules/services/networking/seeks.nix b/nixos/modules/services/networking/seeks.nix
index f5bc60be3457..40729225b6d0 100644
--- a/nixos/modules/services/networking/seeks.nix
+++ b/nixos/modules/services/networking/seeks.nix
@@ -46,14 +46,14 @@ in
 
   config = mkIf config.services.seeks.enable {
 
-    users.extraUsers.seeks =
+    users.users.seeks =
       { uid = config.ids.uids.seeks;
         description = "Seeks user";
         createHome = true;
         home = "/var/lib/seeks";
       };
 
-    users.extraGroups.seeks =
+    users.groups.seeks =
       { gid = config.ids.gids.seeks;
       };
 
diff --git a/nixos/modules/services/networking/shairport-sync.nix b/nixos/modules/services/networking/shairport-sync.nix
index 908de9efd6fb..0b87140b0d8d 100644
--- a/nixos/modules/services/networking/shairport-sync.nix
+++ b/nixos/modules/services/networking/shairport-sync.nix
@@ -55,7 +55,7 @@ in
     services.avahi.publish.enable = true;
     services.avahi.publish.userServices = true;
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = cfg.user;
         description = "Shairport user";
         isSystemUser = true;
diff --git a/nixos/modules/services/networking/shout.nix b/nixos/modules/services/networking/shout.nix
index 3664c2857739..9784f1d160f3 100644
--- a/nixos/modules/services/networking/shout.nix
+++ b/nixos/modules/services/networking/shout.nix
@@ -82,7 +82,7 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "shout";
       uid = config.ids.uids.shout;
       description = "Shout daemon user";
diff --git a/nixos/modules/services/networking/smokeping.nix b/nixos/modules/services/networking/smokeping.nix
index c5c131cb4c50..9ba6e48f417c 100644
--- a/nixos/modules/services/networking/smokeping.nix
+++ b/nixos/modules/services/networking/smokeping.nix
@@ -278,7 +278,7 @@ in
       "fping6".source = "${pkgs.fping}/bin/fping6";
     };
     environment.systemPackages = [ pkgs.fping ];
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = cfg.user;
       isNormalUser = false;
       isSystemUser = true;
diff --git a/nixos/modules/services/networking/sniproxy.nix b/nixos/modules/services/networking/sniproxy.nix
index 4d0f36923293..0345c12d3afe 100644
--- a/nixos/modules/services/networking/sniproxy.nix
+++ b/nixos/modules/services/networking/sniproxy.nix
@@ -82,14 +82,14 @@ in
       };
     };
 
-    users.extraUsers = mkIf (cfg.user == "sniproxy") {
+    users.users = mkIf (cfg.user == "sniproxy") {
       sniproxy = {
         group = cfg.group;
         uid = config.ids.uids.sniproxy;
       };
     };
 
-    users.extraGroups = mkIf (cfg.group == "sniproxy") {
+    users.groups = mkIf (cfg.group == "sniproxy") {
       sniproxy = {
         gid = config.ids.gids.sniproxy;
       };
diff --git a/nixos/modules/services/networking/spiped.nix b/nixos/modules/services/networking/spiped.nix
index 005d7182351a..e60d9abf42a6 100644
--- a/nixos/modules/services/networking/spiped.nix
+++ b/nixos/modules/services/networking/spiped.nix
@@ -171,8 +171,8 @@ in
       message   = "A pipe must either encrypt or decrypt";
     }) cfg.config;
 
-    users.extraGroups.spiped.gid = config.ids.gids.spiped;
-    users.extraUsers.spiped = {
+    users.groups.spiped.gid = config.ids.gids.spiped;
+    users.users.spiped = {
       description = "Secure Pipe Service user";
       group       = "spiped";
       uid         = config.ids.uids.spiped;
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index aab1203086ce..7b2d1920f0f1 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -49,7 +49,7 @@ let
         ${concatMapStrings (f: readFile f + "\n") u.openssh.authorizedKeys.keyFiles}
       '';
     };
-    usersWithKeys = attrValues (flip filterAttrs config.users.extraUsers (n: u:
+    usersWithKeys = attrValues (flip filterAttrs config.users.users (n: u:
       length u.openssh.authorizedKeys.keys != 0 || length u.openssh.authorizedKeys.keyFiles != 0
     ));
   in listToAttrs (map mkAuthKeyFile usersWithKeys);
@@ -198,6 +198,10 @@ in
           [ { type = "rsa"; bits = 4096; path = "/etc/ssh/ssh_host_rsa_key"; }
             { type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; }
           ];
+        example =
+          [ { type = "rsa"; bits = 4096; path = "/etc/ssh/ssh_host_rsa_key"; rounds = 100; openSSHFormat = true; }
+            { type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; rounds = 100; comment = "key comment"; }
+          ];
         description = ''
           NixOS can automatically generate SSH host keys.  This option
           specifies the path, type and size of each key.  See
@@ -272,6 +276,31 @@ in
         '';
       };
 
+      logLevel = mkOption {
+        type = types.enum [ "QUIET" "FATAL" "ERROR" "INFO" "VERBOSE" "DEBUG" "DEBUG1" "DEBUG2" "DEBUG3" ];
+        default = "VERBOSE";
+        description = ''
+          Gives the verbosity level that is used when logging messages from sshd(8). The possible values are:
+          QUIET, FATAL, ERROR, INFO, VERBOSE, DEBUG, DEBUG1, DEBUG2, and DEBUG3. The default is VERBOSE. DEBUG and DEBUG1
+          are equivalent. DEBUG2 and DEBUG3 each specify higher levels of debugging output. Logging with a DEBUG level
+          violates the privacy of users and is not recommended.
+
+          LogLevel VERBOSE logs the user's key fingerprint on login,
+          which is needed for a clear audit trail of which key was used to log in.
+        '';
+      };
+
+      useDns = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Specifies whether sshd(8) should look up the remote host name and check
+          that the resolved host name for the remote IP address maps back to the very same IP address.
+          If this option is set to no (the default), only addresses and not host names may be used in
+          ~/.ssh/authorized_keys from= options and sshd_config Match Host directives.
+        '';
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
@@ -301,7 +330,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.sshd =
+    users.users.sshd =
       { isSystemUser = true;
         description = "SSH privilege separation user";
       };
@@ -309,7 +338,9 @@ in
     services.openssh.moduliFile = mkDefault "${cfgc.package}/etc/ssh/moduli";
 
     environment.etc = authKeysFiles //
-      { "ssh/moduli".source = cfg.moduliFile; };
+      { "ssh/moduli".source = cfg.moduliFile;
+        "ssh/sshd_config".text = cfg.extraConfig;
+      };
 
     systemd =
       let
@@ -331,7 +362,14 @@ in
 
                 ${flip concatMapStrings cfg.hostKeys (k: ''
                   if ! [ -f "${k.path}" ]; then
-                      ssh-keygen -t "${k.type}" ${if k ? bits then "-b ${toString k.bits}" else ""} -f "${k.path}" -N ""
+                      ssh-keygen \
+                        -t "${k.type}" \
+                        ${if k ? bits then "-b ${toString k.bits}" else ""} \
+                        ${if k ? rounds then "-a ${toString k.rounds}" else ""} \
+                        ${if k ? comment then "-C '${k.comment}'" else ""} \
+                        ${if k ? openSSHFormat && k.openSSHFormat then "-o" else ""} \
+                        -f "${k.path}" \
+                        -N ""
                   fi
                 '')}
               '';
@@ -340,7 +378,7 @@ in
               { ExecStart =
                   (optionalString cfg.startWhenNeeded "-") +
                   "${cfgc.package}/bin/sshd " + (optionalString cfg.startWhenNeeded "-i ") +
-                  "-f ${pkgs.writeText "sshd_config" cfg.extraConfig}";
+                  "-f /etc/ssh/sshd_config";
                 KillMode = "process";
               } // (if cfg.startWhenNeeded then {
                 StandardInput = "socket";
@@ -377,6 +415,9 @@ in
         unixAuth = cfg.passwordAuthentication;
       };
 
+    # These values are merged with the ones defined externally, see:
+    # https://github.com/NixOS/nixpkgs/pull/10155
+    # https://github.com/NixOS/nixpkgs/pull/41745
     services.openssh.authorizedKeysFiles =
       [ ".ssh/authorized_keys" ".ssh/authorized_keys2" "/etc/ssh/authorized_keys.d/%u" ];
 
@@ -426,9 +467,14 @@ in
         Ciphers ${concatStringsSep "," cfg.ciphers}
         MACs ${concatStringsSep "," cfg.macs}
 
-        # LogLevel VERBOSE logs user's key fingerprint on login.
-        # Needed to have a clear audit track of which key was used to log in.
-        LogLevel VERBOSE
+        LogLevel ${cfg.logLevel}
+
+        ${if cfg.useDns then ''
+          UseDNS yes
+        '' else ''
+          UseDNS no
+        ''}
+
       '';
 
     assertions = [{ assertion = if cfg.forwardX11 then cfgc.setXAuthLocation else true;
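For reference, a minimal sketch (not part of this patch) of a configuration using the sshd options added above; the option names follow the definitions in this diff, everything else is illustrative:

    { ... }:
    {
      services.openssh = {
        enable = true;
        # Host key generated with extra KDF rounds and a comment (see the
        # ssh-keygen wrapper above); "rounds" and "comment" are optional.
        hostKeys = [
          { type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; rounds = 100; comment = "host key"; }
        ];
        logLevel = "VERBOSE"; # the previously hard-coded value, now configurable
        useDns = false;       # keeps the default; avoids reverse-DNS lookups on login
      };
    }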
diff --git a/nixos/modules/services/networking/sslh.nix b/nixos/modules/services/networking/sslh.nix
index e3d65c49fbf2..0222e8ce8b58 100644
--- a/nixos/modules/services/networking/sslh.nix
+++ b/nixos/modules/services/networking/sslh.nix
@@ -4,15 +4,14 @@ with lib;
 
 let
   cfg = config.services.sslh;
+  user = "sslh";
   configFile = pkgs.writeText "sslh.conf" ''
     verbose: ${boolToString cfg.verbose};
     foreground: true;
     inetd: false;
     numeric: false;
-    transparent: false;
+    transparent: ${boolToString cfg.transparent};
     timeout: "${toString cfg.timeout}";
-    user: "nobody";
-    pidfile: "${cfg.pidfile}";
 
     listen:
     (
@@ -50,16 +49,16 @@ in
         description = "Timeout in seconds.";
       };
 
-      pidfile = mkOption {
-        type = types.path;
-        default = "/run/sslh.pid";
-        description = "PID file path for sslh daemon.";
+      transparent = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Will the services behind sslh (Apache, sshd and so on) see the external IP and ports as if the external world connected directly to them";
       };
 
       listenAddress = mkOption {
         type = types.str;
-        default = config.networking.hostName;
-        description = "Listening hostname.";
+        default = "0.0.0.0";
+        description = "Listening address or hostname.";
       };
 
       port = mkOption {
@@ -76,14 +75,91 @@ in
     };
   };
 
-  config = mkIf cfg.enable {
-    systemd.services.sslh = {
-      description = "Applicative Protocol Multiplexer (e.g. share SSH and HTTPS on the same port)";
-      after = [ "network.target" ];
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig.ExecStart = "${pkgs.sslh}/bin/sslh -F${configFile}";
-      serviceConfig.KillMode = "process";
-      serviceConfig.PIDFile = "${cfg.pidfile}";
-    };
-  };
+  config = mkMerge [
+    (mkIf cfg.enable {
+      users.users.${user} = {
+        description = "sslh daemon user";
+        isSystemUser = true;
+      };
+
+      systemd.services.sslh = {
+        description = "Applicative Protocol Multiplexer (e.g. share SSH and HTTPS on the same port)";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          User                 = user;
+          Group                = "nogroup";
+          PermissionsStartOnly = true;
+          Restart              = "always";
+          RestartSec           = "1s";
+          ExecStart            = "${pkgs.sslh}/bin/sslh -F${configFile}";
+          KillMode             = "process";
+          AmbientCapabilities  = "CAP_NET_BIND_SERVICE CAP_NET_ADMIN CAP_SETGID CAP_SETUID";
+          PrivateTmp           = true;
+          PrivateDevices       = true;
+          ProtectSystem        = "full";
+          ProtectHome          = true;
+        };
+      };
+    })
+
+    # code from https://github.com/yrutschle/sslh#transparent-proxy-support
+    # the only difference is using iptables mark 0x2 instead of 0x1 to avoid conflicts with nixos/nat module
+    (mkIf (cfg.enable && cfg.transparent) {
+      # Set route_localnet = 1 on all interfaces so that sslh can use "localhost" as destination
+      boot.kernel.sysctl."net.ipv4.conf.default.route_localnet" = 1;
+      boot.kernel.sysctl."net.ipv4.conf.all.route_localnet"     = 1;
+
+      systemd.services.sslh = let
+        iptablesCommands = [
+          # DROP martian packets as they would have been if route_localnet was zero
+          # Note: packets not leaving the server aren't affected by this, thus sslh will still work
+          { table = "raw";    command = "PREROUTING  ! -i lo -d 127.0.0.0/8 -j DROP"; }
+          { table = "mangle"; command = "POSTROUTING ! -o lo -s 127.0.0.0/8 -j DROP"; }
+          # Mark all connections made by sslh for special treatment (here sslh is run as user ${user})
+          { table = "nat";    command = "OUTPUT -m owner --uid-owner ${user} -p tcp --tcp-flags FIN,SYN,RST,ACK SYN -j CONNMARK --set-xmark 0x02/0x0f"; }
+          # Outgoing packets that should go to sslh instead have to be rerouted, so mark them accordingly (copying over the connection mark)
+          { table = "mangle"; command = "OUTPUT ! -o lo -p tcp -m connmark --mark 0x02/0x0f -j CONNMARK --restore-mark --mask 0x0f"; }
+        ];
+        ip6tablesCommands = [
+          { table = "raw";    command = "PREROUTING  ! -i lo -d ::1/128     -j DROP"; }
+          { table = "mangle"; command = "POSTROUTING ! -o lo -s ::1/128     -j DROP"; }
+          { table = "nat";    command = "OUTPUT -m owner --uid-owner ${user} -p tcp --tcp-flags FIN,SYN,RST,ACK SYN -j CONNMARK --set-xmark 0x02/0x0f"; }
+          { table = "mangle"; command = "OUTPUT ! -o lo -p tcp -m connmark --mark 0x02/0x0f -j CONNMARK --restore-mark --mask 0x0f"; }
+        ];
+      in {
+        path = [ pkgs.iptables pkgs.iproute pkgs.procps ];
+
+        preStart = ''
+          # Cleanup old iptables entries which might be still there
+          ${concatMapStringsSep "\n" ({table, command}: "while iptables -w -t ${table} -D ${command} 2>/dev/null; do echo; done") iptablesCommands}
+          ${concatMapStringsSep "\n" ({table, command}:       "iptables -w -t ${table} -A ${command}"                           ) iptablesCommands}
+
+          # Configure routing for those marked packets
+          ip rule  add fwmark 0x2 lookup 100
+          ip route add local 0.0.0.0/0 dev lo table 100
+
+        '' + optionalString config.networking.enableIPv6 ''
+          ${concatMapStringsSep "\n" ({table, command}: "while ip6tables -w -t ${table} -D ${command} 2>/dev/null; do echo; done") ip6tablesCommands}
+          ${concatMapStringsSep "\n" ({table, command}:       "ip6tables -w -t ${table} -A ${command}"                           ) ip6tablesCommands}
+
+          ip -6 rule  add fwmark 0x2 lookup 100
+          ip -6 route add local ::/0 dev lo table 100
+        '';
+
+        postStop = ''
+          ${concatMapStringsSep "\n" ({table, command}: "iptables -w -t ${table} -D ${command}") iptablesCommands}
+
+          ip rule  del fwmark 0x2 lookup 100
+          ip route del local 0.0.0.0/0 dev lo table 100
+        '' + optionalString config.networking.enableIPv6 ''
+          ${concatMapStringsSep "\n" ({table, command}: "ip6tables -w -t ${table} -D ${command}") ip6tablesCommands}
+
+          ip -6 rule  del fwmark 0x2 lookup 100
+          ip -6 route del local ::/0 dev lo table 100
+        '';
+      };
+    })
+  ];
 }
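A usage sketch for the transparent mode added above (illustrative values; the option names are the ones defined in this module):

    {
      services.sslh = {
        enable = true;
        transparent = true;        # enables the iptables/ip-rule plumbing above
        listenAddress = "0.0.0.0"; # the new default, previously the host name
        port = 443;
      };
    }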
diff --git a/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix b/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix
index ad211f41eef0..b16d299917fe 100644
--- a/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix
+++ b/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix
@@ -938,9 +938,12 @@ in {
         protection.
       '';
 
-      hw_offload = mkYesNoParam no ''
+      hw_offload = mkEnumParam ["yes" "no" "auto"] "no" ''
         Enable hardware offload for this CHILD_SA, if supported by the IPsec
-        implementation.
+        implementation. The value <literal>yes</literal> enforces offloading
+        and the installation will fail if it's not supported by either the
+        kernel or the device. The value <literal>auto</literal> enables
+        offloading if it's supported, but the installation does not fail otherwise.
       '';
 
       start_action = mkEnumParam ["none" "trap" "start"] "none" ''
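A sketch of how the new hw_offload values might be set; the attribute path is assumed from the swanctl module layout, and the connection/child names are illustrative:

    {
      services.strongswan-swanctl.swanctl.connections.gw.children.net = {
        # Offload when the kernel/NIC support it, fall back silently otherwise.
        hw_offload = "auto";
      };
    }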
diff --git a/nixos/modules/services/networking/supplicant.nix b/nixos/modules/services/networking/supplicant.nix
index dc90a4bcc620..3c4321ab9e9d 100644
--- a/nixos/modules/services/networking/supplicant.nix
+++ b/nixos/modules/services/networking/supplicant.nix
@@ -183,7 +183,7 @@ in
 
       example = literalExample ''
         { "wlan0 wlan1" = {
-            configFile = "/etc/wpa_supplicant";
+            configFile.path = "/etc/wpa_supplicant.conf";
             userControlled.group = "network";
             extraConf = '''
               ap_scan=1
diff --git a/nixos/modules/services/networking/supybot.nix b/nixos/modules/services/networking/supybot.nix
index 2cfb9fc9b923..64eb11068329 100644
--- a/nixos/modules/services/networking/supybot.nix
+++ b/nixos/modules/services/networking/supybot.nix
@@ -45,7 +45,7 @@ in
 
     environment.systemPackages = [ pkgs.pythonPackages.limnoria ];
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "supybot";
       uid = config.ids.uids.supybot;
       group = "supybot";
@@ -54,7 +54,7 @@ in
       createHome = true;
     };
 
-    users.extraGroups.supybot = {
+    users.groups.supybot = {
       name = "supybot";
       gid = config.ids.gids.supybot;
     };
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index e485c073cbdd..c610b3b66606 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -103,7 +103,7 @@ in {
     systemd.packages = [ pkgs.syncthing ];
 
     users = mkIf (cfg.user == defaultUser) {
-      extraUsers."${defaultUser}" =
+      users."${defaultUser}" =
         { group = cfg.group;
           home  = cfg.dataDir;
           createHome = true;
@@ -111,7 +111,7 @@ in {
           description = "Syncthing daemon user";
         };
 
-      extraGroups."${defaultUser}".gid =
+      groups."${defaultUser}".gid =
         config.ids.gids.syncthing;
     };
 
diff --git a/nixos/modules/services/networking/tcpcrypt.nix b/nixos/modules/services/networking/tcpcrypt.nix
index ee005e11aa32..a0ccb9950094 100644
--- a/nixos/modules/services/networking/tcpcrypt.nix
+++ b/nixos/modules/services/networking/tcpcrypt.nix
@@ -29,7 +29,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "tcpcryptd";
       uid = config.ids.uids.tcpcryptd;
       description = "tcpcrypt daemon user";
diff --git a/nixos/modules/services/networking/tinc.nix b/nixos/modules/services/networking/tinc.nix
index e3c9b5282b8c..35cdddc590b8 100644
--- a/nixos/modules/services/networking/tinc.nix
+++ b/nixos/modules/services/networking/tinc.nix
@@ -163,12 +163,7 @@ in
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" ];
         path = [ data.package ];
-        restartTriggers =
-          let
-            drvlist = [ config.environment.etc."tinc/${network}/tinc.conf".source ]
-                        ++ mapAttrsToList (host: _: config.environment.etc."tinc/${network}/hosts/${host}".source) data.hosts;
-          in # drvlist might be too long to be used directly
-            [ (builtins.hashString "sha256" (concatMapStrings (d: d.outPath) drvlist)) ];
+        restartTriggers = [ config.environment.etc."tinc/${network}/tinc.conf".source ];
         serviceConfig = {
           Type = "simple";
           Restart = "always";
@@ -207,13 +202,14 @@ in
           ${concatStringsSep "\n" (mapAttrsToList (network: data:
             optionalString (versionAtLeast data.package.version "1.1pre") ''
               makeWrapper ${data.package}/bin/tinc "$out/bin/tinc.${network}" \
-                --add-flags "--pidfile=/run/tinc.${network}.pid"
+                --add-flags "--pidfile=/run/tinc.${network}.pid" \
+                --add-flags "--config=/etc/tinc/${network}"
             '') cfg.networks)}
         '';
       };
     in [ cli-wrappers ];
 
-    users.extraUsers = flip mapAttrs' cfg.networks (network: _:
+    users.users = flip mapAttrs' cfg.networks (network: _:
       nameValuePair ("tinc.${network}") ({
         description = "Tinc daemon user for ${network}";
         isSystemUser = true;
diff --git a/nixos/modules/services/networking/tinydns.nix b/nixos/modules/services/networking/tinydns.nix
index 184888ef05da..7d5db71601ef 100644
--- a/nixos/modules/services/networking/tinydns.nix
+++ b/nixos/modules/services/networking/tinydns.nix
@@ -32,7 +32,7 @@ with lib;
   config = mkIf config.services.tinydns.enable {
     environment.systemPackages = [ pkgs.djbdns ];
 
-    users.extraUsers.tinydns = {};
+    users.users.tinydns = {};
 
     systemd.services.tinydns = {
       description = "djbdns tinydns server";
diff --git a/nixos/modules/services/networking/tox-bootstrapd.nix b/nixos/modules/services/networking/tox-bootstrapd.nix
index cb0e6b158651..1d3492151690 100644
--- a/nixos/modules/services/networking/tox-bootstrapd.nix
+++ b/nixos/modules/services/networking/tox-bootstrapd.nix
@@ -56,7 +56,7 @@ in
 
   config = mkIf config.services.toxBootstrapd.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "tox-bootstrapd";
         uid = config.ids.uids.tox-bootstrapd;
         description = "Tox bootstrap daemon user";
diff --git a/nixos/modules/services/networking/toxvpn.nix b/nixos/modules/services/networking/toxvpn.nix
index 5e13402d7645..f5baea9222be 100644
--- a/nixos/modules/services/networking/toxvpn.nix
+++ b/nixos/modules/services/networking/toxvpn.nix
@@ -57,7 +57,7 @@ with lib;
 
     environment.systemPackages = [ pkgs.toxvpn ];
 
-    users.extraUsers = {
+    users.users = {
       toxvpn = {
         uid        = config.ids.uids.toxvpn;
         home       = "/var/lib/toxvpn";
diff --git a/nixos/modules/services/networking/tvheadend.nix b/nixos/modules/services/networking/tvheadend.nix
index cdd8747ba898..f495c39967e8 100644
--- a/nixos/modules/services/networking/tvheadend.nix
+++ b/nixos/modules/services/networking/tvheadend.nix
@@ -3,7 +3,7 @@
 with lib;
 
 let cfg     = config.services.tvheadend;
-    pidFile = "${config.users.extraUsers.tvheadend.home}/tvheadend.pid";
+    pidFile = "${config.users.users.tvheadend.home}/tvheadend.pid";
 in
 
 {
@@ -25,7 +25,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers.tvheadend = {
+    users.users.tvheadend = {
       description = "Tvheadend Service user";
       home        = "/var/lib/tvheadend";
       createHome  = true;
diff --git a/nixos/modules/services/networking/unbound.nix b/nixos/modules/services/networking/unbound.nix
index f069a9883a7f..1a35979ad44c 100644
--- a/nixos/modules/services/networking/unbound.nix
+++ b/nixos/modules/services/networking/unbound.nix
@@ -60,7 +60,7 @@ in
       };
 
       interfaces = mkOption {
-        default = [ "127.0.0.1" "::1" ];
+        default = [ "127.0.0.1" ] ++ optional config.networking.enableIPv6 "::1";
         type = types.listOf types.str;
         description = "What addresses the server should listen on.";
       };
@@ -112,8 +112,8 @@ in
         mkdir -m 0755 -p ${stateDir}/dev/
         cp ${confFile} ${stateDir}/unbound.conf
         ${optionalString cfg.enableRootTrustAnchor ''
-        ${pkgs.unbound}/bin/unbound-anchor -a ${rootTrustAnchorFile} || echo "Root anchor updated!"
-        chown unbound ${stateDir} ${rootTrustAnchorFile}
+          ${pkgs.unbound}/bin/unbound-anchor -a ${rootTrustAnchorFile} || echo "Root anchor updated!"
+          chown unbound ${stateDir} ${rootTrustAnchorFile}
         ''}
         touch ${stateDir}/dev/random
         ${pkgs.utillinux}/bin/mount --bind -n /dev/urandom ${stateDir}/dev/random
@@ -126,9 +126,14 @@ in
         ProtectSystem = true;
         ProtectHome = true;
         PrivateDevices = true;
+        Restart = "always";
+        RestartSec = "5s";
       };
     };
 
+    # If networkmanager is enabled, ask it to interface with unbound.
+    networking.networkmanager.dns = "unbound";
+
   };
 
 }
diff --git a/nixos/modules/services/networking/unifi.nix b/nixos/modules/services/networking/unifi.nix
index 94958bfdd83e..ac10e77ba306 100644
--- a/nixos/modules/services/networking/unifi.nix
+++ b/nixos/modules/services/networking/unifi.nix
@@ -114,7 +114,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.extraUsers.unifi = {
+    users.users.unifi = {
       uid = config.ids.uids.unifi;
       description = "UniFi controller daemon user";
       home = "${stateDir}";
diff --git a/nixos/modules/services/networking/vsftpd.nix b/nixos/modules/services/networking/vsftpd.nix
index 6b3d658bd852..1f9107c3ce9c 100644
--- a/nixos/modules/services/networking/vsftpd.nix
+++ b/nixos/modules/services/networking/vsftpd.nix
@@ -193,7 +193,7 @@ in
         message = "vsftpd: If forceLocalLoginsSSL or forceLocalDataSSL is true then a rsaCertFile must be provided!";
       };
 
-    users.extraUsers =
+    users.users =
       [ { name = "vsftpd";
           uid = config.ids.uids.vsftpd;
           description = "VSFTPD user";
@@ -207,7 +207,7 @@ in
           home = cfg.anonymousUserHome;
         };
 
-    users.extraGroups.ftp.gid = config.ids.gids.ftp;
+    users.groups.ftp.gid = config.ids.gids.ftp;
 
     # If you really have to access root via FTP use mkOverride or userlistDeny
     # = false and whitelist root
diff --git a/nixos/modules/services/networking/wireguard.nix b/nixos/modules/services/networking/wireguard.nix
index 0591917c7423..acb4778d8485 100644
--- a/nixos/modules/services/networking/wireguard.nix
+++ b/nixos/modules/services/networking/wireguard.nix
@@ -193,7 +193,7 @@ let
         after = [ "network.target" ];
         wantedBy = [ "multi-user.target" ];
         environment.DEVICE = name;
-        path = with pkgs; [ kmod iproute wireguard ];
+        path = with pkgs; [ kmod iproute wireguard-tools ];
 
         serviceConfig = {
           Type = "oneshot";
@@ -279,7 +279,7 @@ in
   config = mkIf (cfg.interfaces != {}) {
 
     boot.extraModulePackages = [ kernel.wireguard ];
-    environment.systemPackages = [ pkgs.wireguard ];
+    environment.systemPackages = [ pkgs.wireguard-tools ];
 
     systemd.services = mapAttrs' generateUnit cfg.interfaces;
 
diff --git a/nixos/modules/services/networking/xrdp.nix b/nixos/modules/services/networking/xrdp.nix
index bf23c6ae6192..0e882873b4ba 100644
--- a/nixos/modules/services/networking/xrdp.nix
+++ b/nixos/modules/services/networking/xrdp.nix
@@ -97,6 +97,7 @@ in
     # xrdp can run X11 program even if "services.xserver.enable = false"
     environment.pathsToLink =
       [ "/etc/xdg" "/share/xdg" "/share/applications" "/share/icons" "/share/pixmaps" ];
+    fonts.enableDefaultFonts = mkDefault true;
 
     systemd = {
       services.xrdp = {
diff --git a/nixos/modules/services/networking/zerotierone.nix b/nixos/modules/services/networking/zerotierone.nix
index cd1617b8e2ba..4c1ee75d536c 100644
--- a/nixos/modules/services/networking/zerotierone.nix
+++ b/nixos/modules/services/networking/zerotierone.nix
@@ -47,7 +47,7 @@ in
     };
 
     # ZeroTier does not issue DHCP leases, but some strangers might...
-    networking.dhcpcd.denyInterfaces = [ "zt0" ];
+    networking.dhcpcd.denyInterfaces = [ "zt*" ];
 
     # ZeroTier receives UDP transmissions on port 9993 by default
     networking.firewall.allowedUDPPorts = [ 9993 ];
diff --git a/nixos/modules/services/networking/znc.nix b/nixos/modules/services/networking/znc.nix
index 72313ab2ee14..f817db2ad000 100644
--- a/nixos/modules/services/networking/znc.nix
+++ b/nixos/modules/services/networking/znc.nix
@@ -26,7 +26,6 @@ let
   };
 
   # Keep znc.conf in nix store, then symlink or copy into `dataDir`, depending on `mutable`.
-  notNull = a: ! isNull a;
   mkZncConf = confOpts: ''
     Version = 1.6.3
     ${concatMapStrings (n: "LoadModule = ${n}\n") confOpts.modules}
@@ -36,6 +35,7 @@ let
             IPv4 = true
             IPv6 = true
             SSL = ${boolToString confOpts.useSSL}
+            ${lib.optionalString (confOpts.uriPrefix != null) "URIPrefix = ${confOpts.uriPrefix}"}
     </Listener>
 
     <User ${confOpts.userName}>
@@ -310,6 +310,16 @@ in
           '';
         };
 
+        uriPrefix = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          example = "/znc/";
+          description = ''
+            An optional URI prefix for the ZNC web interface. Can be
+            used to make ZNC available behind a reverse proxy.
+          '';
+        };
+
         extraZncConf = mkOption {
           default = "";
           type = types.lines;
@@ -402,7 +412,7 @@ in
       script = "${pkgs.znc}/bin/znc --foreground --datadir ${cfg.dataDir} ${toString cfg.extraFlags}";
     };
 
-    users.extraUsers = optional (cfg.user == defaultUser)
+    users.users = optional (cfg.user == defaultUser)
       { name = defaultUser;
         description = "ZNC server daemon owner";
         group = defaultUser;
@@ -411,7 +421,7 @@ in
         createHome = true;
       };
 
-    users.extraGroups = optional (cfg.user == defaultUser)
+    users.groups = optional (cfg.user == defaultUser)
       { name = defaultUser;
         gid = config.ids.gids.znc;
         members = [ defaultUser ];
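A sketch of the new uriPrefix option together with a reverse proxy in front of ZNC; the nginx vhost is illustrative, and 5000 is assumed to be the configured ZNC listener port:

    {
      services.znc = {
        enable = true;
        confOptions.uriPrefix = "/znc/";
      };
      services.nginx.virtualHosts."irc.example.org".locations."/znc/".proxyPass =
        "http://127.0.0.1:5000";
    }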
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index c4147986439c..dbf18ec1d114 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -124,6 +124,16 @@ in
         '';
       };
 
+      startWhenNeeded = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          If set, CUPS is socket-activated; that is,
+          instead of having it permanently running as a daemon,
+          systemd will start it on the first incoming connection.
+        '';
+      };
+
       listenAddresses = mkOption {
         type = types.listOf types.str;
         default = [ "localhost:631" ];
@@ -268,7 +278,7 @@ in
 
   config = mkIf config.services.printing.enable {
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "cups";
         uid = config.ids.uids.cups;
         group = "lp";
@@ -287,8 +297,13 @@ in
 
     systemd.packages = [ cups.out ];
 
+    systemd.sockets.cups = mkIf cfg.startWhenNeeded {
+      wantedBy = [ "sockets.target" ];
+      listenStreams = map (x: replaceStrings ["localhost"] ["127.0.0.1"] (removePrefix "*:" x)) cfg.listenAddresses;
+    };
+
     systemd.services.cups =
-      { wantedBy = [ "multi-user.target" ];
+      { wantedBy = optionals (!cfg.startWhenNeeded) [ "multi-user.target" ];
         wants = [ "network.target" ];
         after = [ "network.target" ];
 
diff --git a/nixos/modules/services/scheduling/atd.nix b/nixos/modules/services/scheduling/atd.nix
index 77a3f6b51e80..a32907647a0d 100644
--- a/nixos/modules/services/scheduling/atd.nix
+++ b/nixos/modules/services/scheduling/atd.nix
@@ -57,14 +57,14 @@ in
 
     security.pam.services.atd = {};
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "atd";
         uid = config.ids.uids.atd;
         description = "atd user";
         home = "/var/empty";
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "atd";
         gid = config.ids.gids.atd;
       };
diff --git a/nixos/modules/services/scheduling/chronos.nix b/nixos/modules/services/scheduling/chronos.nix
index 6c39997fec88..9a8ed4c09ac1 100644
--- a/nixos/modules/services/scheduling/chronos.nix
+++ b/nixos/modules/services/scheduling/chronos.nix
@@ -49,6 +49,6 @@ in {
       };
     };
 
-    users.extraUsers.chronos.uid = config.ids.uids.chronos;
+    users.users.chronos.uid = config.ids.uids.chronos;
   };
 }
diff --git a/nixos/modules/services/scheduling/fcron.nix b/nixos/modules/services/scheduling/fcron.nix
index e3b6b638f5a7..ae3828977753 100644
--- a/nixos/modules/services/scheduling/fcron.nix
+++ b/nixos/modules/services/scheduling/fcron.nix
@@ -115,7 +115,7 @@ in
       ];
 
     environment.systemPackages = [ pkgs.fcron ];
-    users.extraUsers.fcron = {
+    users.users.fcron = {
       uid = config.ids.uids.fcron;
       home = "/var/spool/fcron";
       group = "fcron";
@@ -128,6 +128,7 @@ in
         owner = "fcron";
         group = "fcron";
         setgid = true;
+        setuid = true;
       };
       fcrondyn = {
         source = "${pkgs.fcron}/bin/fcrondyn";
diff --git a/nixos/modules/services/scheduling/marathon.nix b/nixos/modules/services/scheduling/marathon.nix
index 19c9a708f21f..0961a67770e1 100644
--- a/nixos/modules/services/scheduling/marathon.nix
+++ b/nixos/modules/services/scheduling/marathon.nix
@@ -93,6 +93,6 @@ in {
       };
     };
 
-    users.extraUsers.${cfg.user} = { };
+    users.users.${cfg.user} = { };
   };
 }
diff --git a/nixos/modules/services/search/hound.nix b/nixos/modules/services/search/hound.nix
index a94a851e80ec..6740928db9a7 100644
--- a/nixos/modules/services/search/hound.nix
+++ b/nixos/modules/services/search/hound.nix
@@ -88,12 +88,12 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.extraGroups = optional (cfg.group == "hound") {
+    users.groups = optional (cfg.group == "hound") {
       name = "hound";
       gid = config.ids.gids.hound;
     };
 
-    users.extraUsers = optional (cfg.user == "hound") {
+    users.users = optional (cfg.user == "hound") {
       name = "hound";
       description = "hound code search";
       createHome = true;
diff --git a/nixos/modules/services/search/kibana.nix b/nixos/modules/services/search/kibana.nix
index 9d7d2d799189..5885a72c6628 100644
--- a/nixos/modules/services/search/kibana.nix
+++ b/nixos/modules/services/search/kibana.nix
@@ -193,7 +193,7 @@ in {
 
     environment.systemPackages = [ cfg.package ];
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = "kibana";
       uid = config.ids.uids.kibana;
       description = "Kibana service user";
diff --git a/nixos/modules/services/security/clamav.nix b/nixos/modules/services/security/clamav.nix
index 4161c61ed375..9ad0095252de 100644
--- a/nixos/modules/services/security/clamav.nix
+++ b/nixos/modules/services/security/clamav.nix
@@ -79,7 +79,7 @@ in
   config = mkIf (cfg.updater.enable || cfg.daemon.enable) {
     environment.systemPackages = [ pkg ];
 
-    users.extraUsers = singleton {
+    users.users = singleton {
       name = clamavUser;
       uid = config.ids.uids.clamav;
       group = clamavGroup;
@@ -87,7 +87,7 @@ in
       home = stateDir;
     };
 
-    users.extraGroups = singleton {
+    users.groups = singleton {
       name = clamavGroup;
       gid = config.ids.gids.clamav;
     };
diff --git a/nixos/modules/services/security/fprot.nix b/nixos/modules/services/security/fprot.nix
index a12aa01503e3..b1ca4ab23452 100644
--- a/nixos/modules/services/security/fprot.nix
+++ b/nixos/modules/services/security/fprot.nix
@@ -53,14 +53,14 @@ in {
       target = "f-prot.conf";
     };
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = fprotUser;
         uid = config.ids.uids.fprot;
         description = "F-Prot daemon user";
         home = stateDir;
       };
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = fprotGroup;
         gid = config.ids.gids.fprot;
       };
diff --git a/nixos/modules/services/security/munge.nix b/nixos/modules/services/security/munge.nix
index 919c2c2b0e15..5bca15833544 100644
--- a/nixos/modules/services/security/munge.nix
+++ b/nixos/modules/services/security/munge.nix
@@ -35,7 +35,15 @@ in
 
     environment.systemPackages = [ pkgs.munge ];
 
-    systemd.services.munged = { 
+    users.users.munge = {
+      description   = "Munge daemon user";
+      isSystemUser  = true;
+      group         = "munge";
+    };
+
+    users.groups.munge = {};
+
+    systemd.services.munged = {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
 
@@ -44,14 +52,20 @@ in
       preStart = ''
         chmod 0700 ${cfg.password}
         mkdir -p /var/lib/munge -m 0711
+        chown -R munge:munge /var/lib/munge
         mkdir -p /var/log/munge -m 0700
+        chown -R munge:munge /var/log/munge
         mkdir -p /run/munge -m 0755
+        chown -R munge:munge /run/munge
       '';
 
       serviceConfig = {
         ExecStart = "${pkgs.munge}/bin/munged --syslog --key-file ${cfg.password}";
         PIDFile = "/run/munge/munged.pid";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        PermissionsStartOnly = "true";
+        User = "munge";
+        Group = "munge";
       };
 
     };
diff --git a/nixos/modules/services/security/oauth2_proxy.nix b/nixos/modules/services/security/oauth2_proxy.nix
index 433d97c2a7d7..0c5fe8c0ef5f 100644
--- a/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixos/modules/services/security/oauth2_proxy.nix
@@ -72,6 +72,7 @@ let
 
   mapConfig = key: attr:
   if (!isNull attr && attr != []) then (
+    if isDerivation attr then mapConfig key (toString attr) else
     if (builtins.typeOf attr) == "set" then concatStringsSep " "
       (mapAttrsToList (name: value: mapConfig (key + "-" + name) value) attr) else
     if (builtins.typeOf attr) == "list" then concatMapStringsSep " " (mapConfig key) attr else
@@ -543,7 +544,7 @@ in
       cookie.secret = mkDefault null;
     };
 
-    users.extraUsers.oauth2_proxy = {
+    users.users.oauth2_proxy = {
       description = "OAuth2 Proxy";
     };
 
diff --git a/nixos/modules/services/security/oauth2_proxy_nginx.nix b/nixos/modules/services/security/oauth2_proxy_nginx.nix
new file mode 100644
index 000000000000..2aa2c57fd22c
--- /dev/null
+++ b/nixos/modules/services/security/oauth2_proxy_nginx.nix
@@ -0,0 +1,64 @@
+{ pkgs, config, lib, ... }:
+with lib;
+let
+  cfg = config.services.oauth2_proxy.nginx;
+in
+{
+  options.services.oauth2_proxy.nginx = {
+    proxy = mkOption {
+      type = types.str;
+      default = config.services.oauth2_proxy.httpAddress;
+      description = ''
+        The address of the reverse proxy endpoint for oauth2_proxy.
+      '';
+    };
+    virtualHosts = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = ''
+        A list of nginx virtual hosts to put behind the oauth2 proxy.
+      '';
+    };
+  };
+  config.services.oauth2_proxy = mkIf (cfg.virtualHosts != [] && (hasPrefix "127.0.0.1:" cfg.proxy)) {
+    enable = true;
+  };
+  config.services.nginx = mkMerge ((optional (cfg.virtualHosts != []) {
+    recommendedProxySettings = true; # needed because of duplicate headers
+  }) ++ (map (vhost: {
+    virtualHosts.${vhost} = {
+      locations."/oauth2/" = {
+        proxyPass = cfg.proxy;
+        extraConfig = ''
+          proxy_set_header X-Scheme                $scheme;
+          proxy_set_header X-Auth-Request-Redirect $request_uri;
+        '';
+      };
+      locations."/oauth2/auth" = {
+        proxyPass = cfg.proxy;
+        extraConfig = ''
+          proxy_set_header X-Scheme         $scheme;
+          # nginx auth_request includes headers but not body
+          proxy_set_header Content-Length   "";
+          proxy_pass_request_body           off;
+        '';
+      };
+      locations."/".extraConfig = ''
+        auth_request /oauth2/auth;
+        error_page 401 = /oauth2/sign_in;
+
+        # pass information via X-User and X-Email headers to backend,
+        # requires running with --set-xauthrequest flag
+        auth_request_set $user   $upstream_http_x_auth_request_user;
+        auth_request_set $email  $upstream_http_x_auth_request_email;
+        proxy_set_header X-User  $user;
+        proxy_set_header X-Email $email;
+
+        # if you enabled --cookie-refresh, this is needed for it to work with auth_request
+        auth_request_set $auth_cookie $upstream_http_set_cookie;
+        add_header Set-Cookie $auth_cookie;
+      '';
+
+    };
+  }) cfg.virtualHosts));
+}
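A sketch of how this new module is meant to be used; the host name and backend address are illustrative:

    {
      services.oauth2_proxy.nginx.virtualHosts = [ "protected.example.com" ];
      # The listed vhosts get the /oauth2/ locations and auth_request wiring
      # defined above; the protected application itself is proxied as usual:
      services.nginx.virtualHosts."protected.example.com".locations."/".proxyPass =
        "http://127.0.0.1:8000";
    }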
diff --git a/nixos/modules/services/security/tor.nix b/nixos/modules/services/security/tor.nix
index 806252f49b8d..dcb41d187c2b 100644
--- a/nixos/modules/services/security/tor.nix
+++ b/nixos/modules/services/security/tor.nix
@@ -39,7 +39,7 @@ let
     ''}
 
     ${optint "ControlPort" cfg.controlPort}
-    ${optionalString cfg.controlSocket.enable "ControlSocket ${torRunDirectory}/control GroupWritable RelaxDirModeCheck"}
+    ${optionalString cfg.controlSocket.enable "ControlPort unix:${torRunDirectory}/control GroupWritable RelaxDirModeCheck"}
   ''
   # Client connection config
   + optionalString cfg.client.enable ''
@@ -360,7 +360,7 @@ in
 
                 <important>
                   <para>
-                    WARNING: THE FOLLOWING PARAGRAPH IS NOT LEGAL ADVISE.
+                    WARNING: THE FOLLOWING PARAGRAPH IS NOT LEGAL ADVICE.
                    Consult with your lawyer when in doubt.
                   </para>
 
@@ -686,8 +686,8 @@ in
         always create a container/VM with a separate Tor daemon instance.
       '';
 
-    users.extraGroups.tor.gid = config.ids.gids.tor;
-    users.extraUsers.tor =
+    users.groups.tor.gid = config.ids.gids.tor;
+    users.users.tor =
       { description = "Tor Daemon User";
         createHome  = true;
         home        = torDirectory;
@@ -695,19 +695,38 @@ in
         uid         = config.ids.uids.tor;
       };
 
+    # We have to do this instead of using the RuntimeDirectory option of
+    # the service below, because systemd has no way to set the owner of a
+    # RuntimeDirectory, and putting this into the service below would
+    # require that service to relax its sandbox, since this needs a
+    # writable /run
+    systemd.services.tor-init =
+      { description = "Tor Daemon Init";
+        wantedBy = [ "tor.service" ];
+        after = [ "local-fs.target" ];
+        script = ''
+          install -m 0700 -o tor -g tor -d ${torDirectory} ${torDirectory}/onion
+          install -m 0750 -o tor -g tor -d ${torRunDirectory}
+        '';
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+      };
+
     systemd.services.tor =
       { description = "Tor Daemon";
         path = [ pkgs.tor ];
 
         wantedBy = [ "multi-user.target" ];
-        after    = [ "network.target" ];
+        after    = [ "tor-init.service" "network.target" ];
         restartTriggers = [ torRcFile ];
 
         serviceConfig =
           { Type         = "simple";
             # Translated from the upstream contrib/dist/tor.service.in
             ExecStartPre = "${pkgs.tor}/bin/tor -f ${torRcFile} --verify-config";
-            ExecStart    = "${pkgs.tor}/bin/tor -f ${torRcFile} --RunAsDaemon 0";
+            ExecStart    = "${pkgs.tor}/bin/tor -f ${torRcFile}";
             ExecReload   = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
             KillSignal   = "SIGINT";
             TimeoutSec   = 30;
@@ -715,20 +734,18 @@ in
             LimitNOFILE  = 32768;
 
             # Hardening
-            # Note: DevicePolicy is set to 'closed', although the
-            # minimal permissions are really:
-            #   DeviceAllow /dev/null rw
-            #   DeviceAllow /dev/urandom r
-            # .. but we can't specify DeviceAllow multiple times. 'closed'
-            # is close enough.
-            RuntimeDirectory        = "tor";
-            StateDirectory          = [ "tor" "tor/onion" ];
-            PrivateTmp              = "yes";
-            DevicePolicy            = "closed";
-            InaccessibleDirectories = "/home";
-            ReadOnlyDirectories     = "/";
-            ReadWriteDirectories    = [torDirectory torRunDirectory];
+            # this seems to unshare /run despite what systemd.exec(5) says
+            PrivateTmp              = mkIf (!cfg.controlSocket.enable) "yes";
+            PrivateDevices          = "yes";
+            ProtectHome             = "yes";
+            ProtectSystem           = "strict";
+            InaccessiblePaths       = "/home";
+            ReadOnlyPaths           = "/";
+            ReadWritePaths          = [ torDirectory torRunDirectory ];
             NoNewPrivileges         = "yes";
+
+            # tor.service.in has this, but with this line tor fails to spawn a namespace when using hidden services
+            #CapabilityBoundingSet   = "CAP_SETUID CAP_SETGID CAP_NET_BIND_SERVICE";
           };
       };
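A sketch of enabling the control socket with the new "ControlPort unix:" syntax; the socket path follows torRunDirectory as defined in this module:

    {
      services.tor = {
        enable = true;
        controlSocket.enable = true;  # group-writable socket at /run/tor/control
      };
    }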
 
diff --git a/nixos/modules/services/security/vault.nix b/nixos/modules/services/security/vault.nix
index 146afec344ab..47c70cf0687b 100644
--- a/nixos/modules/services/security/vault.nix
+++ b/nixos/modules/services/security/vault.nix
@@ -97,13 +97,13 @@ in
       }
     ];
 
-    users.extraUsers.vault = {
+    users.users.vault = {
       name = "vault";
       group = "vault";
       uid = config.ids.uids.vault;
       description = "Vault daemon user";
     };
-    users.extraGroups.vault.gid = config.ids.gids.vault;
+    users.groups.vault.gid = config.ids.gids.vault;
 
     systemd.services.vault = {
       description = "Vault server daemon";
diff --git a/nixos/modules/services/system/dbus.nix b/nixos/modules/services/system/dbus.nix
index 643bec188142..e04580218442 100644
--- a/nixos/modules/services/system/dbus.nix
+++ b/nixos/modules/services/system/dbus.nix
@@ -71,14 +71,14 @@ in
         target = "dbus-1";
       };
 
-    users.extraUsers.messagebus = {
+    users.users.messagebus = {
       uid = config.ids.uids.messagebus;
       description = "D-Bus system message bus daemon user";
       home = homeDir;
       group = "messagebus";
     };
 
-    users.extraGroups.messagebus.gid = config.ids.gids.messagebus;
+    users.groups.messagebus.gid = config.ids.gids.messagebus;
 
     systemd.packages = [ pkgs.dbus.daemon ];
 
@@ -100,6 +100,7 @@ in
       # Don't restart dbus-daemon. Bad things tend to happen if we do.
       reloadIfChanged = true;
       restartTriggers = [ configDir ];
+      environment = { LD_LIBRARY_PATH = config.system.nssModules.path; };
     };
 
     systemd.user = {
diff --git a/nixos/modules/services/system/kerberos.nix b/nixos/modules/services/system/kerberos.nix
index 4f2e2fdf662b..d85dee089827 100644
--- a/nixos/modules/services/system/kerberos.nix
+++ b/nixos/modules/services/system/kerberos.nix
@@ -41,8 +41,8 @@ in
         flags = "REUSE NAMEINARGS";
         protocol = "tcp";
         user = "root";
-        server = "${pkgs.tcp_wrappers}/sbin/tcpd";
-        serverArgs = "${pkgs.heimdalFull}/sbin/kadmind";
+        server = "${pkgs.tcp_wrappers}/bin/tcpd";
+        serverArgs = "${pkgs.heimdalFull}/bin/kadmind";
       };
 
     systemd.services.kdc = {
@@ -51,13 +51,13 @@ in
       preStart = ''
         mkdir -m 0755 -p ${stateDir}
       '';
-      script = "${heimdalFull}/sbin/kdc";
+      script = "${heimdalFull}/bin/kdc";
     };
 
     systemd.services.kpasswdd = {
       description = "Kerberos Password Changing daemon";
       wantedBy = [ "multi-user.target" ];
-      script = "${heimdalFull}/sbin/kpasswdd";
+      script = "${heimdalFull}/bin/kpasswdd";
     };
   };
 
diff --git a/nixos/modules/services/system/nscd.nix b/nixos/modules/services/system/nscd.nix
index eb4b5281c7c6..11a30ea81ba9 100644
--- a/nixos/modules/services/system/nscd.nix
+++ b/nixos/modules/services/system/nscd.nix
@@ -41,7 +41,7 @@ in
   config = mkIf cfg.enable {
     environment.etc."nscd.conf".text = cfg.config;
 
-    users.extraUsers.nscd =
+    users.users.nscd =
       { isSystemUser = true;
         description = "Name service cache daemon user";
       };
diff --git a/nixos/modules/services/system/saslauthd.nix b/nixos/modules/services/system/saslauthd.nix
index 281716cf1860..c8ddca9a0db6 100644
--- a/nixos/modules/services/system/saslauthd.nix
+++ b/nixos/modules/services/system/saslauthd.nix
@@ -4,7 +4,6 @@ with lib;
 
 let
 
-  nssModulesPath = config.system.nssModules.path;
   cfg = config.services.saslauthd;
 
 in
diff --git a/nixos/modules/services/system/uptimed.nix b/nixos/modules/services/system/uptimed.nix
index b20d60968032..3c9978ab2269 100644
--- a/nixos/modules/services/system/uptimed.nix
+++ b/nixos/modules/services/system/uptimed.nix
@@ -20,7 +20,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers.uptimed = {
+    users.users.uptimed = {
       description = "Uptimed daemon user";
       home        = stateDir;
       createHome  = true;
diff --git a/nixos/modules/services/torrent/deluge.nix b/nixos/modules/services/torrent/deluge.nix
index bff22cd13594..84f0437b9411 100644
--- a/nixos/modules/services/torrent/deluge.nix
+++ b/nixos/modules/services/torrent/deluge.nix
@@ -55,7 +55,7 @@ in {
 
     environment.systemPackages = [ pkgs.deluge ];
 
-    users.extraUsers.deluge = {
+    users.users.deluge = {
       group = "deluge";
       uid = config.ids.uids.deluge;
       home = "/var/lib/deluge/";
@@ -63,6 +63,6 @@ in {
       description = "Deluge Daemon user";
     };
 
-    users.extraGroups.deluge.gid = config.ids.gids.deluge;
+    users.groups.deluge.gid = config.ids.gids.deluge;
   };
 }
diff --git a/nixos/modules/services/torrent/peerflix.nix b/nixos/modules/services/torrent/peerflix.nix
index 2e3dd9902d72..bed6661f84d6 100644
--- a/nixos/modules/services/torrent/peerflix.nix
+++ b/nixos/modules/services/torrent/peerflix.nix
@@ -58,6 +58,6 @@ in {
       };
     };
 
-    users.extraUsers.peerflix.uid = config.ids.uids.peerflix;
+    users.users.peerflix.uid = config.ids.uids.peerflix;
   };
 }
diff --git a/nixos/modules/services/torrent/transmission.nix b/nixos/modules/services/torrent/transmission.nix
index 3564afd77f41..96413d2dd563 100644
--- a/nixos/modules/services/torrent/transmission.nix
+++ b/nixos/modules/services/torrent/transmission.nix
@@ -13,12 +13,6 @@ let
   settingsDir = "${homeDir}/.config/transmission-daemon";
   settingsFile = pkgs.writeText "settings.json" (builtins.toJSON fullSettings);
 
-  # Strings must be quoted, ints and bools must not (for settings.json).
-  toOption = x:
-    if isBool x then boolToString x
-    else if isInt x then toString x
-    else toString ''"${x}"'';
-
   # for users in group "transmission" to have access to torrents
   fullSettings = { umask = 2; download-dir = downloadDir; incomplete-dir = incompleteDir; } // cfg.settings;
 
@@ -113,8 +107,8 @@ in
     # It's useful to have transmission in path, e.g. for remote control
     environment.systemPackages = [ pkgs.transmission ];
 
-    users.extraGroups.transmission.gid = config.ids.gids.transmission;
-    users.extraUsers.transmission = {
+    users.groups.transmission.gid = config.ids.gids.transmission;
+    users.users.transmission = {
       group = "transmission";
       uid = config.ids.uids.transmission;
       description = "Transmission BitTorrent user";
diff --git a/nixos/modules/services/web-apps/atlassian/confluence.nix b/nixos/modules/services/web-apps/atlassian/confluence.nix
index 84c41b6e53c2..f896d92fd6fc 100644
--- a/nixos/modules/services/web-apps/atlassian/confluence.nix
+++ b/nixos/modules/services/web-apps/atlassian/confluence.nix
@@ -137,12 +137,12 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers."${cfg.user}" = {
+    users.users."${cfg.user}" = {
       isSystemUser = true;
       group = cfg.group;
     };
 
-    users.extraGroups."${cfg.group}" = {};
+    users.groups."${cfg.group}" = {};
 
     systemd.services.confluence = {
       description = "Atlassian Confluence";
diff --git a/nixos/modules/services/web-apps/atlassian/crowd.nix b/nixos/modules/services/web-apps/atlassian/crowd.nix
index 0ac941b6ec99..b6cb9f3b7c41 100644
--- a/nixos/modules/services/web-apps/atlassian/crowd.nix
+++ b/nixos/modules/services/web-apps/atlassian/crowd.nix
@@ -103,12 +103,12 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers."${cfg.user}" = {
+    users.users."${cfg.user}" = {
       isSystemUser = true;
       group = cfg.group;
     };
 
-    users.extraGroups."${cfg.group}" = {};
+    users.groups."${cfg.group}" = {};
 
     systemd.services.atlassian-crowd = {
       description = "Atlassian Crowd";
@@ -126,12 +126,13 @@ in
       };
 
       preStart = ''
-        mkdir -p ${cfg.home}/{logs,work,database}
+        rm -rf ${cfg.home}/work
+        mkdir -p ${cfg.home}/{logs,database,work}
 
         mkdir -p /run/atlassian-crowd
         ln -sf ${cfg.home}/{database,work,server.xml} /run/atlassian-crowd
 
-        chown -R ${cfg.user} ${cfg.home}
+        chown -R ${cfg.user}:${cfg.group} ${cfg.home}
 
         sed -e 's,port="8095",port="${toString cfg.listenPort}" address="${cfg.listenAddress}",' \
         '' + (lib.optionalString cfg.proxy.enable ''
diff --git a/nixos/modules/services/web-apps/atlassian/jira.nix b/nixos/modules/services/web-apps/atlassian/jira.nix
index 13c5951524d9..f5ec0a5f31b8 100644
--- a/nixos/modules/services/web-apps/atlassian/jira.nix
+++ b/nixos/modules/services/web-apps/atlassian/jira.nix
@@ -141,12 +141,12 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers."${cfg.user}" = {
+    users.users."${cfg.user}" = {
       isSystemUser = true;
       group = cfg.group;
     };
 
-    users.extraGroups."${cfg.group}" = {};
+    users.groups."${cfg.group}" = {};
 
     systemd.services.atlassian-jira = {
       description = "Atlassian JIRA";
diff --git a/nixos/modules/services/web-apps/frab.nix b/nixos/modules/services/web-apps/frab.nix
index d5329ef03c89..fb95e024817c 100644
--- a/nixos/modules/services/web-apps/frab.nix
+++ b/nixos/modules/services/web-apps/frab.nix
@@ -6,7 +6,6 @@ let
   cfg = config.services.frab;
 
   package = pkgs.frab;
-  ruby = package.ruby;
 
   databaseConfig = builtins.toJSON { production = cfg.database; };
 
@@ -174,14 +173,14 @@ in
   config = mkIf cfg.enable {
     environment.systemPackages = [ frab-rake ];
 
-    users.extraUsers = [
+    users.users = [
       { name = cfg.user;
         group = cfg.group;
         home = "${cfg.statePath}";
       }
     ];
 
-    users.extraGroups = [ { name = cfg.group; } ];
+    users.groups = [ { name = cfg.group; } ];
 
     systemd.services.frab = {
       after = [ "network.target" "gitlab.service" ];
diff --git a/nixos/modules/services/web-apps/matomo.nix b/nixos/modules/services/web-apps/matomo.nix
index ef6ac9698e21..42affb06b51f 100644
--- a/nixos/modules/services/web-apps/matomo.nix
+++ b/nixos/modules/services/web-apps/matomo.nix
@@ -109,13 +109,13 @@ in {
         message = "Either services.matomo.nginx or services.matomo.nginx.webServerUser is mandatory";
     }];
 
-    users.extraUsers.${user} = {
+    users.users.${user} = {
       isSystemUser = true;
       createHome = true;
       home = dataDir;
       group  = user;
     };
-    users.extraGroups.${user} = {};
+    users.groups.${user} = {};
 
     systemd.services.matomo_setup_update = {
       # everything needs to set up and up to date before matomo php files are executed
@@ -241,6 +241,6 @@ in {
 
   meta = {
     doc = ./matomo-doc.xml;
-    maintainers = with stdenv.lib.maintainers; [ florianjacob ];
+    maintainers = with lib.maintainers; [ florianjacob ];
   };
 }
diff --git a/nixos/modules/services/web-apps/mattermost.nix b/nixos/modules/services/web-apps/mattermost.nix
index be74a2b1955b..8c7fc4056adc 100644
--- a/nixos/modules/services/web-apps/mattermost.nix
+++ b/nixos/modules/services/web-apps/mattermost.nix
@@ -25,7 +25,7 @@ in
 {
   options = {
     services.mattermost = {
-      enable = mkEnableOption "Mattermost chat platform";
+      enable = mkEnableOption "Mattermost chat server";
 
       statePath = mkOption {
         type = types.str;
@@ -146,14 +146,14 @@ in
 
   config = mkMerge [
     (mkIf cfg.enable {
-      users.extraUsers = optionalAttrs (cfg.user == "mattermost") (singleton {
+      users.users = optionalAttrs (cfg.user == "mattermost") (singleton {
         name = "mattermost";
         group = cfg.group;
         uid = config.ids.uids.mattermost;
         home = cfg.statePath;
       });
 
-      users.extraGroups = optionalAttrs (cfg.group == "mattermost") (singleton {
+      users.groups = optionalAttrs (cfg.group == "mattermost") (singleton {
         name = "mattermost";
         gid = config.ids.gids.mattermost;
       });
@@ -167,7 +167,7 @@ in
       '';
 
       systemd.services.mattermost = {
-        description = "Mattermost chat platform service";
+        description = "Mattermost chat service";
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" "postgresql.service" ];
 
@@ -201,13 +201,13 @@ in
           PermissionsStartOnly = true;
           User = cfg.user;
           Group = cfg.group;
-          ExecStart = "${pkgs.mattermost}/bin/mattermost-platform";
+          ExecStart = "${pkgs.mattermost}/bin/mattermost";
           WorkingDirectory = "${cfg.statePath}";
-          JoinsNamespaceOf = mkIf cfg.localDatabaseCreate "postgresql.service";
           Restart = "always";
           RestartSec = "10";
           LimitNOFILE = "49152";
         };
+        unitConfig.JoinsNamespaceOf = mkIf cfg.localDatabaseCreate "postgresql.service";
       };
     })
     (mkIf cfg.matterircd.enable {
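
Note on the JoinsNamespaceOf move: systemd treats JoinsNamespaceOf= as a [Unit]
directive, not a [Service] one, so it belongs under unitConfig rather than
serviceConfig. A minimal sketch (myapp is a placeholder unit name):

    systemd.services.myapp = {
      # [Service] directives go under serviceConfig
      serviceConfig.PrivateTmp = true;
      # [Unit] directives such as JoinsNamespaceOf go under unitConfig
      unitConfig.JoinsNamespaceOf = "postgresql.service";
    };
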
diff --git a/nixos/modules/services/web-apps/nexus.nix b/nixos/modules/services/web-apps/nexus.nix
index d5bd0f12febb..050f8757fa5f 100644
--- a/nixos/modules/services/web-apps/nexus.nix
+++ b/nixos/modules/services/web-apps/nexus.nix
@@ -13,6 +13,12 @@ in
     services.nexus = {
       enable = mkEnableOption "Sonatype Nexus3 OSS service";
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.nexus;
+        description = "Package which runs Nexus3";
+      };
+
       user = mkOption {
         type = types.str;
         default = "nexus";
@@ -55,10 +61,10 @@ in
           -XX:LogFile=${cfg.home}/nexus3/log/jvm.log
           -XX:-OmitStackTraceInFastThrow
           -Djava.net.preferIPv4Stack=true
-          -Dkaraf.home=${pkgs.nexus}
-          -Dkaraf.base=${pkgs.nexus}
-          -Dkaraf.etc=${pkgs.nexus}/etc/karaf
-          -Djava.util.logging.config.file=${pkgs.nexus}/etc/karaf/java.util.logging.properties
+          -Dkaraf.home=${cfg.package}
+          -Dkaraf.base=${cfg.package}
+          -Dkaraf.etc=${cfg.package}/etc/karaf
+          -Djava.util.logging.config.file=${cfg.package}/etc/karaf/java.util.logging.properties
           -Dkaraf.data=${cfg.home}/nexus3
           -Djava.io.tmpdir=${cfg.home}/nexus3/tmp
           -Dkaraf.startLocalConsole=false
@@ -74,12 +80,12 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.extraUsers."${cfg.user}" = {
+    users.users."${cfg.user}" = {
       isSystemUser = true;
       group = cfg.group;
     };
 
-    users.extraGroups."${cfg.group}" = {};
+    users.groups."${cfg.group}" = {};
 
     systemd.services.nexus = {
       description = "Sonatype Nexus3";
@@ -112,7 +118,7 @@ in
         fi
       '';
 
-      script = "${pkgs.nexus}/bin/nexus run";
+      script = "${cfg.package}/bin/nexus run";
 
       serviceConfig = {
         User = cfg.user;
@@ -124,5 +130,5 @@ in
     };
   };
 
-  meta.maintainers = with stdenv.lib.maintainers; [ ironpinguin ];
+  meta.maintainers = with lib.maintainers; [ ironpinguin ];
 }
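
With the new package option, the karaf paths and the start script above follow
whatever derivation is configured instead of being hardwired to pkgs.nexus. A
usage sketch:

    services.nexus = {
      enable = true;
      # any Nexus3 derivation works here; pkgs.nexus is the default
      package = pkgs.nexus;
    };
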
diff --git a/nixos/modules/services/web-apps/restya-board.nix b/nixos/modules/services/web-apps/restya-board.nix
index cee725e8fe5f..bc6689bdb271 100644
--- a/nixos/modules/services/web-apps/restya-board.nix
+++ b/nixos/modules/services/web-apps/restya-board.nix
@@ -358,13 +358,13 @@ in
       '';
     };
 
-    users.extraUsers.restya-board = {
+    users.users.restya-board = {
       isSystemUser = true;
       createHome = false;
       home = runDir;
       group  = "restya-board";
     };
-    users.extraGroups.restya-board = {};
+    users.groups.restya-board = {};
 
     services.postgresql.enable = mkIf (isNull cfg.database.host) true;
 
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index 610c6463a5eb..2b171aa1b2b2 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -76,6 +76,8 @@ let
       define('SMTP_FROM_NAME', '${escape ["'" "\\"] cfg.email.fromName}');
       define('SMTP_FROM_ADDRESS', '${escape ["'" "\\"] cfg.email.fromAddress}');
       define('DIGEST_SUBJECT', '${escape ["'" "\\"] cfg.email.digestSubject}');
+
+      ${cfg.extraConfig}
   '';
 
  in {
@@ -431,6 +433,26 @@ let
         '';
       };
 
+      pluginPackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = ''
+          List of plugins to install. The list elements are expected to
+          be derivations. All files in these derivations are automatically
+          copied to the <literal>plugins.local</literal> directory.
+        '';
+      };
+
+      themePackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = ''
+          List of themes to install. The list elements are expected to
+          be derivations. All files in these derivations are automatically
+          copied to the <literal>themes.local</literal> directory.
+        '';
+      };
+
       logDestination = mkOption {
         type = types.enum ["" "sql" "syslog"];
         default = "sql";
@@ -441,6 +463,14 @@ let
           error.log).
         '';
       };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Additional lines to append to <literal>config.php</literal>.
+        '';
+      };
     };
   };
 
@@ -517,6 +547,16 @@ let
           rm -rf "${cfg.root}/*"
           mkdir -m 755 -p "${cfg.root}"
           cp -r "${pkgs.tt-rss}/"* "${cfg.root}"
+          ${optionalString (cfg.pluginPackages != []) ''
+            for plugin in ${concatStringsSep " " cfg.pluginPackages}; do
+              cp -r "$plugin"/* "${cfg.root}/plugins.local/"
+            done
+          ''}
+          ${optionalString (cfg.themePackages != []) ''
+            for theme in ${concatStringsSep " " cfg.themePackages}; do
+              cp -r "$theme"/* "${cfg.root}/themes.local/"
+            done
+          ''}
           ln -sf "${tt-rss-config}" "${cfg.root}/config.php"
           chown -R "${cfg.user}" "${cfg.root}"
           chmod -R 755 "${cfg.root}"
@@ -584,8 +624,8 @@ let
     };
 
     users = optionalAttrs (cfg.user == "tt_rss") {
-      extraUsers.tt_rss.group = "tt_rss";
-      extraGroups.tt_rss = {};
+      users.tt_rss.group = "tt_rss";
+      groups.tt_rss = {};
     };
   };
 }
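
A configuration sketch for the new tt-rss options; myPlugin and myTheme are
placeholders for derivations whose files should land in plugins.local and
themes.local:

    services.tt-rss = {
      enable = true;
      pluginPackages = [ myPlugin ];   # hypothetical plugin derivation
      themePackages = [ myTheme ];     # hypothetical theme derivation
      extraConfig = ''
        // hypothetical extra define, appended verbatim to config.php
        define('SOME_CUSTOM_OPTION', 'value');
      '';
    };
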
diff --git a/nixos/modules/services/web-apps/virtlyst.nix b/nixos/modules/services/web-apps/virtlyst.nix
new file mode 100644
index 000000000000..e5c0bff2168a
--- /dev/null
+++ b/nixos/modules/services/web-apps/virtlyst.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.virtlyst;
+  stateDir = "/var/lib/virtlyst";
+
+  ini = pkgs.writeText "virtlyst-config.ini" ''
+    [wsgi]
+    master = true
+    threads = auto
+    http-socket = ${cfg.httpSocket}
+    application = ${pkgs.virtlyst}/lib/libVirtlyst.so
+    chdir2 = ${stateDir}
+    static-map = /static=${pkgs.virtlyst}/root/static
+
+    [Cutelyst]
+    production = true
+    DatabasePath = virtlyst.sqlite
+    TemplatePath = ${pkgs.virtlyst}/root/src
+
+    [Rules]
+    cutelyst.* = true
+    virtlyst.* = true
+  '';
+
+in
+
+{
+
+  options.services.virtlyst = {
+    enable = mkEnableOption "Virtlyst libvirt web interface";
+
+    adminPassword = mkOption {
+      type = types.str;
+      description = ''
+        Initial admin password with which the database will be seeded.
+      '';
+    };
+
+    httpSocket = mkOption {
+      type = types.str;
+      default = "localhost:3000";
+      description = ''
+        IP address and/or port to bind the HTTP socket to.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.virtlyst = {
+      home = stateDir;
+      createHome = true;
+      group = mkIf config.virtualisation.libvirtd.enable "libvirtd";
+    };
+
+    systemd.services.virtlyst = {
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        VIRTLYST_ADMIN_PASSWORD = cfg.adminPassword;
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.cutelyst}/bin/cutelyst-wsgi2 --ini ${ini}";
+        User = "virtlyst";
+        WorkingDirectory = stateDir;
+      };
+    };
+  };
+
+}
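
A minimal sketch for enabling the new virtlyst module; the last line matches
the group handling above, which puts the virtlyst user into the libvirtd group
when libvirtd is enabled:

    services.virtlyst = {
      enable = true;
      adminPassword = "changeme";      # seeds the initial admin account
      httpSocket = "localhost:3000";   # the default, shown for clarity
    };
    virtualisation.libvirtd.enable = true;
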
diff --git a/nixos/modules/services/web-servers/apache-httpd/default.nix b/nixos/modules/services/web-servers/apache-httpd/default.nix
index f9f2511f45dc..73607c6f9a3b 100644
--- a/nixos/modules/services/web-servers/apache-httpd/default.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/default.nix
@@ -98,11 +98,6 @@ let
   allSubservices = mainSubservices ++ concatMap subservicesFor mainCfg.virtualHosts;
 
 
-  # !!! should be in lib
-  writeTextInDir = name: text:
-    pkgs.runCommand name {inherit text;} "mkdir -p $out; echo -n \"$text\" > $out/$name";
-
-
   enableSSL = any (vhost: vhost.enableSSL) allHosts;
 
 
@@ -656,16 +651,16 @@ in
                      message = "SSL is enabled for httpd, but sslServerCert and/or sslServerKey haven't been specified."; }
                  ];
 
-    warnings = map (cfg: ''apache-httpd's port option is deprecated. Use listen = [{/*ip = "*"; */ port = ${toString cfg.port}";}]; instead'' ) (lib.filter (cfg: cfg.port != 0) allHosts);
+    warnings = map (cfg: ''apache-httpd's port option is deprecated. Use listen = [{/*ip = "*"; */ port = ${toString cfg.port};}]; instead'' ) (lib.filter (cfg: cfg.port != 0) allHosts);
 
-    users.extraUsers = optionalAttrs (mainCfg.user == "wwwrun") (singleton
+    users.users = optionalAttrs (mainCfg.user == "wwwrun") (singleton
       { name = "wwwrun";
         group = mainCfg.group;
         description = "Apache httpd user";
         uid = config.ids.uids.wwwrun;
       });
 
-    users.extraGroups = optionalAttrs (mainCfg.group == "wwwrun") (singleton
+    users.groups = optionalAttrs (mainCfg.group == "wwwrun") (singleton
       { name = "wwwrun";
         gid = config.ids.gids.wwwrun;
       });
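
A sketch of the replacement the corrected warning suggests for the deprecated
per-host port option:

    # instead of the deprecated `port = 80;` on a virtual host:
    listen = [ { ip = "*"; port = 80; } ];
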
diff --git a/nixos/modules/services/web-servers/apache-httpd/owncloud.nix b/nixos/modules/services/web-servers/apache-httpd/owncloud.nix
index 82b8bf3e30db..6345a9a56935 100644
--- a/nixos/modules/services/web-servers/apache-httpd/owncloud.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/owncloud.nix
@@ -4,17 +4,6 @@ with lib;
 
 let
 
-  httpd = serverInfo.serverConfig.package;
-
-  version24 = !versionOlder httpd.version "2.4";
-
-  allGranted = if version24 then ''
-    Require all granted
-  '' else ''
-    Order allow,deny
-    Allow from all
-  '';
-
   owncloudConfig = pkgs.writeText "config.php"
     ''
       <?php
diff --git a/nixos/modules/services/web-servers/apache-httpd/trac.nix b/nixos/modules/services/web-servers/apache-httpd/trac.nix
index 35b9ab56087c..28b411a64b6f 100644
--- a/nixos/modules/services/web-servers/apache-httpd/trac.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/trac.nix
@@ -12,8 +12,6 @@ let
     apacheHttpd = httpd;
   };
 
-  pythonLib = p: "${p}/";
-
   httpd = serverInfo.serverConfig.package;
 
   versionPre24 = versionOlder httpd.version "2.4";
diff --git a/nixos/modules/services/web-servers/caddy.nix b/nixos/modules/services/web-servers/caddy.nix
index fe65fba42a46..4237cfdd9198 100644
--- a/nixos/modules/services/web-servers/caddy.nix
+++ b/nixos/modules/services/web-servers/caddy.nix
@@ -93,13 +93,13 @@ in {
       };
     };
 
-    users.extraUsers.caddy = {
+    users.users.caddy = {
       group = "caddy";
       uid = config.ids.uids.caddy;
       home = cfg.dataDir;
       createHome = true;
     };
 
-    users.extraGroups.caddy.gid = config.ids.uids.caddy;
+    users.groups.caddy.gid = config.ids.uids.caddy;
   };
 }
diff --git a/nixos/modules/services/web-servers/hitch/default.nix b/nixos/modules/services/web-servers/hitch/default.nix
index 895d02827f71..a6c4cbea1225 100644
--- a/nixos/modules/services/web-servers/hitch/default.nix
+++ b/nixos/modules/services/web-servers/hitch/default.nix
@@ -102,7 +102,7 @@ with lib;
 
     environment.systemPackages = [ pkgs.hitch ];
 
-    users.extraUsers.hitch.group = "hitch";
-    users.extraGroups.hitch = {};
+    users.users.hitch.group = "hitch";
+    users.groups.hitch = {};
   };
 }
diff --git a/nixos/modules/services/web-servers/hydron.nix b/nixos/modules/services/web-servers/hydron.nix
new file mode 100644
index 000000000000..49a18f5e7b28
--- /dev/null
+++ b/nixos/modules/services/web-servers/hydron.nix
@@ -0,0 +1,105 @@
+{ config, lib, pkgs, ... }:
+
+let cfg = config.services.hydron;
+in with lib; {
+  options.services.hydron = {
+    enable = mkEnableOption "hydron";
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/hydron";
+      example = "/home/okina/hydron";
+      description = "Location where hydron runs and stores data.";
+    };
+
+    interval = mkOption {
+      type = types.str;
+      default = "hourly";
+      example = "06:00";
+      description = ''
+        How often to run hydron import and, if enabled, fetch tags. Runs hourly by default.
+
+        The format is described in
+        <citerefentry><refentrytitle>systemd.time</refentrytitle>
+        <manvolnum>7</manvolnum></citerefentry>.
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "127.0.0.1:8010";
+      description = "Listen on a specific IP address and port.";
+    };
+
+    importPaths = mkOption {
+      type = types.listOf types.path;
+      default = [];
+      example = [ "/home/okina/Pictures" ];
+      description = "Paths that hydron will recursively import.";
+    };
+
+    fetchTags = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Fetch tags for imported images and webm from gelbooru.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.hydron = {
+      description = "hydron";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        # Ensure folder exists and permissions are correct
+        mkdir -p ${escapeShellArg cfg.dataDir}/images
+        chmod 750 ${escapeShellArg cfg.dataDir}
+        chown -R hydron:hydron ${escapeShellArg cfg.dataDir}
+      '';
+
+      serviceConfig = {
+        PermissionsStartOnly = true;
+        User = "hydron";
+        Group = "hydron";
+        ExecStart = "${pkgs.hydron}/bin/hydron serve"
+        + optionalString (cfg.listenAddress != null) " -a ${cfg.listenAddress}";
+      };
+    };
+
+    systemd.services.hydron-fetch = {
+      description = "Import paths into hydron and possibly fetch tags";
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "hydron";
+        Group = "hydron";
+        ExecStart = "${pkgs.hydron}/bin/hydron import "
+        + optionalString cfg.fetchTags "-f "
+        + (escapeShellArg cfg.dataDir) + "/images " + (escapeShellArgs cfg.importPaths);
+      };
+    };
+
+    systemd.timers.hydron-fetch = {
+      description = "Automatically import paths into hydron and possibly fetch tags";
+      after = [ "network.target" ];
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = cfg.interval;
+    };
+
+    users = {
+      groups.hydron.gid = config.ids.gids.hydron;
+
+      users.hydron = {
+        description = "hydron server service user";
+        home = cfg.dataDir;
+        createHome = true;
+        group = "hydron";
+        uid = config.ids.uids.hydron;
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ chiiruno ];
+}
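
A usage sketch assembled from the option examples above:

    services.hydron = {
      enable = true;
      listenAddress = "127.0.0.1:8010";
      importPaths = [ "/home/okina/Pictures" ];
      interval = "06:00";   # systemd.time(7) calendar expression
      fetchTags = true;     # the default
    };
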
diff --git a/nixos/modules/services/web-servers/lighttpd/cgit.nix b/nixos/modules/services/web-servers/lighttpd/cgit.nix
index 710fecc0c05c..e6a054c296dc 100644
--- a/nixos/modules/services/web-servers/lighttpd/cgit.nix
+++ b/nixos/modules/services/web-servers/lighttpd/cgit.nix
@@ -4,8 +4,15 @@ with lib;
 
 let
   cfg = config.services.lighttpd.cgit;
+  pathPrefix = if stringLength cfg.subdir == 0 then "" else "/" + cfg.subdir;
   configFile = pkgs.writeText "cgitrc"
     ''
+      # default paths to static assets
+      css=${pathPrefix}/cgit.css
+      logo=${pathPrefix}/cgit.png
+      favicon=${pathPrefix}/favicon.ico
+
+      # user configuration
       ${cfg.configText}
     '';
 in
@@ -18,8 +25,17 @@ in
       type = types.bool;
       description = ''
         If true, enable cgit (fast web interface for git repositories) as a
-        sub-service in lighttpd. cgit will be accessible at
-        http://yourserver/cgit
+        sub-service in lighttpd.
+      '';
+    };
+
+    subdir = mkOption {
+      default = "cgit";
+      example = "";
+      type = types.str;
+      description = ''
+        The subdirectory in which to serve cgit. The web application will be
+        accessible at http://yourserver/''${subdir}
       '';
     };
 
@@ -48,14 +64,14 @@ in
     services.lighttpd.enableModules = [ "mod_cgi" "mod_alias" "mod_setenv" ];
 
     services.lighttpd.extraConfig = ''
-      $HTTP["url"] =~ "^/cgit" {
+      $HTTP["url"] =~ "^/${cfg.subdir}" {
           cgi.assign = (
               "cgit.cgi" => "${pkgs.cgit}/cgit/cgit.cgi"
           )
           alias.url = (
-              "/cgit.css" => "${pkgs.cgit}/cgit/cgit.css",
-              "/cgit.png" => "${pkgs.cgit}/cgit/cgit.png",
-              "/cgit"     => "${pkgs.cgit}/cgit/cgit.cgi"
+              "${pathPrefix}/cgit.css" => "${pkgs.cgit}/cgit/cgit.css",
+              "${pathPrefix}/cgit.png" => "${pkgs.cgit}/cgit/cgit.png",
+              "${pathPrefix}"          => "${pkgs.cgit}/cgit/cgit.cgi"
           )
           setenv.add-environment = (
               "CGIT_CONFIG" => "${configFile}"
diff --git a/nixos/modules/services/web-servers/lighttpd/default.nix b/nixos/modules/services/web-servers/lighttpd/default.nix
index d23e810dcc62..7a3df26e47a6 100644
--- a/nixos/modules/services/web-servers/lighttpd/default.nix
+++ b/nixos/modules/services/web-servers/lighttpd/default.nix
@@ -245,12 +245,12 @@ in
       serviceConfig.KillSignal = "SIGINT";
     };
 
-    users.extraUsers.lighttpd = {
+    users.users.lighttpd = {
       group = "lighttpd";
       description = "lighttpd web server privilege separation user";
       uid = config.ids.uids.lighttpd;
     };
 
-    users.extraGroups.lighttpd.gid = config.ids.gids.lighttpd;
+    users.groups.lighttpd.gid = config.ids.gids.lighttpd;
   };
 }
diff --git a/nixos/modules/services/web-servers/lighttpd/inginious.nix b/nixos/modules/services/web-servers/lighttpd/inginious.nix
deleted file mode 100644
index 8c813d116a52..000000000000
--- a/nixos/modules/services/web-servers/lighttpd/inginious.nix
+++ /dev/null
@@ -1,261 +0,0 @@
-{ config, lib, pkgs, ... }:
-with lib;
-
-let
-  cfg = config.services.lighttpd.inginious;
-  inginious = pkgs.inginious;
-  execName = "inginious-${if cfg.useLTI then "lti" else "webapp"}";
-
-  inginiousConfigFile = if cfg.configFile != null then cfg.configFile else pkgs.writeText "inginious.yaml" ''
-    # Backend; can be:
-    # - "local" (run containers on the same machine)
-    # - "remote" (connect to distant docker daemon and auto start agents) (choose this if you use boot2docker)
-    # - "remote_manual" (connect to distant and manually installed agents)
-    backend: "${cfg.backendType}"
-
-    ## TODO (maybe): Add an option for the "remote" backend in this NixOS module.
-    # List of remote docker daemon to which the backend will try
-    # to connect (backend: remote only)
-    #docker_daemons:
-    #  - # Host of the docker daemon *from the webapp*
-    #    remote_host: "some.remote.server"
-    #    # Port of the distant docker daemon *from the webapp*
-    #    remote_docker_port: "2375"
-    #    # A mandatory port used by the backend and the agent that will be automatically started.
-    #    # Needs to be available on the remote host, and to be open in the firewall.
-    #    remote_agent_port: "63456"
-    #    # Does the remote docker requires tls? Defaults to false.
-    #    # Parameter can be set to true or path to the certificates
-    #    #use_tls: false
-    #    # Link to the docker daemon *from the host that runs the docker daemon*. Defaults to:
-    #    #local_location: "unix:///var/run/docker.sock"
-    #    # Path to the cgroups "mount" *from the host that runs the docker daemon*. Defaults to:
-    #    #cgroups_location: "/sys/fs/cgroup"
-    #    # Name that will be used to reference the agent
-    #    #"agent_name": "inginious-agent"
-
-    # List of remote agents to which the backend will try
-    # to connect (backend: remote_manual only)
-    # Example:
-    #agents:
-    #  - host: "192.168.59.103"
-    #    port: 5001
-    agents:
-    ${lib.concatMapStrings (agent:
-      "  - host: \"${agent.host}\"\n" +
-      "    port: ${agent.port}\n"
-    ) cfg.remoteAgents}
-
-    # Location of the task directory
-    tasks_directory: "${cfg.tasksDirectory}"
-
-    # Super admins: list of user names that can do everything in the backend
-    superadmins:
-    ${lib.concatMapStrings (x: "  - \"${x}\"\n") cfg.superadmins}
-
-    # Aliases for containers
-    # Only containers listed here can be used by tasks
-    containers:
-    ${lib.concatStrings (lib.mapAttrsToList (name: fullname:
-      "  ${name}: \"${fullname}\"\n"
-    ) cfg.containers)}
-
-    # Use single minified javascript file (production) or multiple files (dev) ?
-    use_minified_js: true
-
-    ## TODO (maybe): Add NixOS options for these parameters.
-
-    # MongoDB options
-    #mongo_opt:
-    #    host: localhost
-    #    database: INGInious
-
-    # Disable INGInious?
-    #maintenance: false
-
-    #smtp:
-    #    sendername: 'INGInious <no-reply@inginious.org>'
-    #    host: 'smtp.gmail.com'
-    #    port: 587
-    #    username: 'configme@gmail.com'
-    #    password: 'secret'
-    #    starttls: True
-
-    ## NixOS extra config
-
-    ${cfg.extraConfig}
-  '';
-in
-{
-  options.services.lighttpd.inginious = {
-    enable = mkEnableOption  "INGInious, an automated code testing and grading system.";
-
-    configFile = mkOption {
-      type = types.nullOr types.path;
-      default = null;
-      example = literalExample ''pkgs.writeText "configuration.yaml" "# custom config options ...";'';
-      description = ''The path to an INGInious configuration file.'';
-    };
-
-    extraConfig = mkOption {
-      type = types.lines;
-      default = "";
-      example = ''
-        # Load the dummy auth plugin.
-        plugins:
-          - plugin_module: inginious.frontend.webapp.plugins.auth.demo_auth
-            users:
-              # register the user "test" with the password "someverycomplexpassword"
-              test: someverycomplexpassword
-      '';
-      description = ''Extra option in YaML format, to be appended to the config file.'';
-    };
-
-    tasksDirectory = mkOption {
-      type = types.path;
-      example = "/var/lib/INGInious/tasks";
-      description = ''
-        Path to the tasks folder.
-        Defaults to the provided test tasks folder (readonly).
-      '';
-    };
-
-    useLTI = mkOption {
-      type = types.bool;
-      default = false;
-      description = ''Whether to start the LTI frontend in place of the webapp.'';
-    };
-
-    superadmins = mkOption {
-      type = types.uniq (types.listOf types.str);
-      default = [ "admin" ];
-      example = [ "john" "pepe" "emilia" ];
-      description = ''List of user logins allowed to administrate the whole server.'';
-    };
-
-    containers = mkOption {
-      type = types.attrsOf types.str;
-      default = {
-          default = "ingi/inginious-c-default";
-      };
-      example = {
-        default = "ingi/inginious-c-default";
-        sekexe  = "ingi/inginious-c-sekexe";
-        java    = "ingi/inginious-c-java";
-        oz      = "ingi/inginious-c-oz";
-        pythia1compat = "ingi/inginious-c-pythia1compat";
-      };
-      description = ''
-        An attrset describing the required containers
-        These containers will be available in INGInious using their short name (key)
-        and will be automatically downloaded before INGInious starts.
-      '';
-    };
-
-    hostPattern = mkOption {
-      type = types.str;
-      default = "^inginious.";
-      example = "^inginious.mydomain.xyz$";
-      description = ''
-        The domain that serves INGInious.
-        INGInious uses absolute paths which makes it difficult to relocate in its own subdir.
-        The default configuration will serve INGInious when the server is accessed with a hostname starting with "inginious.".
-        If left blank, INGInious will take the precedence over all the other lighttpd sites, which is probably not what you want.
-      '';
-    };
-
-    backendType = mkOption {
-      type = types.enum [ "local" "remote_manual" ]; # TODO: support backend "remote"
-      default = "local";
-      description = ''
-        Select how INGINious accesses to grading containers.
-        The default "local" option ensures that Docker is started and provisioned.
-        Fore more information, see http://inginious.readthedocs.io/en/latest/install_doc/config_reference.html
-        Not all backends are supported. Use services.inginious.configFile for full flexibility.
-      '';
-    };
-
-    remoteAgents = mkOption {
-      type = types.listOf (types.attrsOf types.str);
-      default = [];
-      example = [ { host = "192.0.2.25"; port = "1345"; } ];
-      description = ''A list of remote agents, used only when services.inginious.backendType is "remote_manual".'';
-    };
-  };
-
-  config = mkIf cfg.enable (
-    mkMerge [
-      # For a local install, we need docker.
-      (mkIf (cfg.backendType == "local") {
-        virtualisation.docker = {
-          enable = true;
-          # We need docker to listen on port 2375.
-          listenOptions = ["127.0.0.1:2375" "/var/run/docker.sock"];
-          storageDriver = mkDefault "overlay";
-        };
-
-        users.extraUsers."lighttpd".extraGroups = [ "docker" ];
-
-        # Ensure that docker has pulled the required images.
-        systemd.services.inginious-prefetch = {
-          script = let
-            images = lib.unique (
-              [ "centos" "ingi/inginious-agent" ]
-              ++ lib.mapAttrsToList (_: image: image) cfg.containers
-            );
-          in lib.concatMapStrings (image: ''
-            ${pkgs.docker}/bin/docker pull ${image}
-          '') images;
-
-          serviceConfig.Type = "oneshot";
-          wants = [ "docker.service" ];
-          after = [ "docker.service" ];
-          wantedBy = [ "lighttpd.service" ];
-          before = [ "lighttpd.service" ];
-        };
-      })
-
-      # Common
-      {
-        services.lighttpd.inginious.tasksDirectory = mkDefault "${inginious}/lib/python2.7/site-packages/inginious/tasks";
-        # To access inginous tools (like inginious-test-task)
-        environment.systemPackages = [ inginious ];
-
-        services.mongodb.enable = true;
-
-        services.lighttpd.enable = true;
-        services.lighttpd.enableModules = [ "mod_access" "mod_alias" "mod_fastcgi" "mod_redirect" "mod_rewrite" ];
-        services.lighttpd.extraConfig = ''
-          $HTTP["host"] =~ "${cfg.hostPattern}" {
-            fastcgi.server = ( "/${execName}" =>
-              ((
-                "socket" => "/run/lighttpd/inginious-fastcgi.socket",
-                "bin-path" => "${inginious}/bin/${execName} --config=${inginiousConfigFile}",
-                "max-procs" => 1,
-                "bin-environment" => ( "REAL_SCRIPT_NAME" => "" ),
-                "check-local" => "disable"
-              ))
-            )
-            url.rewrite-once = (
-              "^/.well-known/.*" => "$0",
-              "^/static/.*" => "$0",
-              "^/.*$" => "/${execName}$0",
-              "^/favicon.ico$" => "/static/common/favicon.ico",
-            )
-            alias.url += (
-              "/static/webapp/" => "${inginious}/lib/python2.7/site-packages/inginious/frontend/webapp/static/",
-              "/static/common/" => "${inginious}/lib/python2.7/site-packages/inginious/frontend/common/static/"
-            )
-          }
-        '';
-
-        systemd.services.lighttpd.preStart = ''
-          mkdir -p /run/lighttpd
-          chown lighttpd.lighttpd /run/lighttpd
-        '';
-
-        systemd.services.lighttpd.wants = [ "mongodb.service" "docker.service" ];
-        systemd.services.lighttpd.after = [ "mongodb.service" "docker.service" ];
-      }
-    ]);
-}
diff --git a/nixos/modules/services/web-servers/meguca.nix b/nixos/modules/services/web-servers/meguca.nix
new file mode 100644
index 000000000000..ed7325ff0790
--- /dev/null
+++ b/nixos/modules/services/web-servers/meguca.nix
@@ -0,0 +1,159 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.meguca;
+  postgres = config.services.postgresql;
+in
+{
+  options.services.meguca = {
+    enable = mkEnableOption "meguca";
+
+    baseDir = mkOption {
+      type = types.path;
+      default = "/run/meguca";
+      description = "Location where meguca stores it's database and links.";
+    };
+
+    password = mkOption {
+      type = types.str;
+      default = "meguca";
+      description = "Password for the meguca database.";
+    };
+
+    passwordFile = mkOption {
+      type = types.path;
+      default = "/run/keys/meguca-password-file";
+      description = "Password file for the meguca database.";
+    };
+
+    reverseProxy = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Reverse proxy IP.";
+    };
+
+    sslCertificate = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Path to the SSL certificate.";
+    };
+
+    listenAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Listen on a specific IP address and port.";
+    };
+
+    cacheSize = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = "Cache size in MB.";
+    };
+
+    postgresArgs = mkOption {
+      type = types.str;
+      default = "user=meguca password=" + cfg.password + " dbname=meguca sslmode=disable";
+      description = "Postgresql connection arguments.";
+    };
+
+    postgresArgsFile = mkOption {
+      type = types.path;
+      default = "/run/keys/meguca-postgres-args";
+      description = "Postgresql connection arguments file.";
+    };
+
+    compressTraffic = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Compress all traffic with gzip.";
+    };
+
+    assumeReverseProxy = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Assume the server is behind a reverse proxy, when resolving client IPs.";
+    };
+
+    httpsOnly = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Serve and listen only through HTTPS.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.sudo.enable = true;
+    services.postgresql.enable = true;
+
+    services.meguca.passwordFile = mkDefault (toString (pkgs.writeTextFile {
+      name = "meguca-password-file";
+      text = cfg.password;
+    }));
+
+    services.meguca.postgresArgsFile = mkDefault (toString (pkgs.writeTextFile {
+      name = "meguca-postgres-args";
+      text = cfg.postgresArgs;
+    }));
+
+    systemd.services.meguca = {
+      description = "meguca";
+      after = [ "network.target" "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        # Ensure folder exists and links are correct or create them
+        mkdir -p ${cfg.baseDir}
+        chmod 750 ${cfg.baseDir}
+        ln -sf ${pkgs.meguca}/share/meguca/www ${cfg.baseDir}
+
+        # Ensure the database is correct or create it
+        ${pkgs.sudo}/bin/sudo -u ${postgres.superUser} ${postgres.package}/bin/createuser \
+          -SDR meguca || true
+        ${pkgs.sudo}/bin/sudo -u ${postgres.superUser} ${postgres.package}/bin/createdb \
+          -T template0 -E UTF8 -O meguca meguca || true
+        ${pkgs.sudo}/bin/sudo -u meguca ${postgres.package}/bin/psql \
+          -c "ALTER ROLE meguca WITH PASSWORD '$(cat ${cfg.passwordFile})';" || true
+      '';
+
+      script = ''
+        cd ${cfg.baseDir}
+
+        ${pkgs.meguca}/bin/meguca -d "$(cat ${cfg.postgresArgsFile})"\
+          ${optionalString (cfg.reverseProxy != null) " -R ${cfg.reverseProxy}"}\
+          ${optionalString (cfg.sslCertificate != null) " -S ${cfg.sslCertificate}"}\
+          ${optionalString (cfg.listenAddress != null) " -a ${cfg.listenAddress}"}\
+          ${optionalString (cfg.cacheSize != null) " -c ${toString cfg.cacheSize}"}\
+          ${optionalString (cfg.compressTraffic) " -g"}\
+          ${optionalString (cfg.assumeReverseProxy) " -r"}\
+          ${optionalString (cfg.httpsOnly) " -s"} start
+      '';
+
+      serviceConfig = {
+        PermissionsStartOnly = true;
+        Type = "forking";
+        User = "meguca";
+        Group = "meguca";
+        RuntimeDirectory = "meguca";
+        ExecStop = "${pkgs.meguca}/bin/meguca stop";
+      };
+    };
+
+    users = {
+      users.meguca = {
+        description = "meguca server service user";
+        home = cfg.baseDir;
+        createHome = true;
+        group = "meguca";
+        uid = config.ids.uids.meguca;
+      };
+
+      groups.meguca = {
+        gid = config.ids.gids.meguca;
+        members = [ "meguca" ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ chiiruno ];
+}
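
A usage sketch for the new meguca module; for anything beyond a throwaway
setup, point passwordFile and postgresArgsFile at deployed secrets instead of
relying on the plain-text password option:

    services.meguca = {
      enable = true;
      listenAddress = "127.0.0.1:8000";   # hypothetical address
      cacheSize = 256;                    # MB
      compressTraffic = true;
    };
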
diff --git a/nixos/modules/services/web-servers/mighttpd2.nix b/nixos/modules/services/web-servers/mighttpd2.nix
index a888f623616e..4e7082c67690 100644
--- a/nixos/modules/services/web-servers/mighttpd2.nix
+++ b/nixos/modules/services/web-servers/mighttpd2.nix
@@ -119,13 +119,13 @@ in {
       };
     };
 
-    users.extraUsers.mighttpd2 = {
+    users.users.mighttpd2 = {
       group = "mighttpd2";
       uid = config.ids.uids.mighttpd2;
       isSystemUser = true;
     };
 
-    users.extraGroups.mighttpd2.gid = config.ids.gids.mighttpd2;
+    users.groups.mighttpd2.gid = config.ids.gids.mighttpd2;
   };
 
   meta.maintainers = with lib.maintainers; [ fgaz ];
diff --git a/nixos/modules/services/web-servers/minio.nix b/nixos/modules/services/web-servers/minio.nix
index 843f0d986877..f78a966989b6 100644
--- a/nixos/modules/services/web-servers/minio.nix
+++ b/nixos/modules/services/web-servers/minio.nix
@@ -85,7 +85,7 @@ in
       '';
       serviceConfig = {
         PermissionsStartOnly = true;
-        ExecStart = "${cfg.package}/bin/minio server --address ${cfg.listenAddress} --config-dir=${cfg.configDir} ${cfg.dataDir}";
+        ExecStart = "${cfg.package}/bin/minio server --json --address ${cfg.listenAddress} --config-dir=${cfg.configDir} ${cfg.dataDir}";
         Type = "simple";
         User = "minio";
         Group = "minio";
@@ -101,11 +101,11 @@ in
       };
     };
 
-    users.extraUsers.minio = {
+    users.users.minio = {
       group = "minio";
       uid = config.ids.uids.minio;
     };
 
-    users.extraGroups.minio.gid = config.ids.uids.minio;
+    users.groups.minio.gid = config.ids.uids.minio;
   };
 }
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 0aa780bf6da1..355976c4b7cb 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -613,13 +613,13 @@ in
         listToAttrs acmePairs
     );
 
-    users.extraUsers = optionalAttrs (cfg.user == "nginx") (singleton
+    users.users = optionalAttrs (cfg.user == "nginx") (singleton
       { name = "nginx";
         group = cfg.group;
         uid = config.ids.uids.nginx;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "nginx") (singleton
+    users.groups = optionalAttrs (cfg.group == "nginx") (singleton
       { name = "nginx";
         gid = config.ids.gids.nginx;
       });
diff --git a/nixos/modules/services/web-servers/tomcat.nix b/nixos/modules/services/web-servers/tomcat.nix
index aa94e0e976c9..d8ccb7ca65d6 100644
--- a/nixos/modules/services/web-servers/tomcat.nix
+++ b/nixos/modules/services/web-servers/tomcat.nix
@@ -110,7 +110,7 @@ in
       webapps = mkOption {
         type = types.listOf types.package;
         default = [ tomcat.webapps ];
-        defaultText = "[ tomcat.webapps ]";
+        defaultText = "[ pkgs.tomcat85.webapps ]";
         description = "List containing WAR files or directories with WAR files which are web applications to be deployed on Tomcat";
       };
 
@@ -166,12 +166,12 @@ in
 
   config = mkIf config.services.tomcat.enable {
 
-    users.extraGroups = singleton
+    users.groups = singleton
       { name = "tomcat";
         gid = config.ids.gids.tomcat;
       };
 
-    users.extraUsers = singleton
+    users.users = singleton
       { name = "tomcat";
         uid = config.ids.uids.tomcat;
         description = "Tomcat user";
diff --git a/nixos/modules/services/web-servers/traefik.nix b/nixos/modules/services/web-servers/traefik.nix
index b6c7fef21fb2..700202b1d28f 100644
--- a/nixos/modules/services/web-servers/traefik.nix
+++ b/nixos/modules/services/web-servers/traefik.nix
@@ -114,12 +114,12 @@ in {
       };
     };
 
-    users.extraUsers.traefik = {
+    users.users.traefik = {
       group = "traefik";
       home = cfg.dataDir;
       createHome = true;
     };
 
-    users.extraGroups.traefik = {};
+    users.groups.traefik = {};
   };
 }
diff --git a/nixos/modules/services/web-servers/uwsgi.nix b/nixos/modules/services/web-servers/uwsgi.nix
index 14596bb3add0..3f858d90fa46 100644
--- a/nixos/modules/services/web-servers/uwsgi.nix
+++ b/nixos/modules/services/web-servers/uwsgi.nix
@@ -27,13 +27,7 @@ let
         else if hasPython3 then uwsgi.python3
         else null;
 
-      pythonPackages = pkgs.pythonPackages.override {
-        inherit python;
-      };
-
-      penv = python.buildEnv.override {
-        extraLibs = (c.pythonPackages or (self: [])) pythonPackages;
-      };
+      pythonEnv = python.withPackages (c.pythonPackages or (self: []));
 
       uwsgiCfg = {
         uwsgi =
@@ -42,7 +36,7 @@ let
               inherit plugins;
             } // removeAttrs c [ "type" "pythonPackages" ]
               // optionalAttrs (python != null) {
-                pythonpath = "${penv}/${python.sitePackages}";
+                pythonpath = "${pythonEnv}/${python.sitePackages}";
                 env =
                   # Argh, uwsgi expects list of key-values there instead of a dictionary.
                   let env' = c.env or [];
@@ -51,7 +45,7 @@ let
                            then substring (stringLength "PATH=") (stringLength x) x
                            else null;
                       oldPaths = filter (x: x != null) (map getPath env');
-                  in env' ++ [ "PATH=${optionalString (oldPaths != []) "${last oldPaths}:"}${penv}/bin" ];
+                  in env' ++ [ "PATH=${optionalString (oldPaths != []) "${last oldPaths}:"}${pythonEnv}/bin" ];
               }
           else if c.type == "emperor"
             then {
@@ -152,13 +146,13 @@ in {
       };
     };
 
-    users.extraUsers = optionalAttrs (cfg.user == "uwsgi") (singleton
+    users.users = optionalAttrs (cfg.user == "uwsgi") (singleton
       { name = "uwsgi";
         group = cfg.group;
         uid = config.ids.uids.uwsgi;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "uwsgi") (singleton
+    users.groups = optionalAttrs (cfg.group == "uwsgi") (singleton
       { name = "uwsgi";
         gid = config.ids.gids.uwsgi;
       });
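
With the switch to python.withPackages, a vhost's pythonPackages function now
feeds one coherent Python environment whose site-packages ends up on
pythonpath. A sketch, with myapp:application as a hypothetical WSGI entry
point:

    services.uwsgi = {
      enable = true;
      plugins = [ "python3" ];
      instance = {
        type = "normal";
        pythonPackages = self: with self; [ flask ];
        module = "myapp:application";   # hypothetical WSGI callable
      };
    };
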
diff --git a/nixos/modules/services/web-servers/varnish/default.nix b/nixos/modules/services/web-servers/varnish/default.nix
index bc74d62b116a..63f967185c2d 100644
--- a/nixos/modules/services/web-servers/varnish/default.nix
+++ b/nixos/modules/services/web-servers/varnish/default.nix
@@ -103,11 +103,11 @@ in
       })
     ];
 
-    users.extraUsers.varnish = {
+    users.users.varnish = {
       group = "varnish";
       uid = config.ids.uids.varnish;
     };
 
-    users.extraGroups.varnish.gid = config.ids.uids.varnish;
+    users.groups.varnish.gid = config.ids.uids.varnish;
   };
 }
diff --git a/nixos/modules/services/web-servers/zope2.nix b/nixos/modules/services/web-servers/zope2.nix
index 496e34db4a96..1dcc3ac9d8d4 100644
--- a/nixos/modules/services/web-servers/zope2.nix
+++ b/nixos/modules/services/web-servers/zope2.nix
@@ -103,7 +103,7 @@ in
 
   config = mkIf (cfg.instances != {}) {
 
-    users.extraUsers.zope2.uid = config.ids.uids.zope2;
+    users.users.zope2.uid = config.ids.uids.zope2;
 
     systemd.services =
       let
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index 27b62df7097c..9fb8f44b2421 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -10,7 +10,6 @@ let
     let
       pkgName = drv: (builtins.parseDrvName drv.name).name;
       ysNames = map pkgName ys;
-      res = (filter (x: !(builtins.elem (pkgName x) ysNames)) xs);
     in
       filter (x: !(builtins.elem (pkgName x) ysNames)) xs;
 
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index 91d091d7d7e2..4b9e561d53c8 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -221,6 +221,11 @@ in
       security.pam.services.sddm.enableKwallet = true;
       security.pam.services.slim.enableKwallet = true;
 
+      # Update the start menu for each user that has `isNormalUser` set.
+      system.activationScripts.plasmaSetup = stringAfter [ "users" "groups" ]
+        (concatStringsSep "\n"
+          (mapAttrsToList (name: value: "${pkgs.su}/bin/su ${name} -c kbuildsycoca5")
+            (filterAttrs (n: v: v.isNormalUser) config.users.users)));
     })
   ];
 
diff --git a/nixos/modules/services/x11/desktop-managers/xfce.nix b/nixos/modules/services/x11/desktop-managers/xfce.nix
index 7dcc600d2664..ae155470419d 100644
--- a/nixos/modules/services/x11/desktop-managers/xfce.nix
+++ b/nixos/modules/services/x11/desktop-managers/xfce.nix
@@ -43,12 +43,6 @@ in
         default = true;
         description = "Enable the XFWM (default) window manager.";
       };
-
-      screenLock = mkOption {
-        type = types.enum [ "xscreensaver" "xlockmore" "slock" ];
-        default = "xlockmore";
-        description = "Application used by XFCE to lock the screen.";
-      };
     };
   };
 
@@ -92,7 +86,7 @@ in
       thunar-volman # TODO: drop
     ] ++ (if config.hardware.pulseaudio.enable
           then [ xfce4-mixer-pulse xfce4-volumed-pulse ]
-	  else [ xfce4-mixer xfce4-volumed ])
+          else [ xfce4-mixer xfce4-volumed ])
       # TODO: NetworkManager doesn't belong here
       ++ optionals config.networking.networkmanager.enable [ networkmanagerapplet ]
       ++ optionals config.powerManagement.enable [ xfce4-power-manager ]
diff --git a/nixos/modules/services/x11/display-managers/gdm.nix b/nixos/modules/services/x11/display-managers/gdm.nix
index a6a38a21b617..8b08c01ea0db 100644
--- a/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixos/modules/services/x11/display-managers/gdm.nix
@@ -89,7 +89,7 @@ in
 
     services.xserver.displayManager.slim.enable = false;
 
-    users.extraUsers.gdm =
+    users.users.gdm =
       { name = "gdm";
         uid = config.ids.uids.gdm;
         group = "gdm";
@@ -97,7 +97,7 @@ in
         description = "GDM user";
       };
 
-    users.extraGroups.gdm.gid = config.ids.gids.gdm;
+    users.groups.gdm.gid = config.ids.gids.gdm;
 
    # GDM needs different xserverArgs, presumably because it uses Wayland by default.
     services.xserver.tty = null;
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix
new file mode 100644
index 000000000000..ba8151a60f20
--- /dev/null
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.mini;
+
+  xgreeters = pkgs.linkFarm "lightdm-mini-greeter-xgreeters" [{
+    path = "${pkgs.lightdm-mini-greeter}/share/xgreeters/lightdm-mini-greeter.desktop";
+    name = "lightdm-mini-greeter.desktop";
+  }];
+
+  miniGreeterConf = pkgs.writeText "lightdm-mini-greeter.conf"
+    ''
+    [greeter]
+    user = ${cfg.user}
+    show-password-label = true
+    password-label-text = Password:
+    show-input-cursor = true
+
+    [greeter-hotkeys]
+    mod-key = meta
+    shutdown-key = s
+    restart-key = r
+    hibernate-key = h
+    suspend-key = u
+
+    [greeter-theme]
+    font = Sans
+    font-size = 1em
+    text-color = "#080800"
+    error-color = "#F8F8F0"
+    background-image = "${ldmcfg.background}"
+    background-color = "#1B1D1E"
+    window-color = "#F92672"
+    border-color = "#080800"
+    border-width = 2px
+    layout-space = 15
+    password-color = "#F8F8F0"
+    password-background-color = "#1B1D1E"
+
+    ${cfg.extraConfig}
+    '';
+
+in
+{
+  options = {
+
+    services.xserver.displayManager.lightdm.greeters.mini = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable lightdm-mini-greeter as the lightdm greeter.
+
+          Note that this greeter starts only the default X session.
+          You can configure the default X session by
+          <option>services.xserver.desktopManager.default</option> and
+          <option>services.xserver.windowManager.default</option>.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "root";
+        description = ''
+          The user to login as.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra configuration that should be put in the lightdm-mini-greeter.conf
+          configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+
+    services.xserver.displayManager.lightdm.greeters.gtk.enable = false;
+
+    services.xserver.displayManager.lightdm.greeter = mkDefault {
+      package = xgreeters;
+      name = "lightdm-mini-greeter";
+    };
+
+    environment.etc."lightdm/lightdm-mini-greeter.conf".source = miniGreeterConf;
+
+  };
+}
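
A sketch of enabling the new greeter; alice is a placeholder login name, and
extraConfig lines land at the end of the [greeter-theme] section generated
above:

    services.xserver.displayManager.lightdm = {
      enable = true;
      greeters.mini = {
        enable = true;
        user = "alice";   # hypothetical user to log in as
        extraConfig = ''
          background-color = "#000000"
        '';
      };
    };
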
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 9d30155a7234..206ede227efa 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -42,7 +42,7 @@ let
     ''
       [LightDM]
       ${optionalString cfg.greeter.enable ''
-        greeter-user = ${config.users.extraUsers.lightdm.name}
+        greeter-user = ${config.users.users.lightdm.name}
         greeters-directory = ${cfg.greeter.package}
       ''}
       sessions-directory = ${dmcfg.session.desktops}
@@ -72,6 +72,7 @@ in
   # preferred.
   imports = [
     ./lightdm-greeters/gtk.nix
+    ./lightdm-greeters/mini.nix
   ];
 
   options = {
@@ -251,14 +252,14 @@ in
         session  include   lightdm
     '';
 
-    users.extraUsers.lightdm = {
+    users.users.lightdm = {
       createHome = true;
       home = "/var/lib/lightdm-data";
       group = "lightdm";
       uid = config.ids.uids.lightdm;
     };
 
-    users.extraGroups.lightdm.gid = config.ids.gids.lightdm;
+    users.groups.lightdm.gid = config.ids.gids.lightdm;
    services.xserver.tty     = null; # We might start multiple X servers, so let the ttys increment themselves.
     services.xserver.display = null; # We specify our own display (and logfile) in xserver-wrapper up there
   };
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index 2d4cb8aa20a5..426b899586f5 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -19,17 +19,6 @@ let
 
   Xsetup = pkgs.writeScript "Xsetup" ''
     #!/bin/sh
-
-    # Prior to Qt 5.9.2, there is a QML cache invalidation bug which sometimes
-    # strikes new Plasma 5 releases. If the QML cache is not invalidated, SDDM
-    # will segfault without explanation. We really tore our hair out for awhile
-    # before finding the bug:
-    # https://bugreports.qt.io/browse/QTBUG-62302
-    # We work around the problem by deleting the QML cache before startup. It
-    # will be regenerated, causing a small but perceptible delay when SDDM
-    # starts.
-    rm -fr /var/lib/sddm/.cache/sddm-greeter/qmlcache
-
     ${cfg.setupScript}
   '';
 
@@ -65,6 +54,10 @@ let
     XauthPath=${pkgs.xorg.xauth}/bin/xauth
     DisplayCommand=${Xsetup}
     DisplayStopCommand=${Xstop}
+    EnableHidpi=${if cfg.enableHidpi then "true" else "false"}
+
+    [Wayland]
+    EnableHidpi=${if cfg.enableHidpi then "true" else "false"}
 
     ${optionalString cfg.autoLogin.enable ''
     [Autologin]
@@ -95,6 +88,17 @@ in
         '';
       };
 
+      enableHidpi = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Whether to enable automatic HiDPI mode.
+          </para>
+          <para>
+          Versions up to 0.17 are broken, so this only works from 0.18 onwards.
+        '';
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
@@ -253,7 +257,7 @@ in
       '';
     };
 
-    users.extraUsers.sddm = {
+    users.users.sddm = {
       createHome = true;
       home = "/var/lib/sddm";
       group = "sddm";
@@ -262,7 +266,7 @@ in
 
     environment.etc."sddm.conf".source = cfgFile;
 
-    users.extraGroups.sddm.gid = config.ids.gids.sddm;
+    users.groups.sddm.gid = config.ids.gids.sddm;
 
     environment.systemPackages = [ sddm ];
     services.dbus.packages = [ sddm ];
@@ -270,5 +274,20 @@ in
     # To enable user switching, allow sddm to allocate TTYs/displays dynamically.
     services.xserver.tty = null;
     services.xserver.display = null;
+
+    systemd.tmpfiles.rules = [
+      # Prior to Qt 5.9.2, there is a QML cache invalidation bug which sometimes
+      # strikes new Plasma 5 releases. If the QML cache is not invalidated, SDDM
+      # will segfault without explanation. We really tore our hair out for awhile
+      # before finding the bug:
+      # https://bugreports.qt.io/browse/QTBUG-62302
+      # We work around the problem by deleting the QML cache before startup.
+      # This was supposedly fixed in Qt 5.9.2 however it has been reported with
+      # 5.10 and 5.11 as well. The initial workaround was to delete the directory
+      # in the Xsetup script but that doesn't do anything.
+      # Instead we use tmpfiles.d to ensure it gets wiped.
+      # This causes a small but perceptible delay when SDDM starts.
+      "e ${config.users.users.sddm.home}/.cache - - - 0"
+    ];
   };
 }
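
A sketch of the new HiDPI toggle, which fills EnableHidpi in both places in
the generated sddm.conf shown above:

    services.xserver.displayManager.sddm = {
      enable = true;
      enableHidpi = true;   # the default; effective only with sddm >= 0.18
    };
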
diff --git a/nixos/modules/services/x11/hardware/libinput.nix b/nixos/modules/services/x11/hardware/libinput.nix
index d0a87f183b6f..072004d5dd91 100644
--- a/nixos/modules/services/x11/hardware/libinput.nix
+++ b/nixos/modules/services/x11/hardware/libinput.nix
@@ -116,7 +116,7 @@ in {
       };
 
       scrollMethod = mkOption {
-        type = types.enum [ "twofinger" "edge" "none" ];
+        type = types.enum [ "twofinger" "edge" "button" "none" ];
         default = "twofinger";
         example = "edge";
         description =
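
A sketch selecting the newly accepted enum value, which scrolls while a
designated button is held down:

    services.xserver.libinput = {
      enable = true;
      scrollMethod = "button";
    };
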
diff --git a/nixos/modules/services/x11/window-managers/awesome.nix b/nixos/modules/services/x11/window-managers/awesome.nix
index 71eb02ec5954..089e9f769f0a 100644
--- a/nixos/modules/services/x11/window-managers/awesome.nix
+++ b/nixos/modules/services/x11/window-managers/awesome.nix
@@ -37,6 +37,11 @@ in
         apply = pkg: if pkg == null then pkgs.awesome else pkg;
       };
 
+      noArgb = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Disable client transparency support, which can be greatly detrimental to performance in some setups";
+      };
     };
 
   };
@@ -50,7 +55,7 @@ in
       { name = "awesome";
         start =
           ''
-            ${awesome}/bin/awesome ${makeSearchPath cfg.luaModules} &
+            ${awesome}/bin/awesome ${lib.optionalString cfg.noArgb "--no-argb"} ${makeSearchPath cfg.luaModules} &
             waitPID=$!
           '';
       };
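
A sketch of the new flag, which appends --no-argb to the awesome invocation
above:

    services.xserver.windowManager.awesome = {
      enable = true;
      noArgb = true;   # disables client transparency support
    };
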
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 1404231f837e..3048cd02683f 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -244,6 +244,13 @@ in
           "ati_unfree" "amdgpu" "amdgpu-pro"
           "nv" "nvidia" "nvidiaLegacy340" "nvidiaLegacy304"
         ];
+        # TODO(@oxij): think how to easily add the rest, like those nvidia things
+        relatedPackages = concatLists
+          (mapAttrsToList (n: v:
+            optional (hasPrefix "xf86video" n) {
+              path  = [ "xorg" n ];
+              title = removePrefix "xf86video" n;
+            }) pkgs.xorg);
         description = ''
           The names of the video drivers the configuration
           supports. They will be tried in order until one that