Diffstat (limited to 'nixpkgs/nixos/modules/services/monitoring')
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/alerta.nix | 115
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/apcupsd.nix | 191
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/arbtt.nix | 63
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/bosun.nix | 166
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/cadvisor.nix | 140
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/collectd.nix | 138
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix | 34
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix | 272
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent-defaults.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent.nix | 236
-rwxr-xr-x  nixpkgs/nixos/modules/services/monitoring/dd-agent/update-dd-agent-defaults | 9
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/do-agent.nix | 34
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix | 63
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix | 66
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/grafana.nix | 559
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/graphite.nix | 570
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/hdaps.nix | 23
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/heapster.nix | 57
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/incron.nix | 98
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/kapacitor.nix | 191
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/loki.nix | 112
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/longview.nix | 160
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/monit.nix | 46
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/munin.nix | 402
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/nagios.nix | 212
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/netdata.nix | 219
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix | 187
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix | 642
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix | 249
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.xml | 227
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix | 38
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bind.nix | 54
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix | 70
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix | 77
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix | 38
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix | 73
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix | 38
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix | 35
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix | 19
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix | 46
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mail.nix | 158
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix | 66
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix | 58
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix | 65
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix | 40
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix | 82
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix | 47
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix | 92
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix | 70
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix | 31
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix | 44
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix | 66
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix | 88
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix | 66
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix | 166
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix | 47
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix | 81
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix | 70
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/riemann.nix | 105
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/scollector.nix | 135
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/smartd.nix | 242
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/statsd.nix | 149
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/sysstat.nix | 76
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/teamviewer.nix | 46
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/telegraf.nix | 71
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/thanos.nix | 835
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/tuptime.nix | 84
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/ups.nix | 263
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/uptime.nix | 96
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/vnstat.nix | 58
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix | 159
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/zabbix-proxy.nix | 299
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix | 299
74 files changed, 10295 insertions, 0 deletions
diff --git a/nixpkgs/nixos/modules/services/monitoring/alerta.nix b/nixpkgs/nixos/modules/services/monitoring/alerta.nix
new file mode 100644
index 000000000000..34f2d41706a5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/alerta.nix
@@ -0,0 +1,115 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.alerta;
+
+  alertaConf = pkgs.writeTextFile {
+    name = "alertad.conf";
+    text = ''
+      DATABASE_URL = '${cfg.databaseUrl}'
+      DATABASE_NAME = '${cfg.databaseName}'
+      LOG_FILE = '${cfg.logDir}/alertad.log'
+      LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+      CORS_ORIGINS = [ ${concatMapStringsSep ", " (s: "\"" + s + "\"") cfg.corsOrigins} ];
+      AUTH_REQUIRED = ${if cfg.authenticationRequired then "True" else "False"}
+      SIGNUP_ENABLED = ${if cfg.signupEnabled then "True" else "False"}
+      ${cfg.extraConfig}
+    '';
+  };
+in
+{
+  options.services.alerta = {
+    enable = mkEnableOption "alerta";
+
+    port = mkOption {
+      type = types.int;
+      default = 5000;
+      description = "Port of Alerta";
+    };
+
+    bind = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      example = "0.0.0.0";
+      description = "Address to bind to. The default is to bind to all addresses.";
+    };
+
+    logDir = mkOption {
+      type = types.path;
+      description = "Location where the logfiles are stored";
+      default = "/var/log/alerta";
+    };
+
+    databaseUrl = mkOption {
+      type = types.str;
+      description = "URL of the MongoDB or PostgreSQL database to connect to";
+      default = "mongodb://localhost";
+      example = "mongodb://localhost";
+    };
+
+    databaseName = mkOption {
+      type = types.str;
+      description = "Name of the database instance to connect to";
+      default = "monitoring";
+      example = "monitoring";
+    };
+
+    corsOrigins = mkOption {
+      type = types.listOf types.str;
+      description = "List of URLs that can access the API for Cross-Origin Resource Sharing (CORS)";
+      example = [ "http://localhost" "http://localhost:5000" ];
+      default = [ "http://localhost" "http://localhost:5000" ];
+    };
+
+    authenticationRequired = mkOption {
+      type = types.bool;
+      description = "Whether users must authenticate when using the web UI or command-line tool";
+      default = false;
+    };
+
+    signupEnabled = mkOption {
+      type = types.bool;
+      description = "Whether to prevent sign-up of new users via the web UI";
+      default = true;
+    };
+
+    extraConfig = mkOption {
+      description = "These lines go into alertad.conf verbatim.";
+      default = "";
+      type = types.lines;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.logDir}' - alerta alerta - -"
+    ];
+
+    systemd.services.alerta = {
+      description = "Alerta Monitoring System";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      environment = {
+        ALERTA_SVR_CONF_FILE = alertaConf;
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.python36Packages.alerta-server}/bin/alertad run --port ${toString cfg.port} --host ${cfg.bind}";
+        User = "alerta";
+        Group = "alerta";
+      };
+    };
+
+    environment.systemPackages = [ pkgs.python36Packages.alerta ];
+
+    users.users.alerta = {
+      uid = config.ids.uids.alerta;
+      description = "Alerta user";
+    };
+
+    users.groups.alerta = {
+      gid = config.ids.gids.alerta;
+    };
+  };
+}
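For illustration, a minimal sketch of enabling this module from configuration.nix, using only the options defined above (the bind address choice is hypothetical):

  services.alerta = {
    enable = true;
    # Listen only on localhost instead of the default 0.0.0.0 (hypothetical choice).
    bind = "127.0.0.1";
    port = 5000;
    authenticationRequired = true;
  };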
diff --git a/nixpkgs/nixos/modules/services/monitoring/apcupsd.nix b/nixpkgs/nixos/modules/services/monitoring/apcupsd.nix
new file mode 100644
index 000000000000..75218aa1d46b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/apcupsd.nix
@@ -0,0 +1,191 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.apcupsd;
+
+  configFile = pkgs.writeText "apcupsd.conf" ''
+    ## apcupsd.conf v1.1 ##
+    # apcupsd complains if the first line is not like above.
+    ${cfg.configText}
+    SCRIPTDIR ${toString scriptDir}
+  '';
+
+  # List of events from "man apccontrol"
+  eventList = [
+    "annoyme"
+    "battattach"
+    "battdetach"
+    "changeme"
+    "commfailure"
+    "commok"
+    "doreboot"
+    "doshutdown"
+    "emergency"
+    "failing"
+    "killpower"
+    "loadlimit"
+    "mainsback"
+    "onbattery"
+    "offbattery"
+    "powerout"
+    "remotedown"
+    "runlimit"
+    "timeout"
+    "startselftest"
+    "endselftest"
+  ];
+
+  shellCmdsForEventScript = eventname: commands: ''
+    echo "#!${pkgs.runtimeShell}" > "$out/${eventname}"
+    echo '${commands}' >> "$out/${eventname}"
+    chmod a+x "$out/${eventname}"
+  '';
+
+  eventToShellCmds = event: if builtins.hasAttr event cfg.hooks then (shellCmdsForEventScript event (builtins.getAttr event cfg.hooks)) else "";
+
+  scriptDir = pkgs.runCommand "apcupsd-scriptdir" { preferLocalBuild = true; } (''
+    mkdir "$out"
+    # Copy SCRIPTDIR from apcupsd package
+    cp -r ${pkgs.apcupsd}/etc/apcupsd/* "$out"/
+    # Make the files writeable (nix will unset the write bits afterwards)
+    chmod u+w "$out"/*
+    # Remove the sample event notification scripts, because they don't work
+    # anyways (they try to send mail to "root" with the "mail" command)
+    (cd "$out" && rm changeme commok commfailure onbattery offbattery)
+    # Remove the sample apcupsd.conf file (we're generating our own)
+    rm "$out/apcupsd.conf"
+    # Set the SCRIPTDIR= line in apccontrol to the dir we're creating now
+    sed -i -e "s|^SCRIPTDIR=.*|SCRIPTDIR=$out|" "$out/apccontrol"
+    '' + concatStringsSep "\n" (map eventToShellCmds eventList)
+
+  );
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.apcupsd = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Whether to enable the APC UPS daemon. apcupsd monitors your UPS and
+          permits orderly shutdown of your computer in the event of a power
+          failure. User manual: http://www.apcupsd.com/manual/manual.html.
+          Note that apcupsd runs as root (to allow shutdown of computer).
+          You can check the status of your UPS with the "apcaccess" command.
+        '';
+      };
+
+      configText = mkOption {
+        default = ''
+          UPSTYPE usb
+          NISIP 127.0.0.1
+          BATTERYLEVEL 50
+          MINUTES 5
+        '';
+        type = types.lines;
+        description = ''
+          Contents of the runtime configuration file, apcupsd.conf. The default
+          settings make apcupsd autodetect USB UPSes, limit network access to
+          localhost, and shut down the system when the battery level is below 50
+          percent or when the UPS has calculated that it has 5 minutes or less
+          of remaining power-on time. See man apcupsd.conf for details.
+        '';
+      };
+
+      hooks = mkOption {
+        default = {};
+        example = {
+          doshutdown = ''# shell commands to notify that the computer is shutting down'';
+        };
+        type = types.attrsOf types.lines;
+        description = ''
+          Each attribute in this option names an apcupsd event and the string
+          value it contains will be executed in a shell, in response to that
+          event (prior to the default action). See "man apccontrol" for the
+          list of events and what they represent.
+
+          A hook script can stop apccontrol from doing its default action by
+          exiting with value 99. Do not do this unless you know what you're
+          doing.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [ {
+      assertion = let hooknames = builtins.attrNames cfg.hooks; in all (x: elem x eventList) hooknames;
+      message = ''
+        One (or more) attribute names in services.apcupsd.hooks are invalid.
+        Current attribute names: ${toString (builtins.attrNames cfg.hooks)}
+        Valid attribute names  : ${toString eventList}
+      '';
+    } ];
+
+    # Give users access to the "apcaccess" tool
+    environment.systemPackages = [ pkgs.apcupsd ];
+
+    # NOTE 1: apcupsd runs as root because it needs permission to run
+    # "shutdown"
+    #
+    # NOTE 2: When apcupsd calls "wall", it prints an error because stdout is
+    # not connected to a tty (it is connected to the journal):
+    #   wall: cannot get tty name: Inappropriate ioctl for device
+    # The message still gets through.
+    systemd.services.apcupsd = {
+      description = "APC UPS Daemon";
+      wantedBy = [ "multi-user.target" ];
+      preStart = "mkdir -p /run/apcupsd/";
+      serviceConfig = {
+        ExecStart = "${pkgs.apcupsd}/bin/apcupsd -b -f ${configFile} -d1";
+        # TODO: When apcupsd has initiated a shutdown, systemd always ends up
+        # waiting for it to stop ("A stop job is running for UPS daemon"). This
+        # is weird, because in the journal one can clearly see that apcupsd has
+        # received the SIGTERM signal and has already quit (or so it seems).
+        # This reduces the wait time from 90 seconds (default) to just 5. Then
+        # systemd kills it with SIGKILL.
+        TimeoutStopSec = 5;
+      };
+      unitConfig.Documentation = "man:apcupsd(8)";
+    };
+
+    # A special service to tell the UPS to power down/hibernate just before the
+    # computer shuts down. (The UPS has a built in delay before it actually
+    # shuts off power.) Copied from here:
+    # http://forums.opensuse.org/english/get-technical-help-here/applications/479499-apcupsd-systemd-killpower-issues.html
+    systemd.services.apcupsd-killpower = {
+      description = "APC UPS Kill Power";
+      after = [ "shutdown.target" ]; # append umount.target?
+      before = [ "final.target" ];
+      wantedBy = [ "shutdown.target" ];
+      unitConfig = {
+        ConditionPathExists = "/run/apcupsd/powerfail";
+        DefaultDependencies = "no";
+      };
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.apcupsd}/bin/apcupsd --killpower -f ${configFile}";
+        TimeoutSec = "infinity";
+        StandardOutput = "tty";
+        RemainAfterExit = "yes";
+      };
+    };
+
+  };
+
+}
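A minimal sketch of using the hooks option from configuration.nix; the onbattery command is a placeholder, and any event name from "man apccontrol" works the same way:

  services.apcupsd = {
    enable = true;
    # Run a shell snippet when the UPS switches to battery power (placeholder command).
    hooks.onbattery = ''
      logger "apcupsd: running on battery power"
    '';
  };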
diff --git a/nixpkgs/nixos/modules/services/monitoring/arbtt.nix b/nixpkgs/nixos/modules/services/monitoring/arbtt.nix
new file mode 100644
index 000000000000..b41a3c7b5016
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/arbtt.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.arbtt;
+in {
+  options = {
+    services.arbtt = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable the arbtt statistics capture service.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.haskellPackages.arbtt;
+        defaultText = "pkgs.haskellPackages.arbtt";
+        example = literalExample "pkgs.haskellPackages.arbtt";
+        description = ''
+          The package to use for the arbtt binaries.
+        '';
+      };
+
+      logFile = mkOption {
+        type = types.str;
+        default = "%h/.arbtt/capture.log";
+        example = "/home/username/.arbtt-capture.log";
+        description = ''
+          The log file for captured samples.
+        '';
+      };
+
+      sampleRate = mkOption {
+        type = types.int;
+        default = 60;
+        example = 120;
+        description = ''
+          The sampling interval in seconds.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.arbtt = {
+      description = "arbtt statistics capture service";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${cfg.package}/bin/arbtt-capture --logfile=${cfg.logFile} --sample-rate=${toString cfg.sampleRate}";
+        Restart = "always";
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.michaelpj ];
+}
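A minimal sketch of enabling the capture service for a user session, assuming the default log file location is acceptable:

  services.arbtt = {
    enable = true;
    # Sample window titles every two minutes instead of the default 60 seconds.
    sampleRate = 120;
  };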
diff --git a/nixpkgs/nixos/modules/services/monitoring/bosun.nix b/nixpkgs/nixos/modules/services/monitoring/bosun.nix
new file mode 100644
index 000000000000..04e9da1c81a3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/bosun.nix
@@ -0,0 +1,166 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.bosun;
+
+  configFile = pkgs.writeText "bosun.conf" ''
+    ${optionalString (cfg.opentsdbHost !=null) "tsdbHost = ${cfg.opentsdbHost}"}
+    ${optionalString (cfg.influxHost !=null) "influxHost = ${cfg.influxHost}"}
+    httpListen = ${cfg.listenAddress}
+    stateFile = ${cfg.stateFile}
+    ledisDir = ${cfg.ledisDir}
+    checkFrequency = ${cfg.checkFrequency}
+
+    ${cfg.extraConfig}
+  '';
+
+in {
+
+  options = {
+
+    services.bosun = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to run bosun.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.bosun;
+        defaultText = "pkgs.bosun";
+        example = literalExample "pkgs.bosun";
+        description = ''
+          bosun binary to use.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "bosun";
+        description = ''
+          User account under which bosun runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "bosun";
+        description = ''
+          Group account under which bosun runs.
+        '';
+      };
+
+      opentsdbHost = mkOption {
+        type = types.nullOr types.str;
+        default = "localhost:4242";
+        description = ''
+          Host and port of the OpenTSDB database that stores bosun data.
+          To disable OpenTSDB, set this option to null.
+        '';
+      };
+
+      influxHost = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "localhost:8086";
+        description = ''
+           Host and port of the influxdb database.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = ":8070";
+        description = ''
+          The host address and port that bosun's web interface will listen on.
+        '';
+      };
+
+      stateFile = mkOption {
+        type = types.path;
+        default = "/var/lib/bosun/bosun.state";
+        description = ''
+          Path to bosun's state file.
+        '';
+      };
+
+      ledisDir = mkOption {
+        type = types.path;
+        default = "/var/lib/bosun/ledis_data";
+        description = ''
+          Path to bosun's ledis data dir
+        '';
+      };
+
+      checkFrequency = mkOption {
+        type = types.str;
+        default = "5m";
+        description = ''
+          Bosun's check frequency
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra configuration options for Bosun. You should describe your
+          desired templates, alerts, macros, etc through this configuration
+          option.
+
+          A detailed description of the supported syntax can be found at
+          http://bosun.org/configuration.html
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.bosun = {
+      description = "bosun metrics collector (part of Bosun)";
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        mkdir -p "$(dirname "${cfg.stateFile}")";
+        touch "${cfg.stateFile}"
+        touch "${cfg.stateFile}.tmp"
+
+        mkdir -p "${cfg.ledisDir}";
+
+        if [ "$(id -u)" = 0 ]; then
+          chown ${cfg.user}:${cfg.group} "${cfg.stateFile}"
+          chown ${cfg.user}:${cfg.group} "${cfg.stateFile}.tmp"
+          chown ${cfg.user}:${cfg.group} "${cfg.ledisDir}"
+        fi
+      '';
+
+      serviceConfig = {
+        PermissionsStartOnly = true;
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = ''
+          ${cfg.package}/bin/bosun -c ${configFile}
+        '';
+      };
+    };
+
+    users.users.bosun = {
+      description = "bosun user";
+      group = "bosun";
+      uid = config.ids.uids.bosun;
+    };
+
+    users.groups.bosun.gid = config.ids.gids.bosun;
+
+  };
+
+}
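A minimal sketch of enabling Bosun against a local OpenTSDB instance; alert and template definitions would go into extraConfig as described above:

  services.bosun = {
    enable = true;
    opentsdbHost = "localhost:4242";   # the default; set to null to disable OpenTSDB
    listenAddress = ":8070";
  };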
diff --git a/nixpkgs/nixos/modules/services/monitoring/cadvisor.nix b/nixpkgs/nixos/modules/services/monitoring/cadvisor.nix
new file mode 100644
index 000000000000..655a6934a266
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/cadvisor.nix
@@ -0,0 +1,140 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cadvisor;
+
+in {
+  options = {
+    services.cadvisor = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Whether to enable cadvisor service.";
+      };
+
+      listenAddress = mkOption {
+        default = "127.0.0.1";
+        type = types.str;
+        description = "Cadvisor listening host";
+      };
+
+      port = mkOption {
+        default = 8080;
+        type = types.int;
+        description = "Cadvisor listening port";
+      };
+
+      storageDriver = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        example = "influxdb";
+        description = "Cadvisor storage driver.";
+      };
+
+      storageDriverHost = mkOption {
+        default = "localhost:8086";
+        type = types.str;
+        description = "Cadvisor storage driver host.";
+      };
+
+      storageDriverDb = mkOption {
+        default = "root";
+        type = types.str;
+        description = "Cadvisord storage driver database name.";
+      };
+
+      storageDriverUser = mkOption {
+        default = "root";
+        type = types.str;
+        description = "Cadvisor storage driver username.";
+      };
+
+      storageDriverPassword = mkOption {
+        default = "root";
+        type = types.str;
+        description = ''
+          Cadvisor storage driver password.
+
+          Warning: this password is stored in the world-readable Nix store. It's
+          recommended to use the <option>storageDriverPasswordFile</option> option
+          since that gives you control over the security of the password.
+          <option>storageDriverPasswordFile</option> also takes precedence over <option>storageDriverPassword</option>.
+        '';
+      };
+
+      storageDriverPasswordFile = mkOption {
+        type = types.str;
+        description = ''
+          File that contains the cadvisor storage driver password.
+
+          <option>storageDriverPasswordFile</option> takes precedence over <option>storageDriverPassword</option>
+
+          Warning: when <option>storageDriverPassword</option> is non-empty this defaults to a file in the
+          world-readable Nix store that contains the value of <option>storageDriverPassword</option>.
+
+          It's recommended to override this with a path not in the Nix store.
+          Tip: use <link xlink:href='https://nixos.org/nixops/manual/#idm140737318306400'>nixops key management</link>
+        '';
+      };
+
+      storageDriverSecure = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Cadvisor storage driver, enable secure communication.";
+      };
+
+      extraOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Additional cadvisor options.
+          
+          See <link xlink:href='https://github.com/google/cadvisor/blob/master/docs/runtime_options.md'/> for available options.
+        '';
+      };
+    };
+  };
+
+  config = mkMerge [
+    { services.cadvisor.storageDriverPasswordFile = mkIf (cfg.storageDriverPassword != "") (
+        mkDefault (toString (pkgs.writeTextFile {
+          name = "cadvisor-storage-driver-password";
+          text = cfg.storageDriverPassword;
+        }))
+      );
+    }
+
+    (mkIf cfg.enable {
+      systemd.services.cadvisor = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" "docker.service" "influxdb.service" ];
+
+        postStart = mkBefore ''
+          until ${pkgs.curl.bin}/bin/curl -s -o /dev/null 'http://${cfg.listenAddress}:${toString cfg.port}/containers/'; do
+            sleep 1;
+          done
+        '';
+
+        script = ''
+          exec ${pkgs.cadvisor}/bin/cadvisor \
+            -logtostderr=true \
+            -listen_ip="${cfg.listenAddress}" \
+            -port="${toString cfg.port}" \
+            ${escapeShellArgs cfg.extraOptions} \
+            ${optionalString (cfg.storageDriver != null) ''
+              -storage_driver "${cfg.storageDriver}" \
+              -storage_driver_user "${cfg.storageDriverHost}" \
+              -storage_driver_db "${cfg.storageDriverDb}" \
+              -storage_driver_user "${cfg.storageDriverUser}" \
+              -storage_driver_password "$(cat "${cfg.storageDriverPasswordFile}")" \
+              ${optionalString cfg.storageDriverSecure "-storage_driver_secure"}
+            ''}
+        '';
+
+        serviceConfig.TimeoutStartSec=300;
+      };
+    })
+  ];
+}
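A minimal sketch of running cadvisor with an InfluxDB storage backend; the password file path is a placeholder kept outside the Nix store, as the option documentation recommends:

  services.cadvisor = {
    enable = true;
    storageDriver = "influxdb";
    storageDriverHost = "localhost:8086";
    storageDriverDb = "cadvisor";
    storageDriverPasswordFile = "/run/keys/cadvisor-influxdb-password";
  };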
diff --git a/nixpkgs/nixos/modules/services/monitoring/collectd.nix b/nixpkgs/nixos/modules/services/monitoring/collectd.nix
new file mode 100644
index 000000000000..ef3663c62e04
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/collectd.nix
@@ -0,0 +1,138 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.collectd;
+
+  conf = pkgs.writeText "collectd.conf" ''
+    BaseDir "${cfg.dataDir}"
+    AutoLoadPlugin ${boolToString cfg.autoLoadPlugin}
+    Hostname "${config.networking.hostName}"
+
+    LoadPlugin syslog
+    <Plugin "syslog">
+      LogLevel "info"
+      NotifyLevel "OKAY"
+    </Plugin>
+
+    ${concatStrings (mapAttrsToList (plugin: pluginConfig: ''
+      LoadPlugin ${plugin}
+      <Plugin "${plugin}">
+      ${pluginConfig}
+      </Plugin>
+    '') cfg.plugins)}
+
+    ${concatMapStrings (f: ''
+      Include "${f}"
+    '') cfg.include}
+
+    ${cfg.extraConfig}
+  '';
+
+  package =
+    if cfg.buildMinimalPackage
+    then minimalPackage
+    else cfg.package;
+
+  minimalPackage = cfg.package.override {
+    enabledPlugins = [ "syslog" ] ++ builtins.attrNames cfg.plugins;
+  };
+
+in {
+  options.services.collectd = with types; {
+    enable = mkEnableOption "collectd agent";
+
+    package = mkOption {
+      default = pkgs.collectd;
+      defaultText = "pkgs.collectd";
+      description = ''
+        Which collectd package to use.
+      '';
+      type = types.package;
+    };
+
+    buildMinimalPackage = mkOption {
+      default = false;
+      description = ''
+        Build a minimal collectd package with only the configured `services.collectd.plugins`
+      '';
+      type = types.bool;
+    };
+
+    user = mkOption {
+      default = "collectd";
+      description = ''
+        User under which to run collectd.
+      '';
+      type = nullOr str;
+    };
+
+    dataDir = mkOption {
+      default = "/var/lib/collectd";
+      description = ''
+        Data directory for collectd agent.
+      '';
+      type = path;
+    };
+
+    autoLoadPlugin = mkOption {
+      default = false;
+      description = ''
+        Enable plugin autoloading.
+      '';
+      type = bool;
+    };
+
+    include = mkOption {
+      default = [];
+      description = ''
+        Additional paths to load config from.
+      '';
+      type = listOf str;
+    };
+
+    plugins = mkOption {
+      default = {};
+      example = { cpu = ""; memory = ""; network = "Server 192.168.1.1 25826"; };
+      description = ''
+        Attribute set of plugin names to plugin config segments
+      '';
+      type = types.attrsOf types.str;
+    };
+
+    extraConfig = mkOption {
+      default = "";
+      description = ''
+        Extra configuration for collectd.
+      '';
+      type = lines;
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} - - -"
+    ];
+
+    systemd.services.collectd = {
+      description = "Collectd Monitoring Agent";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${package}/sbin/collectd -C ${conf} -f";
+        User = cfg.user;
+        Restart = "on-failure";
+        RestartSec = 3;
+      };
+    };
+
+    users.users = optionalAttrs (cfg.user == "collectd") {
+      collectd = {
+        isSystemUser = true;
+      };
+    };
+  };
+}
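A minimal sketch of a collectd configuration built as a minimal package, mirroring the plugins example above; the remote server address is hypothetical:

  services.collectd = {
    enable = true;
    buildMinimalPackage = true;   # only syslog plus the plugins below are compiled in
    plugins = {
      cpu = "";
      memory = "";
      network = "Server 192.168.1.1 25826";
    };
  };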
diff --git a/nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix b/nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix
new file mode 100644
index 000000000000..88ca3a9227d2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix
@@ -0,0 +1,34 @@
+# A general watchdog for the linux operating system that should run in the
+# background at all times to ensure a realtime process won't hang the machine
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) das_watchdog;
+
+in {
+  ###### interface
+
+  options = {
+    services.das_watchdog.enable = mkEnableOption "realtime watchdog";
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.das_watchdog.enable {
+    environment.systemPackages = [ das_watchdog ];
+    systemd.services.das_watchdog = {
+      description = "Watchdog to ensure a realtime process won't hang the machine";
+      after = [ "multi-user.target" "sound.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "root";
+        Type = "simple";
+        ExecStart = "${das_watchdog}/bin/das_watchdog";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix b/nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix
new file mode 100644
index 000000000000..f1cb890794e1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix
@@ -0,0 +1,272 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.datadog-agent;
+
+  ddConf = {
+    dd_url              = "https://app.datadoghq.com";
+    skip_ssl_validation = false;
+    confd_path          = "/etc/datadog-agent/conf.d";
+    additional_checksd  = "/etc/datadog-agent/checks.d";
+    use_dogstatsd       = true;
+  }
+  // optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; }
+  // optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; }
+  // optionalAttrs (cfg.tags != null ) { tags = concatStringsSep ", " cfg.tags; }
+  // optionalAttrs (cfg.enableLiveProcessCollection) { process_config = { enabled = "true"; }; }
+  // optionalAttrs (cfg.enableTraceAgent) { apm_config = { enabled = true; }; }
+  // cfg.extraConfig;
+
+  # Generate Datadog configuration files for each configured checks.
+  # This works because check configurations have predictable paths,
+  # and because JSON is a valid subset of YAML.
+  makeCheckConfigs = entries: mapAttrs' (name: conf: {
+    name = "datadog-agent/conf.d/${name}.d/conf.yaml";
+    value.source = pkgs.writeText "${name}-check-conf.yaml" (builtins.toJSON conf);
+  }) entries;
+
+  defaultChecks = {
+    disk = cfg.diskCheck;
+    network = cfg.networkCheck;
+  };
+
+  # Assemble all check configurations and the top-level agent
+  # configuration.
+  etcfiles = with pkgs; with builtins;
+  { "datadog-agent/datadog.yaml" = {
+      source = writeText "datadog.yaml" (toJSON ddConf);
+    };
+  } // makeCheckConfigs (cfg.checks // defaultChecks);
+
+  # Apply the configured extraIntegrations to the provided agent
+  # package. See the documentation of `dd-agent/integrations-core.nix`
+  # for detailed information on this.
+  datadogPkg = cfg.package.override {
+    pythonPackages = pkgs.datadog-integrations-core cfg.extraIntegrations;
+  };
+in {
+  options.services.datadog-agent = {
+    enable = mkOption {
+      description = ''
+        Whether to enable the datadog-agent v6 monitoring service
+      '';
+      default = false;
+      type = types.bool;
+    };
+
+    package = mkOption {
+      default = pkgs.datadog-agent;
+      defaultText = "pkgs.datadog-agent";
+      description = ''
+        Which DataDog v6 agent package to use. Note that the provided
+        package is expected to have an overridable `pythonPackages`-attribute
+        which configures the Python environment with the Datadog
+        checks.
+      '';
+      type = types.package;
+    };
+
+    apiKeyFile = mkOption {
+      description = ''
+        Path to a file containing the Datadog API key to associate the
+        agent with your account.
+      '';
+      example = "/run/keys/datadog_api_key";
+      type = types.path;
+    };
+
+    tags = mkOption {
+      description = "The tags to mark this Datadog agent";
+      example = [ "test" "service" ];
+      default = null;
+      type = types.nullOr (types.listOf types.str);
+    };
+
+    hostname = mkOption {
+      description = "The hostname to show in the Datadog dashboard (optional)";
+      default = null;
+      example = "mymachine.mydomain";
+      type = types.nullOr types.str;
+    };
+
+    logLevel = mkOption {
+      description = "Logging verbosity.";
+      default = null;
+      type = types.nullOr (types.enum ["DEBUG" "INFO" "WARN" "ERROR"]);
+    };
+
+    extraIntegrations = mkOption {
+      default = {};
+      type    = types.attrs;
+
+      description = ''
+        Extra integrations from the Datadog core-integrations
+        repository that should be built and included.
+
+        By default the included integrations are disk, mongo, network,
+        nginx and postgres.
+
+        To include additional integrations the name of the derivation
+        and a function to filter its dependencies from the Python
+        package set must be provided.
+      '';
+
+      example = {
+        ntp = (pythonPackages: [ pythonPackages.ntplib ]);
+      };
+    };
+
+    extraConfig = mkOption {
+      default = {};
+      type = types.attrs;
+      description = ''
+        Extra configuration options that will be merged into the
+        main config file <filename>datadog.yaml</filename>.
+      '';
+     };
+
+    enableLiveProcessCollection = mkOption {
+      description = ''
+        Whether to enable the live process collection agent.
+      '';
+      default = false;
+      type = types.bool;
+    };
+
+    enableTraceAgent = mkOption {
+      description = ''
+        Whether to enable the trace agent.
+      '';
+      default = false;
+      type = types.bool;
+    };
+
+    checks = mkOption {
+      description = ''
+        Configuration for all Datadog checks. Keys of this attribute
+        set will be used as the name of the check to create the
+        appropriate configuration in `conf.d/$check.d/conf.yaml`.
+
+        The configuration is converted into JSON from the plain Nix
+        language configuration, meaning that you should write
+        configuration adhering to Datadog's documentation - but in Nix
+        language.
+
+        Refer to the implementation of this module (specifically the
+        definition of `defaultChecks`) for an example.
+
+        Note: The 'disk' and 'network' check are configured in
+        separate options because they exist by default. Attempting to
+        override their configuration here will have no effect.
+      '';
+
+      example = {
+        http_check = {
+          init_config = null; # sic!
+          instances = [
+            {
+              name = "some-service";
+              url = "http://localhost:1337/healthz";
+              tags = [ "some-service" ];
+            }
+          ];
+        };
+      };
+
+      default = {};
+
+      # sic! The structure of the values is up to the check, so we can
+      # not usefully constrain the type further.
+      type = with types; attrsOf attrs;
+    };
+
+    diskCheck = mkOption {
+      description = "Disk check config";
+      type = types.attrs;
+      default = {
+        init_config = {};
+        instances = [ { use_mount = "false"; } ];
+      };
+    };
+
+    networkCheck = mkOption {
+      description = "Network check config";
+      type = types.attrs;
+      default = {
+        init_config = {};
+        # Network check only supports one configured instance
+        instances = [ { collect_connection_state = false;
+          excluded_interfaces = [ "lo" "lo0" ]; } ];
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ datadogPkg pkgs.sysstat pkgs.procps pkgs.iproute ];
+
+    users.users.datadog = {
+      description = "Datadog Agent User";
+      uid = config.ids.uids.datadog;
+      group = "datadog";
+      home = "/var/log/datadog/";
+      createHome = true;
+    };
+
+    users.groups.datadog.gid = config.ids.gids.datadog;
+
+    systemd.services = let
+      makeService = attrs: recursiveUpdate {
+        path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.iproute ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = "datadog";
+          Group = "datadog";
+          Restart = "always";
+          RestartSec = 2;
+        };
+        restartTriggers = [ datadogPkg ] ++  map (x: x.source) (attrValues etcfiles);
+      } attrs;
+    in {
+      datadog-agent = makeService {
+        description = "Datadog agent monitor";
+        preStart = ''
+          chown -R datadog: /etc/datadog-agent
+          rm -f /etc/datadog-agent/auth_token
+        '';
+        script = ''
+          export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
+          exec ${datadogPkg}/bin/agent run -c /etc/datadog-agent/datadog.yaml
+        '';
+        serviceConfig.PermissionsStartOnly = true;
+      };
+
+      dd-jmxfetch = lib.mkIf (lib.hasAttr "jmx" cfg.checks) (makeService {
+        description = "Datadog JMX Fetcher";
+        path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ];
+        serviceConfig.ExecStart = "${datadogPkg}/bin/dd-jmxfetch";
+      });
+
+      datadog-process-agent = lib.mkIf cfg.enableLiveProcessCollection (makeService {
+        description = "Datadog Live Process Agent";
+        path = [ ];
+        script = ''
+          export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
+          ${pkgs.datadog-process-agent}/bin/agent --config /etc/datadog-agent/datadog.yaml
+        '';
+      });
+
+      datadog-trace-agent = lib.mkIf cfg.enableTraceAgent (makeService {
+        description = "Datadog Trace Agent";
+        path = [ ];
+        script = ''
+          export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
+          ${datadogPkg}/bin/trace-agent -config /etc/datadog-agent/datadog.yaml
+        '';
+      });
+
+    };
+
+    environment.etc = etcfiles;
+  };
+}
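A minimal sketch of enabling the v6 agent with one extra check, based on the http_check example above; the key file path and URL are placeholders:

  services.datadog-agent = {
    enable = true;
    apiKeyFile = "/run/keys/datadog_api_key";   # read at runtime, stays out of the Nix store
    tags = [ "nixos" "monitoring" ];
    checks.http_check = {
      init_config = null;
      instances = [
        { name = "some-service"; url = "http://localhost:1337/healthz"; }
      ];
    };
  };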
diff --git a/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent-defaults.nix b/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent-defaults.nix
new file mode 100644
index 000000000000..045128197421
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent-defaults.nix
@@ -0,0 +1,8 @@
+# Generated using update-dd-agent-defaults, please re-run after updating dd-agent. DO NOT EDIT MANUALLY.
+[
+  "auto_conf"
+  "agent_metrics.yaml.default"
+  "disk.yaml.default"
+  "network.yaml.default"
+  "ntp.yaml.default"
+]
diff --git a/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent.nix b/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
new file mode 100644
index 000000000000..e91717fb2054
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
@@ -0,0 +1,236 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dd-agent;
+
+  ddConf = pkgs.writeText "datadog.conf" ''
+    [Main]
+    dd_url: https://app.datadoghq.com
+    skip_ssl_validation: no
+    api_key: ${cfg.api_key}
+    ${optionalString (cfg.hostname != null) "hostname: ${cfg.hostname}"}
+
+    collector_log_file: /var/log/datadog/collector.log
+    forwarder_log_file: /var/log/datadog/forwarder.log
+    dogstatsd_log_file: /var/log/datadog/dogstatsd.log
+    pup_log_file:       /var/log/datadog/pup.log
+
+    # proxy_host: my-proxy.com
+    # proxy_port: 3128
+    # proxy_user: user
+    # proxy_password: password
+
+    # tags: mytag0, mytag1
+    ${optionalString (cfg.tags != null ) "tags: ${concatStringsSep ", " cfg.tags }"}
+
+    # collect_ec2_tags: no
+    # recent_point_threshold: 30
+    # use_mount: no
+    # listen_port: 17123
+    # graphite_listen_port: 17124
+    # non_local_traffic: no
+    # use_curl_http_client: False
+    # bind_host: localhost
+
+    # use_pup: no
+    # pup_port: 17125
+    # pup_interface: localhost
+    # pup_url: http://localhost:17125
+
+    # dogstatsd_port : 8125
+    # dogstatsd_interval : 10
+    # dogstatsd_normalize : yes
+    # statsd_forward_host: address_of_own_statsd_server
+    # statsd_forward_port: 8125
+
+    # device_blacklist_re: .*\/dev\/mapper\/lxc-box.*
+
+    # ganglia_host: localhost
+    # ganglia_port: 8651
+  '';
+
+  diskConfig = pkgs.writeText "disk.yaml" ''
+    init_config:
+
+    instances:
+      - use_mount: no
+  '';
+
+  networkConfig = pkgs.writeText "network.yaml" ''
+    init_config:
+
+    instances:
+      # Network check only supports one configured instance
+      - collect_connection_state: false
+        excluded_interfaces:
+          - lo
+          - lo0
+  '';
+
+  postgresqlConfig = pkgs.writeText "postgres.yaml" cfg.postgresqlConfig;
+  nginxConfig = pkgs.writeText "nginx.yaml" cfg.nginxConfig;
+  mongoConfig = pkgs.writeText "mongo.yaml" cfg.mongoConfig;
+  jmxConfig = pkgs.writeText "jmx.yaml" cfg.jmxConfig;
+  processConfig = pkgs.writeText "process.yaml" cfg.processConfig;
+
+  etcfiles =
+    let
+      defaultConfd = import ./dd-agent-defaults.nix;
+    in
+      listToAttrs (map (f: {
+        name = "dd-agent/conf.d/${f}";
+        value.source = "${pkgs.dd-agent}/agent/conf.d-system/${f}";
+      }) defaultConfd) //
+      {
+        "dd-agent/datadog.conf".source = ddConf;
+        "dd-agent/conf.d/disk.yaml".source = diskConfig;
+        "dd-agent/conf.d/network.yaml".source = networkConfig;
+      } //
+      (optionalAttrs (cfg.postgresqlConfig != null)
+      {
+        "dd-agent/conf.d/postgres.yaml".source = postgresqlConfig;
+      }) //
+      (optionalAttrs (cfg.nginxConfig != null)
+      {
+        "dd-agent/conf.d/nginx.yaml".source = nginxConfig;
+      }) //
+      (optionalAttrs (cfg.mongoConfig != null)
+      { 
+        "dd-agent/conf.d/mongo.yaml".source = mongoConfig;
+      }) //
+      (optionalAttrs (cfg.processConfig != null)
+      { 
+        "dd-agent/conf.d/process.yaml".source = processConfig;
+      }) //
+      (optionalAttrs (cfg.jmxConfig != null)
+      {
+        "dd-agent/conf.d/jmx.yaml".source = jmxConfig;
+      });
+
+in {
+  options.services.dd-agent = {
+    enable = mkOption {
+      description = ''
+        Whether to enable the dd-agent v5 monitoring service.
+        For datadog-agent v6, see <option>services.datadog-agent.enable</option>.
+      '';
+      default = false;
+      type = types.bool;
+    };
+
+    api_key = mkOption {
+      description = ''
+        The Datadog API key to associate the agent with your account.
+
+        Warning: this key is stored in cleartext within the world-readable
+        Nix store! Consider using the new v6
+        <option>services.datadog-agent</option> module instead.
+      '';
+      example = "ae0aa6a8f08efa988ba0a17578f009ab";
+      type = types.str;
+    };
+
+    tags = mkOption {
+      description = "The tags to mark this Datadog agent";
+      example = [ "test" "service" ];
+      default = null;
+      type = types.nullOr (types.listOf types.str);
+    };
+
+    hostname = mkOption {
+      description = "The hostname to show in the Datadog dashboard (optional)";
+      default = null;
+      example = "mymachine.mydomain";
+      type = types.nullOr types.str;
+    };
+
+    postgresqlConfig = mkOption {
+      description = "Datadog PostgreSQL integration configuration";
+      default = null;
+      type = types.nullOr types.lines;
+    };
+
+    nginxConfig = mkOption {
+      description = "Datadog nginx integration configuration";
+      default = null;
+      type = types.nullOr types.lines;
+    };
+
+    mongoConfig = mkOption {
+      description = "MongoDB integration configuration";
+      default = null;
+      type = types.nullOr types.lines;
+    };
+
+    jmxConfig = mkOption {
+      description = "JMX integration configuration";
+      default = null;
+      type = types.nullOr types.lines;
+    };
+
+    processConfig = mkOption {
+      description = ''
+        Process integration configuration
+        See <link xlink:href="https://docs.datadoghq.com/integrations/process/"/>
+      '';
+      default = null;
+      type = types.nullOr types.lines;
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.dd-agent pkgs.sysstat pkgs.procps ];
+
+    users.users.datadog = {
+      description = "Datadog Agent User";
+      uid = config.ids.uids.datadog;
+      group = "datadog";
+      home = "/var/log/datadog/";
+      createHome = true;
+    };
+
+    users.groups.datadog.gid = config.ids.gids.datadog;
+
+    systemd.services = let
+      makeService = attrs: recursiveUpdate {
+        path = [ pkgs.dd-agent pkgs.python pkgs.sysstat pkgs.procps pkgs.gohai ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = "datadog";
+          Group = "datadog";
+          Restart = "always";
+          RestartSec = 2;
+          PrivateTmp = true;
+        };
+        restartTriggers = [ pkgs.dd-agent ddConf diskConfig networkConfig postgresqlConfig nginxConfig mongoConfig jmxConfig processConfig ];
+      } attrs;
+    in {
+      dd-agent = makeService {
+        description = "Datadog agent monitor";
+        serviceConfig.ExecStart = "${pkgs.dd-agent}/bin/dd-agent foreground";
+      };
+
+      dogstatsd = makeService {
+        description = "Datadog statsd";
+        environment.TMPDIR = "/run/dogstatsd";
+        serviceConfig = {
+          ExecStart = "${pkgs.dd-agent}/bin/dogstatsd start";
+          Type = "forking";
+          PIDFile = "/run/dogstatsd/dogstatsd.pid";
+          RuntimeDirectory = "dogstatsd";
+        };
+      };
+
+      dd-jmxfetch = lib.mkIf (cfg.jmxConfig != null) {
+        description = "Datadog JMX Fetcher";
+        path = [ pkgs.dd-agent pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ];
+        serviceConfig.ExecStart = "${pkgs.dd-agent}/bin/dd-jmxfetch";
+      };
+    };
+
+    environment.etc = etcfiles;
+  };
+}
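A minimal sketch of the older v5 module with one integration enabled; the API key is a placeholder and, as the option warns, ends up in the world-readable Nix store, and the YAML keys follow Datadog's nginx integration documentation:

  services.dd-agent = {
    enable = true;
    api_key = "0123456789abcdef0123456789abcdef";   # placeholder
    nginxConfig = ''
      init_config:

      instances:
        - nginx_status_url: http://localhost/nginx_status/
    '';
  };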
diff --git a/nixpkgs/nixos/modules/services/monitoring/dd-agent/update-dd-agent-defaults b/nixpkgs/nixos/modules/services/monitoring/dd-agent/update-dd-agent-defaults
new file mode 100755
index 000000000000..76724173171a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/dd-agent/update-dd-agent-defaults
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+dd=$(nix-build --no-out-link -A dd-agent ../../../..)
+echo '# Generated using update-dd-agent-defaults, please re-run after updating dd-agent. DO NOT EDIT MANUALLY.' > dd-agent-defaults.nix
+echo '[' >> dd-agent-defaults.nix
+echo '  "auto_conf"' >> dd-agent-defaults.nix
+for f in $(find $dd/agent/conf.d-system -maxdepth 1 -type f | grep -v '\.example' | sort); do
+  echo "  \"$(basename $f)\"" >> dd-agent-defaults.nix
+done
+echo ']' >> dd-agent-defaults.nix
diff --git a/nixpkgs/nixos/modules/services/monitoring/do-agent.nix b/nixpkgs/nixos/modules/services/monitoring/do-agent.nix
new file mode 100644
index 000000000000..2d3fe2f79768
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/do-agent.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.do-agent;
+in
+{
+  options.services.do-agent = {
+    enable = mkEnableOption "do-agent, the DigitalOcean droplet metrics agent";
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.do-agent ];
+
+    systemd.services.do-agent = {
+      description = "DigitalOcean Droplet Metrics Agent";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.do-agent}/bin/do-agent --syslog";
+        Restart = "always";
+        OOMScoreAdjust = -900;
+        SyslogIdentifier = "DigitalOceanAgent";
+        PrivateTmp = "yes";
+        ProtectSystem = "full";
+        ProtectHome = "yes";
+        NoNewPrivileges = "yes";
+        DynamicUser = "yes";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix b/nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix
new file mode 100644
index 000000000000..9b65c76ce02e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix
@@ -0,0 +1,63 @@
+# Fusion Inventory daemon.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.fusionInventory;
+
+  configFile = pkgs.writeText "fusion_inventory.conf" ''
+    server = ${concatStringsSep ", " cfg.servers}
+
+    logger = stderr
+
+    ${cfg.extraConfig}
+  '';
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.fusionInventory = {
+
+      enable = mkEnableOption "Fusion Inventory Agent";
+
+      servers = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          The URLs of the OCS/GLPI servers to connect to.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Configuration that is injected verbatim into the configuration file.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.fusion-inventory = {
+      description = "FusionInventory user";
+      isSystemUser = true;
+    };
+
+    systemd.services.fusion-inventory = {
+      description = "Fusion Inventory Agent";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.fusionInventory}/bin/fusioninventory-agent --conf-file=${configFile} --daemon --no-fork";
+      };
+    };
+  };
+}
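A minimal sketch of pointing the agent at an inventory server; the URL is hypothetical:

  services.fusionInventory = {
    enable = true;
    servers = [ "https://glpi.example.org/plugins/fusioninventory/" ];
  };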
diff --git a/nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix b/nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix
new file mode 100644
index 000000000000..893c15d568bd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.grafana_reporter;
+
+in {
+  options.services.grafana_reporter = {
+    enable = mkEnableOption "grafana_reporter";
+
+    grafana = {
+      protocol = mkOption {
+        description = "Grafana protocol.";
+        default = "http";
+        type = types.enum ["http" "https"];
+      };
+      addr = mkOption {
+        description = "Grafana address.";
+        default = "127.0.0.1";
+        type = types.str;
+      };
+      port = mkOption {
+        description = "Grafana port.";
+        default = 3000;
+        type = types.int;
+      };
+
+    };
+    addr = mkOption {
+      description = "Listening address.";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    port = mkOption {
+      description = "Listening port.";
+      default = 8686;
+      type = types.int;
+    };
+
+    templateDir = mkOption {
+      description = "Optional template directory to use custom tex templates";
+      default = "${pkgs.grafana_reporter}";
+      type = types.str;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.grafana_reporter = {
+      description = "Grafana Reporter Service Daemon";
+      wantedBy = ["multi-user.target"];
+      after = ["network.target"];
+      serviceConfig = let
+        args = lib.concatStringsSep " " [
+          "-proto ${cfg.grafana.protocol}://"
+          "-ip ${cfg.grafana.addr}:${toString cfg.grafana.port}"
+          "-port :${toString cfg.port}"
+          "-templates ${cfg.templateDir}"
+        ];
+      in {
+        ExecStart = "${pkgs.grafana_reporter}/bin/grafana-reporter ${args}";
+      };
+    };
+  };
+}
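A minimal sketch of wiring the reporter to a Grafana instance on the same host, using the defaults defined above:

  services.grafana_reporter = {
    enable = true;
    grafana = {
      protocol = "http";
      addr = "127.0.0.1";
      port = 3000;
    };
    port = 8686;
  };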
diff --git a/nixpkgs/nixos/modules/services/monitoring/grafana.nix b/nixpkgs/nixos/modules/services/monitoring/grafana.nix
new file mode 100644
index 000000000000..b0c81a46d4d8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/grafana.nix
@@ -0,0 +1,559 @@
+{ options, config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.grafana;
+  opt = options.services.grafana;
+
+  envOptions = {
+    PATHS_DATA = cfg.dataDir;
+    PATHS_PLUGINS = "${cfg.dataDir}/plugins";
+    PATHS_LOGS = "${cfg.dataDir}/log";
+
+    SERVER_PROTOCOL = cfg.protocol;
+    SERVER_HTTP_ADDR = cfg.addr;
+    SERVER_HTTP_PORT = cfg.port;
+    SERVER_DOMAIN = cfg.domain;
+    SERVER_ROOT_URL = cfg.rootUrl;
+    SERVER_STATIC_ROOT_PATH = cfg.staticRootPath;
+    SERVER_CERT_FILE = cfg.certFile;
+    SERVER_CERT_KEY = cfg.certKey;
+
+    DATABASE_TYPE = cfg.database.type;
+    DATABASE_HOST = cfg.database.host;
+    DATABASE_NAME = cfg.database.name;
+    DATABASE_USER = cfg.database.user;
+    DATABASE_PASSWORD = cfg.database.password;
+    DATABASE_PATH = cfg.database.path;
+    DATABASE_CONN_MAX_LIFETIME = cfg.database.connMaxLifetime;
+
+    SECURITY_ADMIN_USER = cfg.security.adminUser;
+    SECURITY_ADMIN_PASSWORD = cfg.security.adminPassword;
+    SECURITY_SECRET_KEY = cfg.security.secretKey;
+
+    USERS_ALLOW_SIGN_UP = boolToString cfg.users.allowSignUp;
+    USERS_ALLOW_ORG_CREATE = boolToString cfg.users.allowOrgCreate;
+    USERS_AUTO_ASSIGN_ORG = boolToString cfg.users.autoAssignOrg;
+    USERS_AUTO_ASSIGN_ORG_ROLE = cfg.users.autoAssignOrgRole;
+
+    AUTH_ANONYMOUS_ENABLED = boolToString cfg.auth.anonymous.enable;
+    AUTH_ANONYMOUS_ORG_NAME = cfg.auth.anonymous.org_name;
+    AUTH_ANONYMOUS_ORG_ROLE = cfg.auth.anonymous.org_role;
+
+    ANALYTICS_REPORTING_ENABLED = boolToString cfg.analytics.reporting.enable;
+
+    SMTP_ENABLED = boolToString cfg.smtp.enable;
+    SMTP_HOST = cfg.smtp.host;
+    SMTP_USER = cfg.smtp.user;
+    SMTP_PASSWORD = cfg.smtp.password;
+    SMTP_FROM_ADDRESS = cfg.smtp.fromAddress;
+  } // cfg.extraOptions;
+
+  datasourceConfiguration = {
+    apiVersion = 1;
+    datasources = cfg.provision.datasources;
+  };
+
+  datasourceFile = pkgs.writeText "datasource.yaml" (builtins.toJSON datasourceConfiguration);
+
+  dashboardConfiguration = {
+    apiVersion = 1;
+    providers = cfg.provision.dashboards;
+  };
+
+  dashboardFile = pkgs.writeText "dashboard.yaml" (builtins.toJSON dashboardConfiguration);
+
+  provisionConfDir =  pkgs.runCommand "grafana-provisioning" { } ''
+    mkdir -p $out/{datasources,dashboards}
+    ln -sf ${datasourceFile} $out/datasources/datasource.yaml
+    ln -sf ${dashboardFile} $out/dashboards/dashboard.yaml
+  '';
+
+  # Get a submodule without any embedded metadata:
+  _filter = x: filterAttrs (k: v: k != "_module") x;
+
+  # http://docs.grafana.org/administration/provisioning/#datasources
+  grafanaTypes.datasourceConfig = types.submodule {
+    options = {
+      name = mkOption {
+        type = types.str;
+        description = "Name of the datasource. Required";
+      };
+      type = mkOption {
+        type = types.enum ["graphite" "prometheus" "cloudwatch" "elasticsearch" "influxdb" "opentsdb" "mysql" "mssql" "postgres" "loki"];
+        description = "Datasource type. Required";
+      };
+      access = mkOption {
+        type = types.enum ["proxy" "direct"];
+        default = "proxy";
+        description = "Access mode. proxy or direct (Server or Browser in the UI). Required";
+      };
+      orgId = mkOption {
+        type = types.int;
+        default = 1;
+        description = "Org id. will default to orgId 1 if not specified";
+      };
+      url = mkOption {
+        type = types.str;
+        description = "Url of the datasource";
+      };
+      password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Database password, if used";
+      };
+      user = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Database user, if used";
+      };
+      database = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Database name, if used";
+      };
+      basicAuth = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = "Enable/disable basic auth";
+      };
+      basicAuthUser = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Basic auth username";
+      };
+      basicAuthPassword = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Basic auth password";
+      };
+      withCredentials = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Enable/disable with credentials headers";
+      };
+      isDefault = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Mark as default datasource. Max one per org";
+      };
+      jsonData = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = "Datasource specific configuration";
+      };
+      secureJsonData = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = "Datasource specific secure configuration";
+      };
+      version = mkOption {
+        type = types.int;
+        default = 1;
+        description = "Version";
+      };
+      editable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Allow users to edit datasources from the UI.";
+      };
+    };
+  };
+
+  # http://docs.grafana.org/administration/provisioning/#dashboards
+  grafanaTypes.dashboardConfig = types.submodule {
+    options = {
+      name = mkOption {
+        type = types.str;
+        default = "default";
+        description = "Provider name";
+      };
+      orgId = mkOption {
+        type = types.int;
+        default = 1;
+        description = "Organization ID";
+      };
+      folder = mkOption {
+        type = types.str;
+        default = "";
+        description = "Add dashboards to the specified folder";
+      };
+      type = mkOption {
+        type = types.str;
+        default = "file";
+        description = "Dashboard provider type";
+      };
+      disableDeletion = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Disable deletion when JSON file is removed";
+      };
+      updateIntervalSeconds = mkOption {
+        type = types.int;
+        default = 10;
+        description = "How often Grafana will scan for changed dashboards";
+      };
+      options = {
+        path = mkOption {
+          type = types.path;
+          description = "Path grafana will watch for dashboards";
+        };
+      };
+    };
+  };
+in {
+  options.services.grafana = {
+    enable = mkEnableOption "grafana";
+
+    protocol = mkOption {
+      description = "Which protocol to listen.";
+      default = "http";
+      type = types.enum ["http" "https" "socket"];
+    };
+
+    addr = mkOption {
+      description = "Listening address.";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    port = mkOption {
+      description = "Listening port.";
+      default = 3000;
+      type = types.int;
+    };
+
+    domain = mkOption {
+      description = "The public facing domain name used to access grafana from a browser.";
+      default = "localhost";
+      type = types.str;
+    };
+
+    rootUrl = mkOption {
+      description = "Full public facing url.";
+      default = "%(protocol)s://%(domain)s:%(http_port)s/";
+      type = types.str;
+    };
+
+    certFile = mkOption {
+      description = "Cert file for ssl.";
+      default = "";
+      type = types.str;
+    };
+
+    certKey = mkOption {
+      description = "Cert key for ssl.";
+      default = "";
+      type = types.str;
+    };
+
+    staticRootPath = mkOption {
+      description = "Root path for static assets.";
+      default = "${cfg.package}/share/grafana/public";
+      type = types.str;
+    };
+
+    package = mkOption {
+      description = "Package to use.";
+      default = pkgs.grafana;
+      defaultText = "pkgs.grafana";
+      type = types.package;
+    };
+
+    dataDir = mkOption {
+      description = "Data directory.";
+      default = "/var/lib/grafana";
+      type = types.path;
+    };
+
+    database = {
+      type = mkOption {
+        description = "Database type.";
+        default = "sqlite3";
+        type = types.enum ["mysql" "sqlite3" "postgres"];
+      };
+
+      host = mkOption {
+        description = "Database host.";
+        default = "127.0.0.1:3306";
+        type = types.str;
+      };
+
+      name = mkOption {
+        description = "Database name.";
+        default = "grafana";
+        type = types.str;
+      };
+
+      user = mkOption {
+        description = "Database user.";
+        default = "root";
+        type = types.str;
+      };
+
+      password = mkOption {
+        description = ''
+          Database password.
+          This option is mutually exclusive with the passwordFile option.
+        '';
+        default = "";
+        type = types.str;
+      };
+
+      passwordFile = mkOption {
+        description = ''
+          File that contains the database password.
+          This option is mutually exclusive with the password option.
+        '';
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      path = mkOption {
+        description = "Database path.";
+        default = "${cfg.dataDir}/data/grafana.db";
+        type = types.path;
+      };
+
+      connMaxLifetime = mkOption {
+        description = ''
+          Sets the maximum amount of time (in seconds) a connection may be reused.
+          For MySQL this setting should be shorter than the <literal>wait_timeout</literal> variable.
+        '';
+        default = "unlimited";
+        example = 14400;
+        type = types.either types.int (types.enum [ "unlimited" ]);
+      };
+    };
+
+    provision = {
+      enable = mkEnableOption "provision";
+      datasources = mkOption {
+        description = "Grafana datasources configuration";
+        default = [];
+        type = types.listOf grafanaTypes.datasourceConfig;
+        apply = x: map _filter x;
+      };
+      dashboards = mkOption {
+        description = "Grafana dashboard configuration";
+        default = [];
+        type = types.listOf grafanaTypes.dashboardConfig;
+        apply = x: map _filter x;
+      };
+    };
+
+    security = {
+      adminUser = mkOption {
+        description = "Default admin username.";
+        default = "admin";
+        type = types.str;
+      };
+
+      adminPassword = mkOption {
+        description = ''
+          Default admin password.
+          This option is mutually exclusive with the adminPasswordFile option.
+        '';
+        default = "admin";
+        type = types.str;
+      };
+
+      adminPasswordFile = mkOption {
+        description = ''
+          File containing the default admin password.
+          This option is mutually exclusive with the <literal>adminPassword</literal> option.
+        '';
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      secretKey = mkOption {
+        description = "Secret key used for signing.";
+        default = "SW2YcwTIb9zpOOhoPsMm";
+        type = types.str;
+      };
+
+      secretKeyFile = mkOption {
+        description = "Secret key used for signing.";
+        default = null;
+        type = types.nullOr types.path;
+      };
+    };
+
+    smtp = {
+      enable = mkEnableOption "smtp";
+      host = mkOption {
+        description = "Host to connect to";
+        default = "localhost:25";
+        type = types.str;
+      };
+      user = mkOption {
+        description = "User used for authentication";
+        default = "";
+        type = types.str;
+      };
+      password = mkOption {
+        description = ''
+          Password used for authentication.
+          This option is mutually exclusive with the passwordFile option.
+        '';
+        default = "";
+        type = types.str;
+      };
+      passwordFile = mkOption {
+        description = ''
+          Password used for authentication.
+          This option is mutually exclusive with the password option.
+        '';
+        default = null;
+        type = types.nullOr types.path;
+      };
+      fromAddress = mkOption {
+        description = "Email address used for sending";
+        default = "admin@grafana.localhost";
+        type = types.str;
+      };
+    };
+
+    users = {
+      allowSignUp = mkOption {
+        description = "Disable user signup / registration";
+        default = false;
+        type = types.bool;
+      };
+
+      allowOrgCreate = mkOption {
+        description = "Whether user is allowed to create organizations.";
+        default = false;
+        type = types.bool;
+      };
+
+      autoAssignOrg = mkOption {
+        description = "Whether to automatically assign new users to default org.";
+        default = true;
+        type = types.bool;
+      };
+
+      autoAssignOrgRole = mkOption {
+        description = "Default role new users will be auto assigned.";
+        default = "Viewer";
+        type = types.enum ["Viewer" "Editor"];
+      };
+    };
+
+    auth.anonymous = {
+      enable = mkOption {
+        description = "Whether to allow anonymous access";
+        default = false;
+        type = types.bool;
+      };
+      org_name = mkOption {
+        description = "Which organization to allow anonymous access to";
+        default = "Main Org.";
+        type = types.str;
+      };
+      org_role = mkOption {
+        description = "Which role anonymous users have in the organization";
+        default = "Viewer";
+        type = types.str;
+      };
+
+    };
+
+    analytics.reporting = {
+      enable = mkOption {
+        description = "Whether to allow anonymous usage reporting to stats.grafana.net";
+        default = true;
+        type = types.bool;
+      };
+    };
+
+    extraOptions = mkOption {
+      description = ''
+        Extra configuration options passed as env variables as specified in
+        <link xlink:href="http://docs.grafana.org/installation/configuration/">documentation</link>,
+        but without GF_ prefix
+      '';
+      default = {};
+      type = with types; attrsOf (either str path);
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = flatten [
+      (optional (
+        cfg.database.password != opt.database.password.default ||
+        cfg.security.adminPassword != opt.security.adminPassword.default
+      ) "Grafana passwords will be stored as plaintext in the Nix store!")
+      (optional (
+        any (x: x.password != null || x.basicAuthPassword != null || x.secureJsonData != null) cfg.provision.datasources
+      ) "Datasource passwords will be stored as plaintext in the Nix store!")
+    ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    assertions = [
+      {
+        assertion = cfg.database.password != opt.database.password.default -> cfg.database.passwordFile == null;
+        message = "Cannot set both password and passwordFile";
+      }
+      {
+        assertion = cfg.security.adminPassword != opt.security.adminPassword.default -> cfg.security.adminPasswordFile == null;
+        message = "Cannot set both adminPassword and adminPasswordFile";
+      }
+      {
+        assertion = cfg.security.secretKey != opt.security.secretKey.default -> cfg.security.secretKeyFile == null;
+        message = "Cannot set both secretKey and secretKeyFile";
+      }
+      {
+        assertion = cfg.smtp.password != opt.smtp.password.default -> cfg.smtp.passwordFile == null;
+        message = "Cannot set both password and passwordFile";
+      }
+    ];
+
+    systemd.services.grafana = {
+      description = "Grafana Service Daemon";
+      wantedBy = ["multi-user.target"];
+      after = ["networking.target"];
+      environment = {
+        QT_QPA_PLATFORM = "offscreen";
+      } // mapAttrs' (n: v: nameValuePair "GF_${n}" (toString v)) envOptions;
+      script = ''
+        ${optionalString (cfg.database.passwordFile != null) ''
+          export GF_DATABASE_PASSWORD="$(cat ${escapeShellArg cfg.database.passwordFile})"
+        ''}
+        ${optionalString (cfg.security.adminPasswordFile != null) ''
+          export GF_SECURITY_ADMIN_PASSWORD="$(cat ${escapeShellArg cfg.security.adminPasswordFile})"
+        ''}
+        ${optionalString (cfg.security.secretKeyFile != null) ''
+          export GF_SECURITY_SECRET_KEY="$(cat ${escapeShellArg cfg.security.secretKeyFile})"
+        ''}
+        ${optionalString (cfg.smtp.passwordFile != null) ''
+          export GF_SMTP_PASSWORD="$(cat ${escapeShellArg cfg.smtp.passwordFile})"
+        ''}
+        ${optionalString cfg.provision.enable ''
+          export GF_PATHS_PROVISIONING=${provisionConfDir};
+        ''}
+        exec ${cfg.package}/bin/grafana-server -homepath ${cfg.dataDir}
+      '';
+      serviceConfig = {
+        WorkingDirectory = cfg.dataDir;
+        User = "grafana";
+      };
+      preStart = ''
+        ln -fs ${cfg.package}/share/grafana/conf ${cfg.dataDir}
+        ln -fs ${cfg.package}/share/grafana/tools ${cfg.dataDir}
+      '';
+    };
+
+    users.users.grafana = {
+      uid = config.ids.uids.grafana;
+      description = "Grafana user";
+      home = cfg.dataDir;
+      createHome = true;
+      group = "grafana";
+    };
+    users.groups.grafana = {};
+  };
+}
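As a sketch of how the options above compose, the following hypothetical configuration enables Grafana on localhost and provisions a single Prometheus datasource. The datasource URL and the key file path are assumptions; secrets are kept in the *File options so they stay out of the world-readable Nix store, as the warnings and assertions above suggest.

  {
    services.grafana = {
      enable = true;
      addr = "127.0.0.1";
      port = 3000;
      domain = "localhost";
      # Hypothetical path; avoids storing the admin password in the Nix store.
      security.adminPasswordFile = "/run/keys/grafana-admin";
      provision = {
        enable = true;
        datasources = [
          {
            name = "Prometheus";
            type = "prometheus";
            url = "http://127.0.0.1:9090";  # assumed local Prometheus instance
            isDefault = true;
          }
        ];
      };
    };
  }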
diff --git a/nixpkgs/nixos/modules/services/monitoring/graphite.nix b/nixpkgs/nixos/modules/services/monitoring/graphite.nix
new file mode 100644
index 000000000000..64d9d61950da
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/graphite.nix
@@ -0,0 +1,570 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.graphite;
+  writeTextOrNull = f: t: mapNullable (pkgs.writeTextDir f) t;
+
+  dataDir = cfg.dataDir;
+  staticDir = cfg.dataDir + "/static";
+
+  graphiteLocalSettingsDir = pkgs.runCommand "graphite_local_settings" {
+      inherit graphiteLocalSettings;
+      preferLocalBuild = true;
+    } ''
+    mkdir -p $out
+    ln -s $graphiteLocalSettings $out/graphite_local_settings.py
+  '';
+
+  graphiteLocalSettings = pkgs.writeText "graphite_local_settings.py" (
+    "STATIC_ROOT = '${staticDir}'\n" +
+    optionalString (config.time.timeZone != null) "TIME_ZONE = '${config.time.timeZone}'\n"
+    + cfg.web.extraConfig
+  );
+
+  graphiteApiConfig = pkgs.writeText "graphite-api.yaml" ''
+    search_index: ${dataDir}/index
+    ${optionalString (config.time.timeZone != null) ''time_zone: ${config.time.timeZone}''}
+    ${optionalString (cfg.api.finders != []) ''finders:''}
+    ${concatMapStringsSep "\n" (f: "  - " + f.moduleName) cfg.api.finders}
+    ${optionalString (cfg.api.functions != []) ''functions:''}
+    ${concatMapStringsSep "\n" (f: "  - " + f) cfg.api.functions}
+    ${cfg.api.extraConfig}
+  '';
+
+  seyrenConfig = {
+    SEYREN_URL = cfg.seyren.seyrenUrl;
+    MONGO_URL = cfg.seyren.mongoUrl;
+    GRAPHITE_URL = cfg.seyren.graphiteUrl;
+  } // cfg.seyren.extraConfig;
+
+  configDir = pkgs.buildEnv {
+    name = "graphite-config";
+    paths = lists.filter (el: el != null) [
+      (writeTextOrNull "carbon.conf" cfg.carbon.config)
+      (writeTextOrNull "storage-aggregation.conf" cfg.carbon.storageAggregation)
+      (writeTextOrNull "storage-schemas.conf" cfg.carbon.storageSchemas)
+      (writeTextOrNull "blacklist.conf" cfg.carbon.blacklist)
+      (writeTextOrNull "whitelist.conf" cfg.carbon.whitelist)
+      (writeTextOrNull "rewrite-rules.conf" cfg.carbon.rewriteRules)
+      (writeTextOrNull "relay-rules.conf" cfg.carbon.relayRules)
+      (writeTextOrNull "aggregation-rules.conf" cfg.carbon.aggregationRules)
+    ];
+  };
+
+  carbonOpts = name: with config.ids; ''
+    --nodaemon --syslog --prefix=${name} --pidfile /run/${name}/${name}.pid ${name}
+  '';
+
+  carbonEnv = {
+    PYTHONPATH = let
+      cenv = pkgs.python3.buildEnv.override {
+        extraLibs = [ pkgs.python3Packages.carbon ];
+      };
+    in "${cenv}/${pkgs.python3.sitePackages}";
+    GRAPHITE_ROOT = dataDir;
+    GRAPHITE_CONF_DIR = configDir;
+    GRAPHITE_STORAGE_DIR = dataDir;
+  };
+
+in {
+
+  imports = [
+    (mkRemovedOptionModule ["services" "graphite" "pager"] "")
+  ];
+
+  ###### interface
+
+  options.services.graphite = {
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/db/graphite";
+      description = ''
+        Data directory for graphite.
+      '';
+    };
+
+    web = {
+      enable = mkOption {
+        description = "Whether to enable graphite web frontend.";
+        default = false;
+        type = types.bool;
+      };
+
+      listenAddress = mkOption {
+        description = "Graphite web frontend listen address.";
+        default = "127.0.0.1";
+        type = types.str;
+      };
+
+      port = mkOption {
+        description = "Graphite web frontend port.";
+        default = 8080;
+        type = types.int;
+      };
+
+      extraConfig = mkOption {
+        type = types.str;
+        default = "";
+        description = ''
+          Graphite webapp settings. See:
+          <link xlink:href="http://graphite.readthedocs.io/en/latest/config-local-settings.html"/>
+        '';
+      };
+    };
+
+    api = {
+      enable = mkOption {
+        description = ''
+          Whether to enable the graphite-api service. graphite-api is a lightweight
+          alternative to graphite-web that provides only the HTTP API, without a
+          dashboard. It is advisable to use Grafana as an alternative dashboard and
+          InfluxDB as an alternative to Graphite Carbon.
+
+          For more information visit
+          <link xlink:href="https://graphite-api.readthedocs.org/en/latest/"/>
+        '';
+        default = false;
+        type = types.bool;
+      };
+
+      finders = mkOption {
+        description = "List of finder plugins to load.";
+        default = [];
+        example = literalExample "[ pkgs.python3Packages.influxgraph ]";
+        type = types.listOf types.package;
+      };
+
+      functions = mkOption {
+        description = "List of functions to load.";
+        default = [
+          "graphite_api.functions.SeriesFunctions"
+          "graphite_api.functions.PieFunctions"
+        ];
+        type = types.listOf types.str;
+      };
+
+      listenAddress = mkOption {
+        description = "Graphite web service listen address.";
+        default = "127.0.0.1";
+        type = types.str;
+      };
+
+      port = mkOption {
+        description = "Graphite api service port.";
+        default = 8080;
+        type = types.int;
+      };
+
+      package = mkOption {
+        description = "Package to use for graphite api.";
+        default = pkgs.python3Packages.graphite_api;
+        defaultText = "pkgs.python3Packages.graphite_api";
+        type = types.package;
+      };
+
+      extraConfig = mkOption {
+        description = "Extra configuration for graphite api.";
+        default = ''
+          whisper:
+            directories:
+                - ${dataDir}/whisper
+        '';
+        example = ''
+          allowed_origins:
+            - dashboard.example.com
+          cheat_times: true
+          influxdb:
+            host: localhost
+            port: 8086
+            user: influxdb
+            pass: influxdb
+            db: metrics
+          cache:
+            CACHE_TYPE: 'filesystem'
+            CACHE_DIR: '/tmp/graphite-api-cache'
+        '';
+        type = types.lines;
+      };
+    };
+
+    carbon = {
+      config = mkOption {
+        description = "Content of carbon configuration file.";
+        default = ''
+          [cache]
+          # Listen on localhost by default for security reasons
+          UDP_RECEIVER_INTERFACE = 127.0.0.1
+          PICKLE_RECEIVER_INTERFACE = 127.0.0.1
+          LINE_RECEIVER_INTERFACE = 127.0.0.1
+          CACHE_QUERY_INTERFACE = 127.0.0.1
+          # Do not log every update
+          LOG_UPDATES = False
+          LOG_CACHE_HITS = False
+        '';
+        type = types.str;
+      };
+
+      enableCache = mkOption {
+        description = "Whether to enable carbon cache, the graphite storage daemon.";
+        default = false;
+        type = types.bool;
+      };
+
+      storageAggregation = mkOption {
+        description = "Defines how to aggregate data to lower-precision retentions.";
+        default = null;
+        type = types.nullOr types.str;
+        example = ''
+          [all_min]
+          pattern = \.min$
+          xFilesFactor = 0.1
+          aggregationMethod = min
+        '';
+      };
+
+      storageSchemas = mkOption {
+        description = "Defines retention rates for storing metrics.";
+        default = "";
+        type = types.nullOr types.str;
+        example = ''
+          [apache_busyWorkers]
+          pattern = ^servers\.www.*\.workers\.busyWorkers$
+          retentions = 15s:7d,1m:21d,15m:5y
+        '';
+      };
+
+      blacklist = mkOption {
+        description = "Any metrics received which match one of the experssions will be dropped.";
+        default = null;
+        type = types.nullOr types.str;
+        example = "^some\\.noisy\\.metric\\.prefix\\..*";
+      };
+
+      whitelist = mkOption {
+        description = "Only metrics received which match one of the experssions will be persisted.";
+        default = null;
+        type = types.nullOr types.str;
+        example = ".*";
+      };
+
+      rewriteRules = mkOption {
+        description = ''
+          Regular expression patterns that can be used to rewrite metric names
+          in a search and replace fashion.
+        '';
+        default = null;
+        type = types.nullOr types.str;
+        example = ''
+          [post]
+          _sum$ =
+          _avg$ =
+        '';
+      };
+
+      enableRelay = mkOption {
+        description = "Whether to enable carbon relay, the carbon replication and sharding service.";
+        default = false;
+        type = types.bool;
+      };
+
+      relayRules = mkOption {
+        description = "Relay rules are used to send certain metrics to a certain backend.";
+        default = null;
+        type = types.nullOr types.str;
+        example = ''
+          [example]
+          pattern = ^mydata\.foo\..+
+          servers = 10.1.2.3, 10.1.2.4:2004, myserver.mydomain.com
+        '';
+      };
+
+      enableAggregator = mkOption {
+        description = "Whether to enable carbon aggregator, the carbon buffering service.";
+        default = false;
+        type = types.bool;
+      };
+
+      aggregationRules = mkOption {
+        description = "Defines if and how received metrics will be aggregated.";
+        default = null;
+        type = types.nullOr types.str;
+        example = ''
+          <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
+          <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
+        '';
+      };
+    };
+
+    seyren = {
+      enable = mkOption {
+        description = "Whether to enable seyren service.";
+        default = false;
+        type = types.bool;
+      };
+
+      port = mkOption {
+        description = "Seyren listening port.";
+        default = 8081;
+        type = types.int;
+      };
+
+      seyrenUrl = mkOption {
+        default = "http://localhost:${toString cfg.seyren.port}/";
+        description = "Host where seyren is accessible.";
+        type = types.str;
+      };
+
+      graphiteUrl = mkOption {
+        default = "http://${cfg.web.listenAddress}:${toString cfg.web.port}";
+        description = "Host where graphite service runs.";
+        type = types.str;
+      };
+
+      mongoUrl = mkOption {
+        default = "mongodb://${config.services.mongodb.bind_ip}:27017/seyren";
+        description = "Mongodb connection string.";
+        type = types.str;
+      };
+
+      extraConfig = mkOption {
+        default = {};
+        description = ''
+          Extra seyren configuration. See
+          <link xlink:href='https://github.com/scobal/seyren#config' />
+        '';
+        type = types.attrsOf types.str;
+        example = literalExample ''
+          {
+            GRAPHITE_USERNAME = "user";
+            GRAPHITE_PASSWORD = "pass";
+          }
+        '';
+      };
+    };
+
+    beacon = {
+      enable = mkEnableOption "graphite beacon";
+
+      config = mkOption {
+        description = "Graphite beacon configuration.";
+        default = {};
+        type = types.attrs;
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkMerge [
+    (mkIf cfg.carbon.enableCache {
+      systemd.services.carbonCache = let name = "carbon-cache"; in {
+        description = "Graphite Data Storage Backend";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        environment = carbonEnv;
+        serviceConfig = {
+          RuntimeDirectory = name;
+          ExecStart = "${pkgs.python3Packages.twisted}/bin/twistd ${carbonOpts name}";
+          User = "graphite";
+          Group = "graphite";
+          PermissionsStartOnly = true;
+          PIDFile="/run/${name}/${name}.pid";
+        };
+        preStart = ''
+          install -dm0700 -o graphite -g graphite ${cfg.dataDir}
+          install -dm0700 -o graphite -g graphite ${cfg.dataDir}/whisper
+        '';
+      };
+    })
+
+    (mkIf cfg.carbon.enableAggregator {
+      systemd.services.carbonAggregator = let name = "carbon-aggregator"; in {
+        enable = cfg.carbon.enableAggregator;
+        description = "Carbon Data Aggregator";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        environment = carbonEnv;
+        serviceConfig = {
+          RuntimeDirectory = name;
+          ExecStart = "${pkgs.python3Packages.twisted}/bin/twistd ${carbonOpts name}";
+          User = "graphite";
+          Group = "graphite";
+          PIDFile="/run/${name}/${name}.pid";
+        };
+      };
+    })
+
+    (mkIf cfg.carbon.enableRelay {
+      systemd.services.carbonRelay = let name = "carbon-relay"; in {
+        description = "Carbon Data Relay";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        environment = carbonEnv;
+        serviceConfig = {
+          RuntimeDirectory = name;
+          ExecStart = "${pkgs.python3Packages.twisted}/bin/twistd ${carbonOpts name}";
+          User = "graphite";
+          Group = "graphite";
+          PIDFile="/run/${name}/${name}.pid";
+        };
+      };
+    })
+
+    (mkIf (cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay) {
+      environment.systemPackages = [
+        pkgs.python3Packages.carbon
+      ];
+    })
+
+    (mkIf cfg.web.enable ({
+      systemd.services.graphiteWeb = {
+        description = "Graphite Web Interface";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        path = [ pkgs.perl ];
+        environment = {
+          PYTHONPATH = let
+              penv = pkgs.python3.buildEnv.override {
+                extraLibs = [
+                  pkgs.python3Packages.graphite-web
+                ];
+              };
+              penvPack = "${penv}/${pkgs.python3.sitePackages}";
+            in concatStringsSep ":" [
+                 "${graphiteLocalSettingsDir}"
+                 "${penvPack}"
+                 # explicitly adding pycairo in path because it cannot be imported via buildEnv
+                 "${pkgs.python3Packages.pycairo}/${pkgs.python3.sitePackages}"
+               ];
+          DJANGO_SETTINGS_MODULE = "graphite.settings";
+          GRAPHITE_SETTINGS_MODULE = "graphite_local_settings";
+          GRAPHITE_CONF_DIR = configDir;
+          GRAPHITE_STORAGE_DIR = dataDir;
+          LD_LIBRARY_PATH = "${pkgs.cairo.out}/lib";
+        };
+        serviceConfig = {
+          ExecStart = ''
+            ${pkgs.python3Packages.waitress-django}/bin/waitress-serve-django \
+              --host=${cfg.web.listenAddress} --port=${toString cfg.web.port}
+          '';
+          User = "graphite";
+          Group = "graphite";
+          PermissionsStartOnly = true;
+        };
+        preStart = ''
+          if ! test -e ${dataDir}/db-created; then
+            mkdir -p ${dataDir}/{whisper/,log/webapp/}
+            chmod 0700 ${dataDir}/{whisper/,log/webapp/}
+
+            ${pkgs.python3Packages.django}/bin/django-admin.py migrate --noinput
+
+            chown -R graphite:graphite ${dataDir}
+
+            touch ${dataDir}/db-created
+          fi
+
+          # Only collect static files when graphite_web changes.
+          if ! [ "${dataDir}/current_graphite_web" -ef "${pkgs.python3Packages.graphite-web}" ]; then
+            mkdir -p ${staticDir}
+            ${pkgs.python3Packages.django}/bin/django-admin.py collectstatic  --noinput --clear
+            chown -R graphite:graphite ${staticDir}
+            ln -sfT "${pkgs.python3Packages.graphite-web}" "${dataDir}/current_graphite_web"
+          fi
+        '';
+      };
+
+      environment.systemPackages = [ pkgs.python3Packages.graphite-web ];
+    }))
+
+    (mkIf cfg.api.enable {
+      systemd.services.graphiteApi = {
+        description = "Graphite Api Interface";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        environment = {
+          PYTHONPATH = let
+              aenv = pkgs.python3.buildEnv.override {
+                extraLibs = [ cfg.api.package pkgs.cairo pkgs.python3Packages.cffi ] ++ cfg.api.finders;
+              };
+            in "${aenv}/${pkgs.python3.sitePackages}";
+          GRAPHITE_API_CONFIG = graphiteApiConfig;
+          LD_LIBRARY_PATH = "${pkgs.cairo.out}/lib";
+        };
+        serviceConfig = {
+          ExecStart = ''
+            ${pkgs.python3Packages.waitress}/bin/waitress-serve \
+            --host=${cfg.api.listenAddress} --port=${toString cfg.api.port} \
+            graphite_api.app:app
+          '';
+          User = "graphite";
+          Group = "graphite";
+          PermissionsStartOnly = true;
+        };
+        preStart = ''
+          if ! test -e ${dataDir}/db-created; then
+            mkdir -p ${dataDir}/cache/
+            chmod 0700 ${dataDir}/cache/
+
+            chown graphite:graphite ${cfg.dataDir}
+            chown -R graphite:graphite ${cfg.dataDir}/cache
+
+            touch ${dataDir}/db-created
+          fi
+        '';
+      };
+    })
+
+    (mkIf cfg.seyren.enable {
+      systemd.services.seyren = {
+        description = "Graphite Alerting Dashboard";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" "mongodb.service" ];
+        environment = seyrenConfig;
+        serviceConfig = {
+          ExecStart = "${pkgs.seyren}/bin/seyren -httpPort ${toString cfg.seyren.port}";
+          WorkingDirectory = dataDir;
+          User = "graphite";
+          Group = "graphite";
+        };
+        preStart = ''
+          if ! test -e ${dataDir}/db-created; then
+            mkdir -p ${dataDir}
+            chown graphite:graphite ${dataDir}
+          fi
+        '';
+      };
+
+      services.mongodb.enable = mkDefault true;
+    })
+
+    (mkIf cfg.beacon.enable {
+      systemd.services.graphite-beacon = {
+        description = "Grpahite Beacon Alerting Daemon";
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          ExecStart = ''
+            ${pkgs.python3Packages.graphite_beacon}/bin/graphite-beacon \
+              --config=${pkgs.writeText "graphite-beacon.json" (builtins.toJSON cfg.beacon.config)}
+          '';
+          User = "graphite";
+          Group = "graphite";
+        };
+      };
+    })
+
+    (mkIf (
+      cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay ||
+      cfg.web.enable || cfg.api.enable ||
+      cfg.seyren.enable || cfg.beacon.enable
+     ) {
+      users.users.graphite = {
+        uid = config.ids.uids.graphite;
+        description = "Graphite daemon user";
+        home = dataDir;
+      };
+      users.groups.graphite.gid = config.ids.gids.graphite;
+    })
+  ];
+}
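A minimal sketch combining the pieces above: the carbon cache storage daemon together with the web frontend. The address and port are the module defaults, written out explicitly for clarity; any carbon.conf, storage schema or aggregation settings would be supplied through the corresponding carbon options.

  {
    services.graphite = {
      dataDir = "/var/db/graphite";   # module default
      carbon.enableCache = true;      # store incoming metrics in whisper files
      web = {
        enable = true;
        listenAddress = "127.0.0.1";
        port = 8080;
      };
    };
  }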
diff --git a/nixpkgs/nixos/modules/services/monitoring/hdaps.nix b/nixpkgs/nixos/modules/services/monitoring/hdaps.nix
new file mode 100644
index 000000000000..2cad3b84d847
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/hdaps.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.hdapsd;
+  hdapsd = [ pkgs.hdapsd ];
+in
+{
+  options = {
+    services.hdapsd.enable = mkEnableOption
+      ''
+        the Hard Drive Active Protection System Daemon;
+        devices are detected and managed automatically by udev and systemd
+      '';
+  };
+
+  config = mkIf cfg.enable {
+    boot.kernelModules = [ "hdapsd" ];
+    services.udev.packages = hdapsd;
+    systemd.packages = hdapsd;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/heapster.nix b/nixpkgs/nixos/modules/services/monitoring/heapster.nix
new file mode 100644
index 000000000000..0a9dfa12eaa5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/heapster.nix
@@ -0,0 +1,57 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.heapster;
+in {
+  options.services.heapster = {
+    enable = mkOption {
+      description = "Whether to enable heapster monitoring";
+      default = false;
+      type = types.bool;
+    };
+
+    source = mkOption {
+      description = "Heapster metric source";
+      example = "kubernetes:https://kubernetes.default";
+      type = types.str;
+    };
+
+    sink = mkOption {
+      description = "Heapster metic sink";
+      example = "influxdb:http://localhost:8086";
+      type = types.str;
+    };
+
+    extraOpts = mkOption {
+      description = "Heapster extra options";
+      default = "";
+      type = types.separatedString " ";
+    };
+
+    package = mkOption {
+      description = "Package to use by heapster";
+      default = pkgs.heapster;
+      defaultText = "pkgs.heapster";
+      type = types.package;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.heapster = {
+      wantedBy = ["multi-user.target"];
+      after = ["cadvisor.service" "kube-apiserver.service"];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/heapster --source=${cfg.source} --sink=${cfg.sink} ${cfg.extraOpts}";
+        User = "heapster";
+      };
+    };
+
+    users.users.heapster = {
+      uid = config.ids.uids.heapster;
+      description = "Heapster user";
+    };
+  };
+}
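Heapster needs both a source and a sink. A hedged example wiring it to a local Kubernetes API server and InfluxDB, reusing the URLs from the option examples above (both endpoints are assumptions about the deployment):

  {
    services.heapster = {
      enable = true;
      source = "kubernetes:https://kubernetes.default";
      sink = "influxdb:http://localhost:8086";
    };
  }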
diff --git a/nixpkgs/nixos/modules/services/monitoring/incron.nix b/nixpkgs/nixos/modules/services/monitoring/incron.nix
new file mode 100644
index 000000000000..1789fd9f2051
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/incron.nix
@@ -0,0 +1,98 @@
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.incron;
+
+in
+
+{
+  options = {
+
+    services.incron = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable the incron daemon.
+
+          Note that commands run from incrontab only have the common Nix profiles available in the <envar>PATH</envar> environment variable.
+        '';
+      };
+
+      allow = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        description = ''
+          Users allowed to use incrontab.
+
+          If empty then no user will be allowed to have their own incrontab.
+          If <literal>null</literal> then will defer to <option>deny</option>.
+          If both <option>allow</option> and <option>deny</option> are null
+          then all users will be allowed to have their own incrontab.
+        '';
+      };
+
+      deny = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        description = "Users forbidden from using incrontab.";
+      };
+
+      systab = mkOption {
+        type = types.lines;
+        default = "";
+        description = "The system incrontab contents.";
+        example = ''
+          /var/mail IN_CLOSE_WRITE abc $@/$#
+          /tmp IN_ALL_EVENTS efg $@/$# $&
+        '';
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExample "[ pkgs.rsync ]";
+        description = "Extra packages available to the system incrontab.";
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    warnings = optional (cfg.allow != null && cfg.deny != null)
+      ''If `services.incron.allow` is set then `services.incron.deny` will be ignored.'';
+
+    environment.systemPackages = [ pkgs.incron ];
+
+    security.wrappers.incrontab.source = "${pkgs.incron}/bin/incrontab";
+
+    # incron won't read symlinks
+    environment.etc."incron.d/system" = {
+      mode = "0444";
+      text = cfg.systab;
+    };
+    environment.etc."incron.allow" = mkIf (cfg.allow != null) {
+      text = concatStringsSep "\n" cfg.allow;
+    };
+    environment.etc."incron.deny" = mkIf (cfg.deny != null) {
+      text = concatStringsSep "\n" cfg.deny;
+    };
+
+    systemd.services.incron = {
+      description = "File System Events Scheduler";
+      wantedBy = [ "multi-user.target" ];
+      path = cfg.extraPackages;
+      serviceConfig.PIDFile = "/run/incrond.pid";
+      serviceConfig.ExecStartPre = "${pkgs.coreutils}/bin/mkdir -m 710 -p /var/spool/incron";
+      serviceConfig.ExecStart = "${pkgs.incron}/bin/incrond --foreground";
+    };
+  };
+
+}
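As an illustrative sketch, the system incrontab below mirrors the behaviour described above: rsync is put on the service's PATH via extraPackages, and the table copies any file closed after writing in a watched directory. The watched and destination paths are hypothetical.

  { pkgs, ... }:
  {
    services.incron = {
      enable = true;
      extraPackages = [ pkgs.rsync ];
      systab = ''
        /var/www/uploads IN_CLOSE_WRITE rsync -a $@/$# /srv/backup/
      '';
    };
  }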
diff --git a/nixpkgs/nixos/modules/services/monitoring/kapacitor.nix b/nixpkgs/nixos/modules/services/monitoring/kapacitor.nix
new file mode 100644
index 000000000000..9b4ff3c56124
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/kapacitor.nix
@@ -0,0 +1,191 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.kapacitor;
+
+  kapacitorConf = pkgs.writeTextFile {
+    name = "kapacitord.conf";
+    text = ''
+      hostname="${config.networking.hostName}"
+      data_dir="${cfg.dataDir}"
+
+      [http]
+        bind-address = "${cfg.bind}:${toString cfg.port}"
+        log-enabled = false
+        auth-enabled = false
+
+      [task]
+        dir = "${cfg.dataDir}/tasks"
+        snapshot-interval = "${cfg.taskSnapshotInterval}"
+
+      [replay]
+        dir = "${cfg.dataDir}/replay"
+
+      [storage]
+        boltdb = "${cfg.dataDir}/kapacitor.db"
+
+      ${optionalString (cfg.loadDirectory != null) ''
+        [load]
+          enabled = true
+          dir = "${cfg.loadDirectory}"
+      ''}
+
+      ${optionalString (cfg.defaultDatabase.enable) ''
+        [[influxdb]]
+          name = "default"
+          enabled = true
+          default = true
+          urls = [ "${cfg.defaultDatabase.url}" ]
+          username = "${cfg.defaultDatabase.username}"
+          password = "${cfg.defaultDatabase.password}"
+      ''}
+
+      ${optionalString (cfg.alerta.enable) ''
+        [alerta]
+          enabled = true
+          url = "${cfg.alerta.url}"
+          token = "${cfg.alerta.token}"
+          environment = "${cfg.alerta.environment}"
+          origin = "${cfg.alerta.origin}"
+      ''}
+
+      ${cfg.extraConfig}
+    '';
+  };
+in
+{
+  options.services.kapacitor = {
+    enable = mkEnableOption "kapacitor";
+
+    dataDir = mkOption {
+      type = types.path;
+      example = "/var/lib/kapacitor";
+      default = "/var/lib/kapacitor";
+      description = "Location where Kapacitor stores its state";
+    };
+
+    port = mkOption {
+      type = types.int;
+      default = 9092;
+      description = "Port of Kapacitor";
+    };
+
+    bind = mkOption {
+      type = types.str;
+      default = "";
+      example = literalExample "0.0.0.0";
+      description = "Address to bind to. The default is to bind to all addresses";
+    };
+
+    extraConfig = mkOption {
+      description = "These lines go into kapacitord.conf verbatim.";
+      default = "";
+      type = types.lines;
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "kapacitor";
+      description = "User account under which Kapacitor runs";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "kapacitor";
+      description = "Group under which Kapacitor runs";
+    };
+
+    taskSnapshotInterval = mkOption {
+      type = types.str;
+      description = "Specifies how often to snapshot the task state  (in InfluxDB time units)";
+      default = "1m0s";
+      example = "1m0s";
+    };
+
+    loadDirectory = mkOption {
+      type = types.nullOr types.path;
+      description = "Directory where to load services from, such as tasks, templates and handlers (or null to disable service loading on startup)";
+      default = null;
+    };
+
+    defaultDatabase = {
+      enable = mkEnableOption "kapacitor.defaultDatabase";
+
+      url = mkOption {
+        description = "The URL to an InfluxDB server that serves as the default database";
+        example = "http://localhost:8086";
+        type = types.str;
+      };
+
+      username = mkOption {
+        description = "The username to connect to the remote InfluxDB server";
+        type = types.str;
+      };
+
+      password = mkOption {
+        description = "The password to connect to the remote InfluxDB server";
+        type = types.str;
+      };
+    };
+
+    alerta = {
+      enable = mkEnableOption "kapacitor alerta integration";
+
+      url = mkOption {
+        description = "The URL to the Alerta REST API";
+        default = "http://localhost:5000";
+        example = "http://localhost:5000";
+        type = types.str;
+      };
+
+      token = mkOption {
+        description = "Default Alerta authentication token";
+        type = types.str;
+        default = "";
+      };
+
+      environment = mkOption {
+        description = "Default Alerta environment";
+        type = types.str;
+        default = "Production";
+      };
+
+      origin = mkOption {
+        description = "Default origin of alert";
+        type = types.str;
+        default = "kapacitor";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.kapacitor ];
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.kapacitor = {
+      description = "Kapacitor Real-Time Stream Processing Engine";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.kapacitor}/bin/kapacitord -config ${kapacitorConf}";
+        User = "kapacitor";
+        Group = "kapacitor";
+      };
+    };
+
+    users.users.kapacitor = {
+      uid = config.ids.uids.kapacitor;
+      description = "Kapacitor user";
+      home = cfg.dataDir;
+    };
+
+    users.groups.kapacitor = {
+      gid = config.ids.gids.kapacitor;
+    };
+  };
+}
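A hedged example of a typical setup pointing Kapacitor at a local InfluxDB as the default database. The URL and credentials are placeholders; note that, as the module substitutes them directly into kapacitord.conf, the password ends up in the world-readable Nix store.

  {
    services.kapacitor = {
      enable = true;
      defaultDatabase = {
        enable = true;
        url = "http://localhost:8086";  # assumed local InfluxDB
        username = "kapacitor";         # placeholder credentials
        password = "changeme";          # placeholder; stored in the Nix store
      };
    };
  }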
diff --git a/nixpkgs/nixos/modules/services/monitoring/loki.nix b/nixpkgs/nixos/modules/services/monitoring/loki.nix
new file mode 100644
index 000000000000..f4eec7e0d284
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/loki.nix
@@ -0,0 +1,112 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) escapeShellArgs literalExample mkEnableOption mkIf mkOption types;
+
+  cfg = config.services.loki;
+
+  prettyJSON = conf:
+    pkgs.runCommand "loki-config.json" { } ''
+      echo '${builtins.toJSON conf}' | ${pkgs.jq}/bin/jq 'del(._module)' > $out
+    '';
+
+in {
+  options.services.loki = {
+    enable = mkEnableOption "loki";
+
+    user = mkOption {
+      type = types.str;
+      default = "loki";
+      description = ''
+        User under which the Loki service runs.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "loki";
+      description = ''
+        Group under which the Loki service runs.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/loki";
+      description = ''
+        Specify the directory for Loki.
+      '';
+    };
+
+    configuration = mkOption {
+      type = types.attrs;
+      default = {};
+      description = ''
+        Specify the configuration for Loki in Nix.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = ''
+        Specify a configuration file that Loki should use.
+      '';
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = literalExample [ "--server.http-listen-port=3101" ];
+      description = ''
+        Specify a list of additional command line flags,
+        which get escaped and are then passed to Loki.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [{
+      assertion = (
+        (cfg.configuration == {} -> cfg.configFile != null) &&
+        (cfg.configFile != null -> cfg.configuration == {})
+      );
+      message  = ''
+        Please specify either
+        'services.loki.configuration' or
+        'services.loki.configFile'.
+      '';
+    }];
+
+    users.groups.${cfg.group} = { };
+    users.users.${cfg.user} = {
+      description = "Loki Service User";
+      group = cfg.group;
+      home = cfg.dataDir;
+      createHome = true;
+      isSystemUser = true;
+    };
+
+    systemd.services.loki = {
+      description = "Loki Service Daemon";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = let
+        conf = if cfg.configFile == null
+               then prettyJSON cfg.configuration
+               else cfg.configFile;
+      in
+      {
+        ExecStart = "${pkgs.grafana-loki}/bin/loki --config.file=${conf} ${escapeShellArgs cfg.extraFlags}";
+        User = cfg.user;
+        Restart = "always";
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectSystem = "full";
+        DevicePolicy = "closed";
+        NoNewPrivileges = true;
+        WorkingDirectory = cfg.dataDir;
+      };
+    };
+  };
+}
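Because the module asserts that exactly one of configuration and configFile is set, a minimal sketch using an external file might look like this. The YAML path is hypothetical and must point to a valid Loki configuration; the extra flag reuses the example from the option above.

  {
    services.loki = {
      enable = true;
      # Hypothetical path to a Loki YAML configuration file.
      configFile = "/etc/loki/local-config.yaml";
      extraFlags = [ "--server.http-listen-port=3101" ];
    };
  }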
diff --git a/nixpkgs/nixos/modules/services/monitoring/longview.nix b/nixpkgs/nixos/modules/services/monitoring/longview.nix
new file mode 100644
index 000000000000..9c38956f9ba8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/longview.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.longview;
+
+  runDir = "/run/longview";
+  configsDir = "${runDir}/longview.d";
+
+in {
+  options = {
+
+    services.longview = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          If enabled, system metrics will be sent to Linode Longview.
+        '';
+      };
+
+      apiKey = mkOption {
+        type = types.str;
+        default = "";
+        example = "01234567-89AB-CDEF-0123456789ABCDEF";
+        description = ''
+          Longview API key. To get this, look in Longview settings which
+          are found at https://manager.linode.com/longview/.
+
+          Warning: this secret is stored in the world-readable Nix store!
+          Use <option>apiKeyFile</option> instead.
+        '';
+      };
+
+      apiKeyFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/longview-api-key";
+        description = ''
+          A file containing the Longview API key.
+          To get this, look in Longview settings which
+          are found at https://manager.linode.com/longview/.
+
+          <option>apiKeyFile</option> takes precedence over <option>apiKey</option>.
+        '';
+      };
+
+      apacheStatusUrl = mkOption {
+        type = types.str;
+        default = "";
+        example = "http://127.0.0.1/server-status";
+        description = ''
+          The Apache status page URL. If provided, Longview will
+          gather statistics from this location. This requires Apache
+          mod_status to be loaded and enabled.
+        '';
+      };
+
+      nginxStatusUrl = mkOption {
+        type = types.str;
+        default = "";
+        example = "http://127.0.0.1/nginx_status";
+        description = ''
+          The Nginx status page URL. Longview will gather statistics
+          from this URL. This requires the Nginx stub_status module to
+          be enabled and configured at the given location.
+        '';
+      };
+
+      mysqlUser = mkOption {
+        type = types.str;
+        default = "";
+        description = ''
+          The user for connecting to the MySQL database. If provided,
+          Longview will connect to MySQL and collect statistics about
+          queries, etc. This user does not need to have been granted
+          any extra privileges.
+        '';
+      };
+
+      mysqlPassword = mkOption {
+        type = types.str;
+        default = "";
+        description = ''
+          The password corresponding to <option>mysqlUser</option>.
+          Warning: this is stored in cleartext in the Nix store!
+          Use <option>mysqlPasswordFile</option> instead.
+        '';
+      };
+
+      mysqlPasswordFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/dbpassword";
+        description = ''
+          A file containing the password corresponding to <option>mysqlUser</option>.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.longview =
+      { description = "Longview Metrics Collection";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig.Type = "forking";
+        serviceConfig.ExecStop = "-${pkgs.coreutils}/bin/kill -TERM $MAINPID";
+        serviceConfig.ExecReload = "-${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        serviceConfig.PIDFile = "${runDir}/longview.pid";
+        serviceConfig.ExecStart = "${pkgs.longview}/bin/longview";
+        preStart = ''
+          umask 077
+          mkdir -p ${configsDir}
+        '' + (optionalString (cfg.apiKeyFile != null) ''
+          cp --no-preserve=all "${cfg.apiKeyFile}" ${runDir}/longview.key
+        '') + (optionalString (cfg.apacheStatusUrl != "") ''
+          cat > ${configsDir}/Apache.conf <<EOF
+          location ${cfg.apacheStatusUrl}?auto
+          EOF
+        '') + (optionalString (cfg.mysqlUser != "" && cfg.mysqlPasswordFile != null) ''
+          cat > ${configsDir}/MySQL.conf <<EOF
+          username ${cfg.mysqlUser}
+          password `head -n1 "${cfg.mysqlPasswordFile}"`
+          EOF
+        '') + (optionalString (cfg.nginxStatusUrl != "") ''
+          cat > ${configsDir}/Nginx.conf <<EOF
+          location ${cfg.nginxStatusUrl}
+          EOF
+        '');
+      };
+
+    warnings = let warn = k: optional (cfg.${k} != "")
+                 "config.services.longview.${k} is insecure. Use ${k}File instead.";
+               in concatMap warn [ "apiKey" "mysqlPassword" ];
+
+    assertions = [
+      { assertion = cfg.apiKeyFile != null;
+        message = "Longview needs an API key configured";
+      }
+    ];
+
+    # Create API key file if not configured.
+    services.longview.apiKeyFile = mkIf (cfg.apiKey != "")
+      (mkDefault (toString (pkgs.writeTextFile {
+        name = "longview.key";
+        text = cfg.apiKey;
+      })));
+
+    # Create MySQL password file if not configured.
+    services.longview.mysqlPasswordFile = mkDefault (toString (pkgs.writeTextFile {
+      name = "mysql-password-file";
+      text = cfg.mysqlPassword;
+    }));
+  };
+}
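A sketch of a minimal deployment that keeps the API key out of the Nix store and also scrapes a local nginx status page. The key path and status URL are taken from the option examples above and are assumptions about the host.

  {
    services.longview = {
      enable = true;
      apiKeyFile = "/run/keys/longview-api-key";          # assumed key location
      nginxStatusUrl = "http://127.0.0.1/nginx_status";   # requires nginx stub_status
    };
  }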
diff --git a/nixpkgs/nixos/modules/services/monitoring/monit.nix b/nixpkgs/nixos/modules/services/monitoring/monit.nix
new file mode 100644
index 000000000000..ca9352272174
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/monit.nix
@@ -0,0 +1,46 @@
+{config, pkgs, lib, ...}:
+
+with lib;
+
+let
+  cfg = config.services.monit;
+in
+
+{
+  options.services.monit = {
+
+    enable = mkEnableOption "Monit";
+
+    config = mkOption {
+      type = types.lines;
+      default = "";
+      description = "monitrc content";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.monit ];
+
+    environment.etc.monitrc = {
+      text = cfg.config;
+      mode = "0400";
+    };
+
+    systemd.services.monit = {
+      description = "Pro-active monitoring utility for unix systems";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.monit}/bin/monit -I -c /etc/monitrc";
+        ExecStop = "${pkgs.monit}/bin/monit -c /etc/monitrc quit";
+        ExecReload = "${pkgs.monit}/bin/monit -c /etc/monitrc reload";
+        KillMode = "process";
+        Restart = "always";
+      };
+      restartTriggers = [ config.environment.etc.monitrc.source ];
+    };
+
+  };
+}
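Since the module simply writes cfg.config to /etc/monitrc, a hedged example of typical monitrc content is shown below. The monitored service, poll interval and HTTP interface are illustrative, not part of the module.

  {
    services.monit = {
      enable = true;
      config = ''
        set daemon 120
        set httpd port 2812 and use address localhost
          allow localhost
        check process sshd with pidfile /run/sshd.pid
          start program = "/run/current-system/sw/bin/systemctl start sshd"
          stop program  = "/run/current-system/sw/bin/systemctl stop sshd"
      '';
    };
  }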
diff --git a/nixpkgs/nixos/modules/services/monitoring/munin.nix b/nixpkgs/nixos/modules/services/monitoring/munin.nix
new file mode 100644
index 000000000000..1ebf7ee6a761
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/munin.nix
@@ -0,0 +1,402 @@
+{ config, lib, pkgs, ... }:
+
+# TODO: support munin-async
+# TODO: LWP/Pg perl libs aren't recognized
+
+# TODO: support fastcgi
+# http://guide.munin-monitoring.org/en/latest/example/webserver/apache-cgi.html
+# spawn-fcgi -s /run/munin/fastcgi-graph.sock -U www-data   -u munin -g munin /usr/lib/munin/cgi/munin-cgi-graph
+# spawn-fcgi -s /run/munin/fastcgi-html.sock  -U www-data   -u munin -g munin /usr/lib/munin/cgi/munin-cgi-html
+# https://paste.sh/vofcctHP#-KbDSXVeWoifYncZmLfZzgum
+# nginx https://munin.readthedocs.org/en/latest/example/webserver/nginx.html
+
+
+with lib;
+
+let
+  nodeCfg = config.services.munin-node;
+  cronCfg = config.services.munin-cron;
+
+  muninConf = pkgs.writeText "munin.conf"
+    ''
+      dbdir     /var/lib/munin
+      htmldir   /var/www/munin
+      logdir    /var/log/munin
+      rundir    /run/munin
+
+      ${lib.optionalString (cronCfg.extraCSS != "") "staticdir ${customStaticDir}"}
+
+      ${cronCfg.extraGlobalConfig}
+
+      ${cronCfg.hosts}
+    '';
+
+  nodeConf = pkgs.writeText "munin-node.conf"
+    ''
+      log_level 3
+      log_file Sys::Syslog
+      port 4949
+      host *
+      background 0
+      user root
+      group root
+      host_name ${config.networking.hostName}
+      setsid 0
+
+      # plugins wrapped by makeWrapper begin with dots
+      ignore_file ^\.
+
+      allow ^::1$
+      allow ^127\.0\.0\.1$
+
+      ${nodeCfg.extraConfig}
+    '';
+
+  pluginConf = pkgs.writeText "munin-plugin-conf"
+    ''
+      [hddtemp_smartctl]
+      user root
+      group root
+
+      [meminfo]
+      user root
+      group root
+
+      [ipmi*]
+      user root
+      group root
+
+      [munin*]
+      env.UPDATE_STATSFILE /var/lib/munin/munin-update.stats
+
+      ${nodeCfg.extraPluginConfig}
+    '';
+
+  pluginConfDir = pkgs.stdenv.mkDerivation {
+    name = "munin-plugin-conf.d";
+    buildCommand = ''
+      mkdir $out
+      ln -s ${pluginConf} $out/nixos-config
+    '';
+  };
+
+  # Copy one Munin plugin into the Nix store with a specific name.
+  # This is suitable for use with plugins going directly into /etc/munin/plugins,
+  # i.e. munin.extraPlugins.
+  internOnePlugin = name: path:
+    "cp -a '${path}' '${name}'";
+
+  # Copy an entire tree of Munin plugins into a single directory in the Nix
+  # store, with no renaming.
+  # This is suitable for use with munin-node-configure --suggest, i.e.
+  # munin.extraAutoPlugins.
+  internManyPlugins = name: path:
+    "find '${path}' -type f -perm /a+x -exec cp -a -t . '{}' '+'";
+
+  # Use the appropriate intern-fn to copy the plugins into the store and patch
+  # them afterwards in an attempt to get them to run on NixOS.
+  internAndFixPlugins = name: intern-fn: paths:
+    pkgs.runCommand name {} ''
+      mkdir -p "$out"
+      cd "$out"
+      ${lib.concatStringsSep "\n"
+          (lib.attrsets.mapAttrsToList intern-fn paths)}
+      chmod -R u+w .
+      find . -type f -exec sed -E -i '
+        s,(/usr)?/s?bin/,/run/current-system/sw/bin/,g
+      ' '{}' '+'
+    '';
+
+  # TODO: write a derivation for munin-contrib, so that for contrib plugins
+  # you can just refer to them by name rather than needing to include a copy
+  # of munin-contrib in your nixos configuration.
+  extraPluginDir = internAndFixPlugins "munin-extra-plugins.d"
+    internOnePlugin nodeCfg.extraPlugins;
+
+  extraAutoPluginDir = internAndFixPlugins "munin-extra-auto-plugins.d"
+    internManyPlugins
+    (builtins.listToAttrs
+      (map
+        (path: { name = baseNameOf path; value = path; })
+        nodeCfg.extraAutoPlugins));
+
+  customStaticDir = pkgs.runCommand "munin-custom-static-data" {} ''
+    cp -a "${pkgs.munin}/etc/opt/munin/static" "$out"
+    cd "$out"
+    chmod -R u+w .
+    echo "${cronCfg.extraCSS}" >> style.css
+    echo "${cronCfg.extraCSS}" >> style-new.css
+  '';
+in
+
+{
+
+  options = {
+
+    services.munin-node = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Enable Munin Node agent. Munin node listens on 0.0.0.0 and
+          by default accepts connections only from 127.0.0.1 for security reasons.
+
+          See <link xlink:href='http://guide.munin-monitoring.org/en/latest/architecture/index.html' />.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          <filename>munin-node.conf</filename> extra configuration. See
+          <link xlink:href='http://guide.munin-monitoring.org/en/latest/reference/munin-node.conf.html' />
+        '';
+      };
+
+      extraPluginConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          <filename>plugin-conf.d</filename> extra plugin configuration. See
+          <link xlink:href='http://guide.munin-monitoring.org/en/latest/plugin/use.html' />
+        '';
+        example = ''
+          [fail2ban_*]
+          user root
+        '';
+      };
+
+      extraPlugins = mkOption {
+        default = {};
+        type = with types; attrsOf path;
+        description = ''
+          Additional Munin plugins to activate. Keys are the name of the plugin
+          symlink, values are the path to the underlying plugin script. You
+          can use the same plugin script multiple times (e.g. for wildcard
+          plugins).
+
+          Note that these plugins do not participate in autoconfiguration. If
+          you want to autoconfigure additional plugins, use
+          <option>services.munin-node.extraAutoPlugins</option>.
+
+          Plugins enabled in this manner take precedence over autoconfigured
+          plugins.
+
+          Plugins will be copied into the Nix store, and an attempt will be
+          made to modify them to run properly by fixing hardcoded references to
+          <literal>/bin</literal>, <literal>/usr/bin</literal>,
+          <literal>/sbin</literal>, and <literal>/usr/sbin</literal>.
+        '';
+        example = literalExample ''
+          {
+            zfs_usage_bigpool = /src/munin-contrib/plugins/zfs/zfs_usage_;
+            zfs_usage_smallpool = /src/munin-contrib/plugins/zfs/zfs_usage_;
+            zfs_list = /src/munin-contrib/plugins/zfs/zfs_list;
+          };
+        '';
+      };
+
+      extraAutoPlugins = mkOption {
+        default = [];
+        type = with types; listOf path;
+        description = ''
+          Additional Munin plugins to autoconfigure, using
+          <literal>munin-node-configure --suggest</literal>. These should be
+          the actual paths to the plugin files (or directories containing them),
+          not just their names.
+
+          If you want to manually enable individual plugins instead, use
+          <option>services.munin-node.extraPlugins</option>.
+
+          Note that only plugins that have the 'autoconfig' capability will do
+          anything if listed here, since plugins that cannot autoconfigure
+          won't be automatically enabled by
+          <literal>munin-node-configure</literal>.
+
+          Plugins will be copied into the Nix store, and an attempt will be
+          made to modify them to run properly by fixing hardcoded references to
+          <literal>/bin</literal>, <literal>/usr/bin</literal>,
+          <literal>/sbin</literal>, and <literal>/usr/sbin</literal>.
+        '';
+        example = literalExample ''
+          [
+            /src/munin-contrib/plugins/zfs
+            /src/munin-contrib/plugins/ssh
+          ];
+        '';
+      };
+
+      disabledPlugins = mkOption {
+        # TODO: figure out why Munin isn't writing the log file and fix it.
+        # In the meantime this at least suppresses a useless graph full of
+        # NaNs in the output.
+        default = [ "munin_stats" ];
+        type = with types; listOf str;
+        description = ''
+          Munin plugins to disable, even if
+          <literal>munin-node-configure --suggest</literal> tries to enable
+          them. To disable a wildcard plugin, use an actual wildcard, as in
+          the example.
+
+          munin_stats is disabled by default as it tries to read
+          <literal>/var/log/munin/munin-update.log</literal> for timing
+          information, and the NixOS build of Munin does not write this file.
+        '';
+        example = [ "diskstats" "zfs_usage_*" ];
+      };
+    };
+
+    services.munin-cron = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Enable munin-cron. Takes care of all the heavy lifting: it collects
+          data from the nodes and renders the HTML graphs by running
+          munin-update, munin-limits, munin-graph and munin-html, in that order.
+
+          HTML output is written to <filename>/var/www/munin/</filename>;
+          configure your favourite webserver to serve these static files.
+        '';
+      };
+
+      extraGlobalConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          <filename>munin.conf</filename> extra global configuration.
+          See <link xlink:href='http://guide.munin-monitoring.org/en/latest/reference/munin.conf.html' />.
+          Useful to set up notifications, see
+          <link xlink:href='http://guide.munin-monitoring.org/en/latest/tutorial/alert.html' />
+        '';
+        example = ''
+          contact.email.command mail -s "Munin notification for ''${var:host}" someone@example.com
+        '';
+      };
+
+      hosts = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Definitions of the hosts (nodes) to collect data from. At least one
+          host is needed for munin-cron to succeed. See
+          <link xlink:href='http://guide.munin-monitoring.org/en/latest/reference/munin.conf.html' />
+        '';
+        example = ''
+          [''${config.networking.hostName}]
+          address localhost
+        '';
+      };
+
+      extraCSS = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Custom styling for the HTML that munin-cron generates. This will be
+          appended to the CSS files used by munin-cron and will thus take
+          precedence over the builtin styles.
+        '';
+        example = ''
+          /* A simple dark theme. */
+          html, body { background: #222222; }
+          #header, #footer { background: #333333; }
+          img.i, img.iwarn, img.icrit, img.iunkn {
+            filter: invert(100%) hue-rotate(-30deg);
+          }
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkMerge [ (mkIf (nodeCfg.enable || cronCfg.enable)  {
+
+    environment.systemPackages = [ pkgs.munin ];
+
+    users.users.munin = {
+      description = "Munin monitoring user";
+      group = "munin";
+      uid = config.ids.uids.munin;
+      home = "/var/lib/munin";
+    };
+
+    users.groups.munin = {
+      gid = config.ids.gids.munin;
+    };
+
+  }) (mkIf nodeCfg.enable {
+
+    systemd.services.munin-node = {
+      description = "Munin Node";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [ munin smartmontools "/run/current-system/sw" "/run/wrappers" ];
+      environment.MUNIN_LIBDIR = "${pkgs.munin}/lib";
+      environment.MUNIN_PLUGSTATE = "/run/munin";
+      environment.MUNIN_LOGDIR = "/var/log/munin";
+      preStart = ''
+        echo "Updating munin plugins..."
+
+        mkdir -p /etc/munin/plugins
+        rm -rf /etc/munin/plugins/*
+
+        # Autoconfigure builtin plugins
+        ${pkgs.munin}/bin/munin-node-configure --suggest --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${pkgs.munin}/lib/plugins --servicedir=/etc/munin/plugins --sconfdir=${pluginConfDir} 2>/dev/null | ${pkgs.bash}/bin/bash
+
+        # Autoconfigure extra plugins
+        ${pkgs.munin}/bin/munin-node-configure --suggest --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${extraAutoPluginDir} --servicedir=/etc/munin/plugins --sconfdir=${pluginConfDir} 2>/dev/null | ${pkgs.bash}/bin/bash
+
+        ${lib.optionalString (nodeCfg.extraPlugins != {}) ''
+            # Link in manually enabled plugins
+            ln -f -s -t /etc/munin/plugins ${extraPluginDir}/*
+          ''}
+
+        ${lib.optionalString (nodeCfg.disabledPlugins != []) ''
+            # Disable plugins
+            cd /etc/munin/plugins
+            rm -f ${toString nodeCfg.disabledPlugins}
+          ''}
+      '';
+      serviceConfig = {
+        ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/ --sconfdir=${pluginConfDir}";
+      };
+    };
+
+    # munin_stats plugin breaks as of 2.0.33 when this doesn't exist
+    systemd.tmpfiles.rules = [ "d /run/munin 0755 munin munin -" ];
+
+  }) (mkIf cronCfg.enable {
+
+    # Munin is hardcoded to use DejaVu Mono and the graphs come out wrong if
+    # it's not available.
+    fonts.fonts = [ pkgs.dejavu_fonts ];
+
+    systemd.timers.munin-cron = {
+      description = "batch Munin master programs";
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = "*:0/5";
+    };
+
+    systemd.services.munin-cron = {
+      description = "batch Munin master programs";
+      unitConfig.Documentation = "man:munin-cron(8)";
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "munin";
+        ExecStart = "${pkgs.munin}/bin/munin-cron --config ${muninConf}";
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /run/munin 0755 munin munin -"
+      "d /var/log/munin 0755 munin munin -"
+      "d /var/www/munin 0755 munin munin -"
+      "d /var/lib/munin 0755 munin munin -"
+    ];
+  })];
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/nagios.nix b/nixpkgs/nixos/modules/services/monitoring/nagios.nix
new file mode 100644
index 000000000000..9ac6869068f2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/nagios.nix
@@ -0,0 +1,212 @@
+# Nagios system/network monitoring daemon.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nagios;
+
+  nagiosState = "/var/lib/nagios";
+  nagiosLogDir = "/var/log/nagios";
+  urlPath = "/nagios";
+
+  nagiosObjectDefs = cfg.objectDefs;
+
+  nagiosObjectDefsDir = pkgs.runCommand "nagios-objects" {
+      inherit nagiosObjectDefs;
+      preferLocalBuild = true;
+    } "mkdir -p $out; ln -s $nagiosObjectDefs $out/";
+
+  nagiosCfgFile = let
+    default = {
+      log_file="${nagiosLogDir}/current";
+      log_archive_path="${nagiosLogDir}/archive";
+      status_file="${nagiosState}/status.dat";
+      object_cache_file="${nagiosState}/objects.cache";
+      temp_file="${nagiosState}/nagios.tmp";
+      lock_file="/run/nagios.lock";
+      state_retention_file="${nagiosState}/retention.dat";
+      query_socket="${nagiosState}/nagios.qh";
+      check_result_path="${nagiosState}";
+      command_file="${nagiosState}/nagios.cmd";
+      cfg_dir="${nagiosObjectDefsDir}";
+      nagios_user="nagios";
+      nagios_group="nagios";
+      illegal_macro_output_chars="`~$&|'\"<>";
+      retain_state_information="1";
+    };
+    lines = mapAttrsToList (key: value: "${key}=${value}") (default // cfg.extraConfig);
+    content = concatStringsSep "\n" lines;
+    file = pkgs.writeText "nagios.cfg" content;
+    validated =  pkgs.runCommand "nagios-checked.cfg" {preferLocalBuild=true;} ''
+      cp ${file} nagios.cfg
+      # nagios checks the existence of /var/lib/nagios, but
+      # it does not exist in the build sandbox, so we fake it
+      mkdir lib
+      lib=$(readlink -f lib)
+      sed -i s@=${nagiosState}@=$lib@ nagios.cfg
+      ${pkgs.nagios}/bin/nagios -v nagios.cfg && cp ${file} $out
+    '';
+    defaultCfgFile = if cfg.validateConfig then validated else file;
+  in
+  if cfg.mainConfigFile == null then defaultCfgFile else cfg.mainConfigFile;
+
+  # Plain configuration for the Nagios web-interface with no
+  # authentication.
+  nagiosCGICfgFile = pkgs.writeText "nagios.cgi.conf"
+    ''
+      main_config_file=${nagiosCfgFile}
+      use_authentication=0
+      url_html_path=${urlPath}
+    '';
+
+  extraHttpdConfig =
+    ''
+      ScriptAlias ${urlPath}/cgi-bin ${pkgs.nagios}/sbin
+
+      <Directory "${pkgs.nagios}/sbin">
+        Options ExecCGI
+        Require all granted
+        SetEnv NAGIOS_CGI_CONFIG ${cfg.cgiConfigFile}
+      </Directory>
+
+      Alias ${urlPath} ${pkgs.nagios}/share
+
+      <Directory "${pkgs.nagios}/share">
+        Options None
+        Require all granted
+      </Directory>
+    '';
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "nagios" "urlPath" ] "The urlPath option has been removed as it is hard coded to /nagios in the nagios package.")
+  ];
+
+  meta.maintainers = with lib.maintainers; [ symphorien ];
+
+  options = {
+    services.nagios = {
+      enable = mkEnableOption "<link xlink:href='http://www.nagios.org/'>Nagios</link> to monitor your system or network";
+
+      objectDefs = mkOption {
+        description = "
+          A list of Nagios object configuration files that must define
+          the hosts, host groups, services and contacts for the
+          network that you want Nagios to monitor.
+        ";
+        type = types.listOf types.path;
+        example = literalExample "[ ./objects.cfg ]";
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.package;
+        default = with pkgs; [ nagiosPluginsOfficial ssmtp mailutils ];
+        defaultText = "[pkgs.nagiosPluginsOfficial pkgs.ssmtp pkgs.mailutils]";
+        description = "
+          Packages to be added to the Nagios <envar>PATH</envar>.
+          Typically used to add plugins, but can be anything.
+        ";
+      };
+
+      mainConfigFile = mkOption {
+        type = types.nullOr types.package;
+        default = null;
+        description = "
+          If non-null, overrides the main configuration file of Nagios.
+        ";
+      };
+
+      extraConfig = mkOption {
+        type = types.attrsOf types.str;
+        example = {
+          debug_level = "-1";
+          debug_file = "/var/log/nagios/debug.log";
+        };
+        default = {};
+        description = "Configuration to add to /etc/nagios.cfg";
+      };
+
+      validateConfig = mkOption {
+        type = types.bool;
+        default = pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform;
+        description = "If true, the syntax of the Nagios configuration file is checked at build time.";
+      };
+
+      cgiConfigFile = mkOption {
+        type = types.package;
+        default = nagiosCGICfgFile;
+        defaultText = "nagiosCGICfgFile";
+        description = "
+          Derivation for the configuration file of Nagios CGI scripts
+          that can be used in web servers for running the Nagios web interface.
+        ";
+      };
+
+      enableWebInterface = mkOption {
+        type = types.bool;
+        default = false;
+        description = "
+          Whether to enable the Nagios web interface.  You should also
+          enable Apache (<option>services.httpd.enable</option>).
+        ";
+      };
+
+      virtualHost = mkOption {
+        type = types.submodule (import ../web-servers/apache-httpd/vhost-options.nix);
+        example = literalExample ''
+          { hostName = "example.org";
+            adminAddr = "webmaster@example.org";
+            enableSSL = true;
+            sslServerCert = "/var/lib/acme/example.org/full.pem";
+            sslServerKey = "/var/lib/acme/example.org/key.pem";
+          }
+        '';
+        description = ''
+          Apache configuration can be done by adapting <option>services.httpd.virtualHosts</option>.
+          See <xref linkend="opt-services.httpd.virtualHosts"/> for further information.
+        '';
+      };
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+    users.users.nagios = {
+      description = "Nagios user";
+      uid         = config.ids.uids.nagios;
+      home        = nagiosState;
+      group       = "nagios";
+    };
+
+    users.groups.nagios = { };
+
+    # This isn't needed, it's just so that the user can type "nagiostats
+    # -c /etc/nagios.cfg".
+    environment.etc."nagios.cfg".source = nagiosCfgFile;
+
+    environment.systemPackages = [ pkgs.nagios ];
+    systemd.services.nagios = {
+      description = "Nagios monitoring daemon";
+      path     = [ pkgs.nagios ] ++ cfg.plugins;
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" ];
+
+      serviceConfig = {
+        User = "nagios";
+        Group = "nagios";
+        Restart = "always";
+        RestartSec = 2;
+        LogsDirectory = "nagios";
+        StateDirectory = "nagios";
+        ExecStart = "${pkgs.nagios}/bin/nagios /etc/nagios.cfg";
+        X-ReloadIfChanged = nagiosCfgFile;
+      };
+    };
+
+    services.httpd.virtualHosts = optionalAttrs cfg.enableWebInterface {
+      ${cfg.virtualHost.hostName} = mkMerge [ cfg.virtualHost { extraConfig = extraHttpdConfig; } ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/netdata.nix b/nixpkgs/nixos/modules/services/monitoring/netdata.nix
new file mode 100644
index 000000000000..a5233a46e341
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/netdata.nix
@@ -0,0 +1,219 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.netdata;
+
+  wrappedPlugins = pkgs.runCommand "wrapped-plugins" { preferLocalBuild = true; } ''
+    mkdir -p $out/libexec/netdata/plugins.d
+    ln -s /run/wrappers/bin/apps.plugin $out/libexec/netdata/plugins.d/apps.plugin
+    ln -s /run/wrappers/bin/freeipmi.plugin $out/libexec/netdata/plugins.d/freeipmi.plugin
+    ln -s /run/wrappers/bin/perf.plugin $out/libexec/netdata/plugins.d/perf.plugin
+    ln -s /run/wrappers/bin/slabinfo.plugin $out/libexec/netdata/plugins.d/slabinfo.plugin
+  '';
+
+  plugins = [
+    "${cfg.package}/libexec/netdata/plugins.d"
+    "${wrappedPlugins}/libexec/netdata/plugins.d"
+  ] ++ cfg.extraPluginPaths;
+
+  localConfig = {
+    global = {
+      "plugins directory" = concatStringsSep " " plugins;
+    };
+    web = {
+      "web files owner" = "root";
+      "web files group" = "root";
+    };
+  };
+  mkConfig = generators.toINI {} (recursiveUpdate localConfig cfg.config);
+  configFile = pkgs.writeText "netdata.conf" (if cfg.configText != null then cfg.configText else mkConfig);
+
+  defaultUser = "netdata";
+
+in {
+  options = {
+    services.netdata = {
+      enable = mkEnableOption "netdata";
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.netdata;
+        defaultText = "pkgs.netdata";
+        description = "Netdata package to use.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "netdata";
+        description = "User account under which netdata runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "netdata";
+        description = "Group under which netdata runs.";
+      };
+
+      configText = mkOption {
+        type = types.nullOr types.lines;
+        description = "Verbatim netdata.conf. Cannot be combined with config.";
+        default = null;
+        example = ''
+          [global]
+          debug log = syslog
+          access log = syslog
+          error log = syslog
+        '';
+      };
+
+      python = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            Whether to enable python-based plugins
+          '';
+        };
+        extraPackages = mkOption {
+          default = ps: [];
+          defaultText = "ps: []";
+          example = literalExample ''
+            ps: [
+              ps.psycopg2
+              ps.docker
+              ps.dnspython
+            ]
+          '';
+          description = ''
+            Extra python packages available at runtime
+            to enable additional python plugins.
+          '';
+        };
+      };
+
+      extraPluginPaths = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        example = literalExample ''
+          [ "/path/to/plugins.d" ]
+        '';
+        description = ''
+          Extra paths to add to the netdata global "plugins directory"
+          option.  Useful for when you want to include your own
+          collection scripts.
+          </para><para>
+          Details about writing a custom netdata plugin are available at:
+          <link xlink:href="https://docs.netdata.cloud/collectors/plugins.d/"/>
+          </para><para>
+          Cannot be combined with configText.
+        '';
+      };
+
+      config = mkOption {
+        type = types.attrsOf types.attrs;
+        default = {};
+        description = "netdata.conf configuration as nix attributes. Cannot be combined with configText.";
+        example = literalExample ''
+          global = {
+            "debug log" = "syslog";
+            "access log" = "syslog";
+            "error log" = "syslog";
+          };
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions =
+      [ { assertion = cfg.config != {} -> cfg.configText == null ;
+          message = "Cannot specify both config and configText";
+        }
+      ];
+
+    systemd.tmpfiles.rules = [
+      "d /var/cache/netdata 0755 ${cfg.user} ${cfg.group} -"
+      "Z /var/cache/netdata - ${cfg.user} ${cfg.group} -"
+      "d /var/log/netdata 0755 ${cfg.user} ${cfg.group} -"
+      "Z /var/log/netdata - ${cfg.user} ${cfg.group} -"
+      "d /var/lib/netdata 0755 ${cfg.user} ${cfg.group} -"
+      "Z /var/lib/netdata - ${cfg.user} ${cfg.group} -"
+      "d /etc/netdata 0755 ${cfg.user} ${cfg.group} -"
+      "Z /etc/netdata - ${cfg.user} ${cfg.group} -"
+    ];
+    systemd.services.netdata = {
+      description = "Real time performance monitoring";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = (with pkgs; [ curl gawk which ]) ++ lib.optional cfg.python.enable
+        (pkgs.python3.withPackages cfg.python.extraPackages);
+      serviceConfig = {
+        Environment="PYTHONPATH=${cfg.package}/libexec/netdata/python.d/python_modules";
+        ExecStart = "${cfg.package}/bin/netdata -P /run/netdata/netdata.pid -D -c ${configFile}";
+        ExecReload = "${pkgs.utillinux}/bin/kill -s HUP -s USR1 -s USR2 $MAINPID";
+        TimeoutStopSec = 60;
+        Restart = "on-failure";
+        # User and group
+        User = cfg.user;
+        Group = cfg.group;
+        # Runtime directory and mode
+        RuntimeDirectory = "netdata";
+        RuntimeDirectoryMode = "0755";
+        # Performance
+        LimitNOFILE = "30000";
+      };
+    };
+
+    systemd.enableCgroupAccounting = true;
+
+    security.wrappers."apps.plugin" = {
+      source = "${cfg.package}/libexec/netdata/plugins.d/apps.plugin.org";
+      capabilities = "cap_dac_read_search,cap_sys_ptrace+ep";
+      owner = cfg.user;
+      group = cfg.group;
+      permissions = "u+rx,g+rx,o-rwx";
+    };
+
+    security.wrappers."freeipmi.plugin" = {
+      source = "${cfg.package}/libexec/netdata/plugins.d/freeipmi.plugin.org";
+      capabilities = "cap_dac_override,cap_fowner+ep";
+      owner = cfg.user;
+      group = cfg.group;
+      permissions = "u+rx,g+rx,o-rwx";
+    };
+
+    security.wrappers."perf.plugin" = {
+      source = "${cfg.package}/libexec/netdata/plugins.d/perf.plugin.org";
+      capabilities = "cap_sys_admin+ep";
+      owner = cfg.user;
+      group = cfg.group;
+      permissions = "u+rx,g+rx,o-rx";
+    };
+
+    security.wrappers."slabinfo.plugin" = {
+      source = "${cfg.package}/libexec/netdata/plugins.d/slabinfo.plugin.org";
+      capabilities = "cap_dac_override+ep";
+      owner = cfg.user;
+      group = cfg.group;
+      permissions = "u+rx,g+rx,o-rx";
+    };
+
+    security.pam.loginLimits = [
+      { domain = "netdata"; type = "soft"; item = "nofile"; value = "10000"; }
+      { domain = "netdata"; type = "hard"; item = "nofile"; value = "30000"; }
+    ];
+
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} = {
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == defaultUser) {
+      ${defaultUser} = { };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
new file mode 100644
index 000000000000..1b02ebf37045
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
@@ -0,0 +1,187 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.alertmanager;
+  mkConfigFile = pkgs.writeText "alertmanager.yml" (builtins.toJSON cfg.configuration);
+
+  checkedConfig = file: pkgs.runCommand "checked-config" { buildInputs = [ cfg.package ]; } ''
+    ln -s ${file} $out
+    amtool check-config $out
+  '';
+
+  alertmanagerYml = let
+    yml = if cfg.configText != null then
+        pkgs.writeText "alertmanager.yml" cfg.configText
+        else mkConfigFile;
+    in checkedConfig yml;
+
+  cmdlineArgs = cfg.extraFlags ++ [
+    "--config.file /tmp/alert-manager-substituted.yaml"
+    "--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
+    "--log.level ${cfg.logLevel}"
+    "--storage.path /var/lib/alertmanager"
+    (toString (map (peer: "--cluster.peer ${peer}:9094") cfg.clusterPeers))
+    ] ++ (optional (cfg.webExternalUrl != null)
+      "--web.external-url ${cfg.webExternalUrl}"
+    ) ++ (optional (cfg.logFormat != null)
+      "--log.format ${cfg.logFormat}"
+  );
+in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "user" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a user setting.")
+    (mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "group" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a group setting.")
+    (mkRemovedOptionModule [ "services" "prometheus" "alertmanagerURL" ] ''
+      Due to incompatibility, the alertmanagerURL option has been removed,
+      please use 'services.prometheus2.alertmanagers' instead.
+    '')
+  ];
+
+  options = {
+    services.prometheus.alertmanager = {
+      enable = mkEnableOption "Prometheus Alertmanager";
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.prometheus-alertmanager;
+        defaultText = "pkgs.alertmanager";
+        description = ''
+          Package that should be used for alertmanager.
+        '';
+      };
+
+      configuration = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = ''
+          Alertmanager configuration as nix attribute set.
+        '';
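+        # Minimal illustrative sketch; the receiver name "default" is only a
+        # placeholder, and any attribute set that maps to a valid
+        # alertmanager.yml is accepted.
+        example = literalExample ''
+          {
+            route.receiver = "default";
+            receivers = [ { name = "default"; } ];
+          }
+        '';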
+      };
+
+      configText = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = ''
+          Alertmanager configuration as YAML text. If non-null, this option
+          defines the text that is written to alertmanager.yml. If null, the
+          contents of alertmanager.yml are generated from the structured config
+          options.
+        '';
+      };
+
+      logFormat = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          If set, use a syslog logger or JSON logging.
+        '';
+      };
+
+      logLevel = mkOption {
+        type = types.enum ["debug" "info" "warn" "error" "fatal"];
+        default = "warn";
+        description = ''
+          Only log messages with the given severity or above.
+        '';
+      };
+
+      webExternalUrl = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy).
+          Used for generating relative and absolute links back to Alertmanager itself.
+          If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Alertmanager.
+          If omitted, relevant URL components will be derived automatically.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "";
+        description = ''
+          Address to listen on for the web interface and API. Empty string will listen on all interfaces.
+          "localhost" will listen on 127.0.0.1 (but not ::1).
+        '';
+      };
+
+      port = mkOption {
+        type = types.int;
+        default = 9093;
+        description = ''
+          Port to listen on for the web interface and API.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Open port in firewall for incoming connections.
+        '';
+      };
+
+      clusterPeers = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Initial peers for HA cluster.
+        '';
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Extra commandline options when launching the Alertmanager.
+        '';
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/root/alertmanager.env";
+        description = ''
+          File to load as environment file. Environment variables
+          from this file will be interpolated into the config file
+          using envsubst with this syntax:
+          <literal>$ENVIRONMENT ''${VARIABLE}</literal>
+        '';
+      };
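+      # Illustrative sketch: with a line such as SMTP_PASSWORD=somesecret in
+      # the environment file, the configuration text can reference it as
+      # $SMTP_PASSWORD (or ${SMTP_PASSWORD}); envsubst substitutes it into the
+      # generated config before alertmanager starts. SMTP_PASSWORD is only a
+      # placeholder name.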
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.enable {
+      assertions = singleton {
+        assertion = cfg.configuration != null || cfg.configText != null;
+        message = "Can not enable alertmanager without a configuration. "
+         + "Set either the `configuration` or `configText` attribute.";
+      };
+    })
+    (mkIf cfg.enable {
+      networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
+
+      systemd.services.alertmanager = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network-online.target" ];
+        preStart = ''
+           ${lib.getBin pkgs.envsubst}/bin/envsubst -o "/tmp/alert-manager-substituted.yaml" \
+                                                    -i "${alertmanagerYml}"
+        '';
+        serviceConfig = {
+          Restart  = "always";
+          StateDirectory = "alertmanager";
+          DynamicUser = true; # implies PrivateTmp
+          EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
+          WorkingDirectory = "/tmp";
+          ExecStart = "${cfg.package}/bin/alertmanager" +
+            optionalString (length cmdlineArgs != 0) (" \\\n  " +
+              concatStringsSep " \\\n  " cmdlineArgs);
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        };
+      };
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix
new file mode 100644
index 000000000000..84a72afac2f7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix
@@ -0,0 +1,642 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus;
+
+  workingDir = "/var/lib/" + cfg.stateDir;
+
+  # a wrapper that verifies that the configuration is valid
+  promtoolCheck = what: name: file:
+    if cfg.checkConfig then
+      pkgs.runCommandNoCCLocal
+        "${name}-${replaceStrings [" "] [""] what}-checked"
+        { buildInputs = [ cfg.package ]; } ''
+      ln -s ${file} $out
+      promtool ${what} $out
+    '' else file;
+
+  # Pretty-print JSON to a file
+  writePrettyJSON = name: x:
+    pkgs.runCommandNoCCLocal name {} ''
+      echo '${builtins.toJSON x}' | ${pkgs.jq}/bin/jq . > $out
+    '';
+
+  generatedPrometheusYml = writePrettyJSON "prometheus.yml" promConfig;
+
+  # This becomes the main config file for Prometheus
+  promConfig = {
+    global = filterValidPrometheus cfg.globalConfig;
+    rule_files = map (promtoolCheck "check rules" "rules") (cfg.ruleFiles ++ [
+      (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
+    ]);
+    scrape_configs = filterValidPrometheus cfg.scrapeConfigs;
+    alerting = {
+      inherit (cfg) alertmanagers;
+    };
+  };
+
+  prometheusYml = let
+    yml = if cfg.configText != null then
+      pkgs.writeText "prometheus.yml" cfg.configText
+      else generatedPrometheusYml;
+    in promtoolCheck "check config" "prometheus.yml" yml;
+
+  cmdlineArgs = cfg.extraFlags ++ [
+    "--storage.tsdb.path=${workingDir}/data/"
+    "--config.file=${prometheusYml}"
+    "--web.listen-address=${cfg.listenAddress}"
+    "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
+    "--alertmanager.timeout=${toString cfg.alertmanagerTimeout}s"
+  ] ++
+  optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}";
+
+  filterValidPrometheus = filterAttrsListRecursive (n: v: !(n == "_module" || v == null));
+  filterAttrsListRecursive = pred: x:
+    if isAttrs x then
+      listToAttrs (
+        concatMap (name:
+          let v = x.${name}; in
+          if pred name v then [
+            (nameValuePair name (filterAttrsListRecursive pred v))
+          ] else []
+        ) (attrNames x)
+      )
+    else if isList x then
+      map (filterAttrsListRecursive pred) x
+    else x;
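+  # For example (illustrative):
+  #   filterValidPrometheus { scrape_interval = "15s"; scrape_timeout = null; }
+  #   evaluates to { scrape_interval = "15s"; }, dropping null and "_module"
+  #   attributes recursively.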
+
+  mkDefOpt = type : defaultStr : description : mkOpt type (description + ''
+
+    Defaults to <literal>${defaultStr}</literal> in prometheus
+    when set to <literal>null</literal>.
+  '');
+
+  mkOpt = type : description : mkOption {
+    type = types.nullOr type;
+    default = null;
+    inherit description;
+  };
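+  # For instance (illustrative), `mkDefOpt types.str "1m" "Scrape interval."`
+  # yields a nullable string option that defaults to null, with a note in its
+  # description that prometheus falls back to 1m when it is unset.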
+
+  promTypes.globalConfig = types.submodule {
+    options = {
+      scrape_interval = mkDefOpt types.str "1m" ''
+        How frequently to scrape targets by default.
+      '';
+
+      scrape_timeout = mkDefOpt types.str "10s" ''
+        How long until a scrape request times out.
+      '';
+
+      evaluation_interval = mkDefOpt types.str "1m" ''
+        How frequently to evaluate rules by default.
+      '';
+
+      external_labels = mkOpt (types.attrsOf types.str) ''
+        The labels to add to any time series or alerts when
+        communicating with external systems (federation, remote
+        storage, Alertmanager).
+      '';
+    };
+  };
+
+  promTypes.scrape_config = types.submodule {
+    options = {
+      job_name = mkOption {
+        type = types.str;
+        description = ''
+          The job name assigned to scraped metrics by default.
+        '';
+      };
+      scrape_interval = mkOpt types.str ''
+        How frequently to scrape targets from this job. Defaults to the
+        globally configured default.
+      '';
+
+      scrape_timeout = mkOpt types.str ''
+        Per-target timeout when scraping this job. Defaults to the
+        globally configured default.
+      '';
+
+      metrics_path = mkDefOpt types.str "/metrics" ''
+        The HTTP resource path on which to fetch metrics from targets.
+      '';
+
+      honor_labels = mkDefOpt types.bool "false" ''
+        Controls how Prometheus handles conflicts between labels
+        that are already present in scraped data and labels that
+        Prometheus would attach server-side ("job" and "instance"
+        labels, manually configured target labels, and labels
+        generated by service discovery implementations).
+
+        If honor_labels is set to "true", label conflicts are
+        resolved by keeping label values from the scraped data and
+        ignoring the conflicting server-side labels.
+
+        If honor_labels is set to "false", label conflicts are
+        resolved by renaming conflicting labels in the scraped data
+        to "exported_&lt;original-label&gt;" (for example
+        "exported_instance", "exported_job") and then attaching
+        server-side labels. This is useful for use cases such as
+        federation, where all labels specified in the target should
+        be preserved.
+      '';
+
+      honor_timestamps = mkDefOpt types.bool "true" ''
+        honor_timestamps controls whether Prometheus respects the timestamps present
+        in scraped data.
+
+        If honor_timestamps is set to <literal>true</literal>, the timestamps of the metrics exposed
+        by the target will be used.
+
+        If honor_timestamps is set to <literal>false</literal>, the timestamps of the metrics exposed
+        by the target will be ignored.
+      '';
+
+      scheme = mkDefOpt (types.enum ["http" "https"]) "http" ''
+        The URL scheme with which to fetch metrics from targets.
+      '';
+
+      params = mkOpt (types.attrsOf (types.listOf types.str)) ''
+        Optional HTTP URL parameters.
+      '';
+
+      basic_auth = mkOpt (types.submodule {
+        options = {
+          username = mkOption {
+            type = types.str;
+            description = ''
+              HTTP username
+            '';
+          };
+          password = mkOption {
+            type = types.str;
+            description = ''
+              HTTP password
+            '';
+          };
+        };
+      }) ''
+        Optional http login credentials for metrics scraping.
+      '';
+
+      bearer_token = mkOpt types.str ''
+        Sets the `Authorization` header on every scrape request with
+        the configured bearer token. It is mutually exclusive with
+        <option>bearer_token_file</option>.
+      '';
+
+      bearer_token_file = mkOpt types.str ''
+        Sets the `Authorization` header on every scrape request with
+        the bearer token read from the configured file. It is mutually
+        exclusive with <option>bearer_token</option>.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        Configures the scrape request's TLS settings.
+      '';
+
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
+      '';
+
+      ec2_sd_configs = mkOpt (types.listOf promTypes.ec2_sd_config) ''
+        List of EC2 service discovery configurations.
+      '';
+
+      dns_sd_configs = mkOpt (types.listOf promTypes.dns_sd_config) ''
+        List of DNS service discovery configurations.
+      '';
+
+      consul_sd_configs = mkOpt (types.listOf promTypes.consul_sd_config) ''
+        List of Consul service discovery configurations.
+      '';
+
+      file_sd_configs = mkOpt (types.listOf promTypes.file_sd_config) ''
+        List of file service discovery configurations.
+      '';
+
+      static_configs = mkOpt (types.listOf promTypes.static_config) ''
+        List of labeled target groups for this job.
+      '';
+
+      relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
+        List of relabel configurations.
+      '';
+
+      sample_limit = mkDefOpt types.int "0" ''
+        Per-scrape limit on number of scraped samples that will be accepted.
+        If more than this number of samples are present after metric relabelling
+        the entire scrape will be treated as failed. 0 means no limit.
+      '';
+    };
+  };
+
+  promTypes.static_config = types.submodule {
+    options = {
+      targets = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          The targets specified by the target group.
+        '';
+      };
+      labels = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = ''
+          Labels assigned to all metrics scraped from the targets.
+        '';
+      };
+    };
+  };
+
+  promTypes.ec2_sd_config = types.submodule {
+    options = {
+      region = mkOption {
+        type = types.str;
+        description = ''
+          The AWS Region.
+        '';
+      };
+      endpoint = mkOpt types.str ''
+        Custom endpoint to be used.
+      '';
+
+      access_key = mkOpt types.str ''
+        The AWS API key id. If blank, the environment variable
+        <literal>AWS_ACCESS_KEY_ID</literal> is used.
+      '';
+
+      secret_key = mkOpt types.str ''
+        The AWS API key secret. If blank, the environment variable
+         <literal>AWS_SECRET_ACCESS_KEY</literal> is used.
+      '';
+
+      profile = mkOpt  types.str ''
+        Named AWS profile used to connect to the API.
+      '';
+
+      role_arn = mkOpt types.str ''
+        AWS Role ARN, an alternative to using AWS API keys.
+      '';
+
+      refresh_interval = mkDefOpt types.str "60s" ''
+        Refresh interval to re-read the instance list.
+      '';
+
+      port = mkDefOpt types.int "80" ''
+        The port to scrape metrics from. If using the public IP
+        address, this must instead be specified in the relabeling
+        rule.
+      '';
+
+      filters = mkOpt (types.listOf promTypes.filter) ''
+        Filters can be used optionally to filter the instance list by other criteria.
+      '';
+    };
+  };
+
+  promTypes.filter = types.submodule {
+    options = {
+      name = mkOption {
+        type = types.str;
+        description = ''
+          See <link xlink:href="https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html">this list</link>
+          for the available filters.
+        '';
+      };
+
+      value = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Value of the filter.
+        '';
+      };
+    };
+  };
+
+  promTypes.dns_sd_config = types.submodule {
+    options = {
+      names = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          A list of DNS SRV record names to be queried.
+        '';
+      };
+
+      refresh_interval = mkDefOpt types.str "30s" ''
+        The time after which the provided names are refreshed.
+      '';
+    };
+  };
+
+  promTypes.consul_sd_config = types.submodule {
+    options = {
+      server = mkDefOpt types.str "localhost:8500" ''
+        Consul server to query.
+      '';
+
+      token = mkOpt types.str "Consul token";
+
+      datacenter = mkOpt types.str "Consul datacenter";
+
+      scheme = mkDefOpt types.str "http" "Consul scheme";
+
+      username = mkOpt types.str "Consul username";
+
+      password = mkOpt types.str "Consul password";
+
+      tls_config = mkOpt promTypes.tls_config ''
+        Configures the Consul request's TLS settings.
+      '';
+
+      services = mkOpt (types.listOf types.str) ''
+        A list of services for which targets are retrieved.
+      '';
+
+      tags = mkOpt (types.listOf types.str) ''
+        An optional list of tags used to filter nodes for a given
+        service. Services must contain all tags in the list.
+      '';
+
+      node_meta = mkOpt (types.attrsOf types.str) ''
+        Node metadata used to filter nodes for a given service.
+      '';
+
+      tag_separator = mkDefOpt types.str "," ''
+        The string by which Consul tags are joined into the tag label.
+      '';
+
+      allow_stale = mkOpt types.bool ''
+        Allow stale Consul results
+        (see <link xlink:href="https://www.consul.io/api/index.html#consistency-modes"/>).
+
+        Will reduce load on Consul.
+      '';
+
+      refresh_interval = mkDefOpt types.str "30s" ''
+        The time after which the provided names are refreshed.
+
+        On large setups it might be a good idea to increase this value
+        because the catalog will change all the time.
+      '';
+    };
+  };
+
+  promTypes.file_sd_config = types.submodule {
+    options = {
+      files = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          Patterns for files from which target groups are extracted. Refer
+          to the Prometheus documentation for permitted filename patterns
+          and formats.
+        '';
+      };
+
+      refresh_interval = mkDefOpt types.str "5m" ''
+        Refresh interval to re-read the files.
+      '';
+    };
+  };
+
+  promTypes.relabel_config = types.submodule {
+    options = {
+      source_labels = mkOpt (types.listOf types.str) ''
+        The source labels select values from existing labels. Their content
+        is concatenated using the configured separator and matched against
+        the configured regular expression.
+      '';
+
+      separator = mkDefOpt types.str ";" ''
+        Separator placed between concatenated source label values.
+      '';
+
+      target_label = mkOpt types.str ''
+        Label to which the resulting value is written in a replace action.
+        It is mandatory for replace actions.
+      '';
+
+      regex = mkDefOpt types.str "(.*)" ''
+        Regular expression against which the extracted value is matched.
+      '';
+
+      modulus = mkOpt types.int ''
+        Modulus to take of the hash of the source label values.
+      '';
+
+      replacement = mkDefOpt types.str "$1" ''
+        Replacement value against which a regex replace is performed if the
+        regular expression matches.
+      '';
+
+      action = mkDefOpt (types.enum ["replace" "keep" "drop"]) "replace" ''
+        Action to perform based on regex matching.
+      '';
+
+    };
+  };
+
+  promTypes.tls_config = types.submodule {
+    options = {
+      ca_file = mkOpt types.str ''
+        CA certificate to validate API server certificate with.
+      '';
+
+      cert_file = mkOpt types.str ''
+        Certificate file for client cert authentication to the server.
+      '';
+
+      key_file = mkOpt types.str ''
+        Key file for client cert authentication to the server.
+      '';
+
+      server_name = mkOpt types.str ''
+        ServerName extension to indicate the name of the server.
+        http://tools.ietf.org/html/rfc4366#section-3.1
+      '';
+
+      insecure_skip_verify = mkOpt types.bool ''
+        Disable validation of the server certificate.
+      '';
+    };
+  };
+
+in {
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "prometheus2" ] [ "services" "prometheus" ])
+  ];
+
+  options.services.prometheus = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable the Prometheus monitoring daemon.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.prometheus;
+      defaultText = "pkgs.prometheus";
+      description = ''
+        The prometheus package that should be used.
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0:9090";
+      description = ''
+        Address to listen on for the web interface, API, and telemetry.
+      '';
+    };
+
+    stateDir = mkOption {
+      type = types.str;
+      default = "prometheus2";
+      description = ''
+        Directory below <literal>/var/lib</literal> to store Prometheus metrics data.
+        This directory will be created automatically using systemd's StateDirectory mechanism.
+      '';
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = ''
+        Extra commandline options when launching Prometheus.
+      '';
+    };
+
+    configText = mkOption {
+      type = types.nullOr types.lines;
+      default = null;
+      description = ''
+        If non-null, this option defines the text that is written to
+        prometheus.yml. If null, the contents of prometheus.yml are generated
+        from the structured config options.
+      '';
+    };
+
+    globalConfig = mkOption {
+      type = promTypes.globalConfig;
+      default = {};
+      description = ''
+        Parameters that are valid in all configuration contexts. They
+        also serve as defaults for other configuration sections.
+      '';
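+      # Illustrative example; any promTypes.globalConfig attributes are accepted.
+      example = literalExample ''
+        {
+          scrape_interval = "15s";
+          evaluation_interval = "30s";
+        }
+      '';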
+    };
+
+    rules = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = ''
+        Alerting and/or Recording rules to evaluate at runtime.
+      '';
+    };
+
+    ruleFiles = mkOption {
+      type = types.listOf types.path;
+      default = [];
+      description = ''
+        Any additional rules files to include in this configuration.
+      '';
+    };
+
+    scrapeConfigs = mkOption {
+      type = types.listOf promTypes.scrape_config;
+      default = [];
+      description = ''
+        A list of scrape configurations.
+      '';
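+      # Illustrative sketch of a single statically-configured scrape job; the
+      # job name and target address are placeholders.
+      example = literalExample ''
+        [ {
+          job_name = "node";
+          static_configs = [ {
+            targets = [ "localhost:9100" ];
+          } ];
+        } ]
+      '';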
+    };
+
+    alertmanagers = mkOption {
+      type = types.listOf types.attrs;
+      example = literalExample ''
+        [ {
+          scheme = "https";
+          path_prefix = "/alertmanager";
+          static_configs = [ {
+            targets = [
+              "prometheus.domain.tld"
+            ];
+          } ];
+        } ]
+      '';
+      default = [];
+      description = ''
+        A list of alertmanagers to send alerts to.
+        See <link xlink:href="https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config">the official documentation</link> for more information.
+      '';
+    };
+
+    alertmanagerNotificationQueueCapacity = mkOption {
+      type = types.int;
+      default = 10000;
+      description = ''
+        The capacity of the queue for pending alert manager notifications.
+      '';
+    };
+
+    alertmanagerTimeout = mkOption {
+      type = types.int;
+      default = 10;
+      description = ''
+        Alert manager HTTP API timeout (in seconds).
+      '';
+    };
+
+    webExternalUrl = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "https://example.com/";
+      description = ''
+        The URL under which Prometheus is externally reachable (for example,
+        if Prometheus is served via a reverse proxy).
+      '';
+    };
+
+    checkConfig = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Check configuration with <literal>promtool
+        check</literal>. The call to <literal>promtool</literal> is
+        subject to sandboxing by Nix. When credentials are stored in
+        external files (<literal>password_file</literal>,
+        <literal>bearer_token_file</literal>, etc), they will not be
+        visible to <literal>promtool</literal> and it will report
+        errors, despite a correct configuration.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.prometheus.gid = config.ids.gids.prometheus;
+    users.users.prometheus = {
+      description = "Prometheus daemon user";
+      uid = config.ids.uids.prometheus;
+      group = "prometheus";
+    };
+    systemd.services.prometheus = {
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/prometheus" +
+          optionalString (length cmdlineArgs != 0) (" \\\n  " +
+            concatStringsSep " \\\n  " cmdlineArgs);
+        User = "prometheus";
+        Restart  = "always";
+        WorkingDirectory = workingDir;
+        StateDirectory = cfg.stateDir;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix
new file mode 100644
index 000000000000..0318acae50f7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -0,0 +1,249 @@
+{ config, pkgs, lib, options, ... }:
+
+let
+  inherit (lib) concatStrings foldl foldl' genAttrs literalExample maintainers
+                mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption
+                optional types;
+
+  cfg = config.services.prometheus.exporters;
+
+  # each attribute in `exporterOpts` is expected to have specified:
+  #   - port        (types.int):   port on which the exporter listens
+  #   - serviceOpts (types.attrs): config that is merged with the
+  #                                default definition of the exporter's
+  #                                systemd service
+  #   - extraOpts   (types.attrs): extra configuration options to
+  #                                configure the exporter with, which
+  #                                are appended to the default options
+  #
+  #  Note that `extraOpts` is optional, but a script for the exporter's
+  #  systemd service must be provided by specifying either
+  #  `serviceOpts.script` or `serviceOpts.serviceConfig.ExecStart`
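+  #
+  #  For instance (illustrative only, not an exporter shipped in this tree), a
+  #  file in ./exporters/ would evaluate to something like:
+  #
+  #    {
+  #      port = 9999;
+  #      extraOpts = { };
+  #      serviceOpts = {
+  #        serviceConfig.ExecStart = "${pkgs.some-exporter}/bin/some-exporter";
+  #      };
+  #    }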
+
+  exporterOpts = genAttrs [
+    "apcupsd"
+    "bind"
+    "blackbox"
+    "collectd"
+    "dnsmasq"
+    "dovecot"
+    "fritzbox"
+    "json"
+    "keylight"
+    "lnd"
+    "mail"
+    "mikrotik"
+    "minio"
+    "nextcloud"
+    "nginx"
+    "node"
+    "postfix"
+    "postgres"
+    "rspamd"
+    "snmp"
+    "surfboard"
+    "tor"
+    "unifi"
+    "varnish"
+    "wireguard"
+  ] (name:
+    import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options; }
+  );
+
+  mkExporterOpts = ({ name, port }: {
+    enable = mkEnableOption "the prometheus ${name} exporter";
+    port = mkOption {
+      type = types.int;
+      default = port;
+      description = ''
+        Port to listen on.
+      '';
+    };
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = ''
+        Address to listen on.
+      '';
+    };
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = ''
+        Extra commandline options to pass to the ${name} exporter.
+      '';
+    };
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Open port in firewall for incoming connections.
+      '';
+    };
+    firewallFilter = mkOption {
+      type = types.str;
+      default = "-p tcp -m tcp --dport ${toString port}";
+      example = literalExample ''
+        "-i eth0 -p tcp -m tcp --dport ${toString port}"
+      '';
+      description = ''
+        Specify a filter for iptables to use when
+        <option>services.prometheus.exporters.${name}.openFirewall</option>
+        is true. It is used as `ip46tables -A nixos-fw <option>firewallFilter</option> -j nixos-fw-accept`.
+      '';
+    };
+    user = mkOption {
+      type = types.str;
+      default = "${name}-exporter";
+      description = ''
+        User name under which the ${name} exporter shall be run.
+        Has no effect when <option>systemd.services.prometheus-${name}-exporter.serviceConfig.DynamicUser</option> is true.
+      '';
+    };
+    group = mkOption {
+      type = types.str;
+      default = "${name}-exporter";
+      description = ''
+        Group under which the ${name} exporter shall be run.
+        Has no effect when <option>systemd.services.prometheus-${name}-exporter.serviceConfig.DynamicUser</option> is true.
+      '';
+    };
+  });
+
+  mkSubModule = { name, port, extraOpts, imports }: {
+    ${name} = mkOption {
+      type = types.submodule {
+        inherit imports;
+        options = (mkExporterOpts {
+          inherit name port;
+        } // extraOpts);
+      };
+      internal = true;
+      default = {};
+    };
+  };
+
+  mkSubModules = (foldl' (a: b: a//b) {}
+    (mapAttrsToList (name: opts: mkSubModule {
+      inherit name;
+      inherit (opts) port;
+      extraOpts = opts.extraOpts or {};
+      imports = opts.imports or [];
+    }) exporterOpts)
+  );
+
+  mkExporterConf = { name, conf, serviceOpts }:
+    let
+      enableDynamicUser = serviceOpts.serviceConfig.DynamicUser or true;
+    in
+    mkIf conf.enable {
+      warnings = conf.warnings or [];
+      users.users."${name}-exporter" = (mkIf (conf.user == "${name}-exporter" && !enableDynamicUser) {
+        description = "Prometheus ${name} exporter service user";
+        isSystemUser = true;
+        inherit (conf) group;
+      });
+      users.groups = (mkIf (conf.group == "${name}-exporter" && !enableDynamicUser) {
+        "${name}-exporter" = {};
+      });
+      networking.firewall.extraCommands = mkIf conf.openFirewall (concatStrings [
+        "ip46tables -A nixos-fw ${conf.firewallFilter} "
+        "-m comment --comment ${name}-exporter -j nixos-fw-accept"
+      ]);
+      systemd.services."prometheus-${name}-exporter" = mkMerge ([{
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig.Restart = mkDefault "always";
+        serviceConfig.PrivateTmp = mkDefault true;
+        serviceConfig.WorkingDirectory = mkDefault /tmp;
+        serviceConfig.DynamicUser = mkDefault enableDynamicUser;
+      } serviceOpts ] ++ optional (!enableDynamicUser) {
+        serviceConfig.User = conf.user;
+        serviceConfig.Group = conf.group;
+      });
+  };
+in
+{
+
+  imports = (lib.forEach [ "blackboxExporter" "collectdExporter" "fritzboxExporter"
+                   "jsonExporter" "minioExporter" "nginxExporter" "nodeExporter"
+                   "snmpExporter" "unifiExporter" "varnishExporter" ]
+       (opt: lib.mkRemovedOptionModule [ "services" "prometheus" "${opt}" ] ''
+         The prometheus exporters are now configured using `services.prometheus.exporters'.
+         See the 18.03 release notes for more information.
+       '' ))
+
+    ++ (lib.forEach [ "enable" "substitutions" "preset" ]
+       (opt: lib.mkRemovedOptionModule [ "fonts" "fontconfig" "ultimate" "${opt}" ] ''
+         The fonts.fontconfig.ultimate module and configuration is obsolete.
+         The repository has since been archived and activity has ceased.
+         https://github.com/bohoomil/fontconfig-ultimate/issues/171.
+         No action should be needed for font configuration, as the fonts.fontconfig
+         module is already used by default.
+       '' ));
+
+  options.services.prometheus.exporters = mkOption {
+    type = types.submodule {
+      options = (mkSubModules);
+    };
+    description = "Prometheus exporter configuration";
+    default = {};
+    example = literalExample ''
+      {
+        node = {
+          enable = true;
+          enabledCollectors = [ "systemd" ];
+        };
+        varnish.enable = true;
+      }
+    '';
+  };
+
+  config = mkMerge ([{
+    assertions = [ {
+      assertion = cfg.snmp.enable -> (
+        (cfg.snmp.configurationPath == null) != (cfg.snmp.configuration == null)
+      );
+      message = ''
+        Please ensure you have either `services.prometheus.exporters.snmp.configuration'
+          or `services.prometheus.exporters.snmp.configurationPath' set!
+      '';
+    } {
+      assertion = cfg.mikrotik.enable -> (
+        (cfg.mikrotik.configFile == null) != (cfg.mikrotik.configuration == null)
+      );
+      message = ''
+        Please specify either `services.prometheus.exporters.mikrotik.configuration'
+          or `services.prometheus.exporters.mikrotik.configFile'.
+      '';
+    } {
+      assertion = cfg.mail.enable -> (
+        (cfg.mail.configFile == null) != (cfg.mail.configuration == null)
+      );
+      message = ''
+        Please specify either 'services.prometheus.exporters.mail.configuration'
+          or 'services.prometheus.exporters.mail.configFile'.
+      '';
+    } ];
+  }] ++ [(mkIf config.services.minio.enable {
+    services.prometheus.exporters.minio.minioAddress  = mkDefault "http://localhost:9000";
+    services.prometheus.exporters.minio.minioAccessKey = mkDefault config.services.minio.accessKey;
+    services.prometheus.exporters.minio.minioAccessSecret = mkDefault config.services.minio.secretKey;
+  })] ++ [(mkIf config.services.rspamd.enable {
+    services.prometheus.exporters.rspamd.url = mkDefault "http://localhost:11334/stat";
+  })] ++ [(mkIf config.services.nginx.enable {
+    systemd.services.prometheus-nginx-exporter.after = [ "nginx.service" ];
+    systemd.services.prometheus-nginx-exporter.requires = [ "nginx.service" ];
+  })] ++ (mapAttrsToList (name: conf:
+    mkExporterConf {
+      inherit name;
+      inherit (conf) serviceOpts;
+      conf = cfg.${name};
+    }) exporterOpts)
+  );
+
+  meta = {
+    doc = ./exporters.xml;
+    maintainers = [ maintainers.willibutz ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.xml b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.xml
new file mode 100644
index 000000000000..c2d4b05996a4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.xml
@@ -0,0 +1,227 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="module-services-prometheus-exporters">
+ <title>Prometheus exporters</title>
+ <para>
+  Prometheus exporters provide metrics for the
+  <link xlink:href="https://prometheus.io">prometheus monitoring system</link>.
+ </para>
+ <section xml:id="module-services-prometheus-exporters-configuration">
+  <title>Configuration</title>
+
+  <para>
+   One of the most common exporters is the
+   <link xlink:href="https://github.com/prometheus/node_exporter">node
+   exporter</link>, it provides hardware and OS metrics from the host it's
+   running on. The exporter could be configured as follows:
+<programlisting>
+  services.prometheus.exporters.node = {
+    enable = true;
+    enabledCollectors = [
+      "logind"
+      "systemd"
+    ];
+    disabledCollectors = [
+      "textfile"
+    ];
+    openFirewall = true;
+    firewallFilter = "-i br0 -p tcp -m tcp --dport 9100";
+  };
+</programlisting>
+   It should now serve all metrics from the collectors that are explicitly
+   enabled and the ones that are
+   <link xlink:href="https://github.com/prometheus/node_exporter#enabled-by-default">enabled
+   by default</link>, via HTTP under <literal>/metrics</literal>. In this
+   example the firewall only allows incoming connections to the
+   exporter's port on the bridge interface <literal>br0</literal> (this has
+   to be configured separately, of course). For more information about
+   configuration see <literal>man configuration.nix</literal> or search through
+   the
+   <link xlink:href="https://nixos.org/nixos/options.html#prometheus.exporters">available
+   options</link>.
+  </para>
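+  <para>
+   Note that the exporter only exposes the metrics; a scrape job still has to
+   be added to the Prometheus configuration in order to collect them. A
+   minimal sketch (the target address is a placeholder for the host running
+   the exporter) might look like:
+<programlisting>
+  services.prometheus.scrapeConfigs = [
+    {
+      job_name = "node";
+      static_configs = [
+        { targets = [ "localhost:9100" ]; }
+      ];
+    }
+  ];
+</programlisting>
+  </para>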
+ </section>
+ <section xml:id="module-services-prometheus-exporters-new-exporter">
+  <title>Adding a new exporter</title>
+
+  <para>
+   To add a new exporter, it has to be packaged first (see
+   <literal>nixpkgs/pkgs/servers/monitoring/prometheus/</literal> for
+   examples), then a module can be added. The postfix exporter is used in this
+   example:
+  </para>
+
+  <itemizedlist>
+   <listitem>
+    <para>
+     Some default options for all exporters are provided by
+     <literal>nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix</literal>:
+    </para>
+   </listitem>
+   <listitem override='none'>
+    <itemizedlist>
+     <listitem>
+      <para>
+       <literal>enable</literal>
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       <literal>port</literal>
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       <literal>listenAddress</literal>
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       <literal>extraFlags</literal>
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       <literal>openFirewall</literal>
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       <literal>firewallFilter</literal>
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       <literal>user</literal>
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       <literal>group</literal>
+      </para>
+     </listitem>
+    </itemizedlist>
+   </listitem>
+   <listitem>
+    <para>
+     As there is already a package available, the module can now be added. This
+     is accomplished by adding a new file to the
+     <literal>nixos/modules/services/monitoring/prometheus/exporters/</literal>
+     directory, which will be called <literal>postfix.nix</literal> and contains
+     all exporter-specific options and configuration:
+<programlisting>
+# nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  # for convenience we define cfg here
+  cfg = config.services.prometheus.exporters.postfix;
+in
+{
+  port = 9154; # The postfix exporter listens on this port by default
+
+  # `extraOpts` is an attribute set which contains additional options
+  # (and optional overrides for default options).
+  # Note that this attribute is optional.
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+    logfilePath = mkOption {
+      type = types.path;
+      default = /var/log/postfix_exporter_input.log;
+      example = /var/log/mail.log;
+      description = ''
+        Path where Postfix writes log entries.
+        This file will be truncated by this exporter!
+      '';
+    };
+    showqPath = mkOption {
+      type = types.path;
+      default = /var/spool/postfix/public/showq;
+      example = /var/lib/postfix/queue/public/showq;
+      description = ''
+        Path at which Postfix places its showq socket.
+      '';
+    };
+  };
+
+  # `serviceOpts` is an attribute set which contains configuration
+  # for the exporter's systemd service. One of
+  # `serviceOpts.script` and `serviceOpts.serviceConfig.ExecStart`
+  # has to be specified here. This will be merged with the default
+  # service configuration.
+  # Note that by default 'DynamicUser' is 'true'.
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
+</programlisting>
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+     This should already be enough for the postfix exporter. Additionally one
+     could now add assertions and conditional default values. This can be done
+     in the 'meta-module' that combines all exporter definitions and generates
+     the submodules:
+     <literal>nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix</literal>
+    </para>
+   </listitem>
+  </itemizedlist>
+ </section>
+ <section xml:id="module-services-prometheus-exporters-update-exporter-module">
+  <title>Updating an exporter module</title>
+   <para>
+     Should an exporter option change at some point, it is possible to add
+     information about the change to the exporter definition similar to
+     <literal>nixpkgs/nixos/modules/rename.nix</literal>:
+<programlisting>
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.nginx;
+in
+{
+  port = 9113;
+  extraOpts = {
+    # additional module options
+    # ...
+  };
+  serviceOpts = {
+    # service configuration
+    # ...
+  };
+  imports = [
+    # 'services.prometheus.exporters.nginx.telemetryEndpoint' -> 'services.prometheus.exporters.nginx.telemetryPath'
+    (mkRenamedOptionModule [ "telemetryEndpoint" ] [ "telemetryPath" ])
+
+    # removed option 'services.prometheus.exporters.nginx.insecure'
+    (mkRemovedOptionModule [ "insecure" ] ''
+      This option was replaced by 'prometheus.exporters.nginx.sslVerify' which defaults to true.
+    '')
+    ({ options.warnings = options.warnings; })
+  ];
+}
+</programlisting>
+    </para>
+  </section>
+</chapter>
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix
new file mode 100644
index 000000000000..57c35a742c5f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.apcupsd;
+in
+{
+  port = 9162;
+  extraOpts = {
+    apcupsdAddress = mkOption {
+      type = types.str;
+      default = ":3551";
+      description = ''
+        Address of the apcupsd Network Information Server (NIS).
+      '';
+    };
+
+    apcupsdNetwork = mkOption {
+      type = types.enum ["tcp" "tcp4" "tcp6"];
+      default = "tcp";
+      description = ''
+        Network of the apcupsd Network Information Server (NIS): one of "tcp", "tcp4", or "tcp6".
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-apcupsd-exporter}/bin/apcupsd_exporter \
+          -telemetry.addr ${cfg.listenAddress}:${toString cfg.port} \
+          -apcupsd.addr ${cfg.apcupsdAddress} \
+          -apcupsd.network ${cfg.apcupsdNetwork} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bind.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bind.nix
new file mode 100644
index 000000000000..972632b5a24a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bind.nix
@@ -0,0 +1,54 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.bind;
+in
+{
+  port = 9119;
+  extraOpts = {
+    bindURI = mkOption {
+      type = types.str;
+      default = "http://localhost:8053/";
+      description = ''
+        HTTP XML API address of a BIND server.
+      '';
+    };
+    bindTimeout = mkOption {
+      type = types.str;
+      default = "10s";
+      description = ''
+        Timeout for trying to get stats from Bind.
+      '';
+    };
+    bindVersion = mkOption {
+      type = types.enum [ "xml.v2" "xml.v3" "auto" ];
+      default = "auto";
+      description = ''
+        BIND statistics version. Can be detected automatically.
+      '';
+    };
+    bindGroups = mkOption {
+      type = types.listOf (types.enum [ "server" "view" "tasks" ]);
+      default = [ "server" "view" ];
+      description = ''
+        List of statistics to collect. Available: [server, view, tasks]
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-bind-exporter}/bin/bind_exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -bind.pid-file /var/run/named/named.pid \
+          -bind.timeout ${toString cfg.bindTimeout} \
+          -bind.stats-url ${cfg.bindURI} \
+          -bind.stats-version ${cfg.bindVersion} \
+          -bind.stats-groups ${concatStringsSep "," cfg.bindGroups} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
new file mode 100644
index 000000000000..fe8d905da3fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
@@ -0,0 +1,70 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  logPrefix = "services.prometheus.exporter.blackbox";
+  cfg = config.services.prometheus.exporters.blackbox;
+
+  # This ensures that we can deal with string paths, path types and
+  # store-path strings with context.
+  coerceConfigFile = file:
+    if (builtins.isPath file) || (lib.isStorePath file) then
+      file
+    else
+      (lib.warn ''
+        ${logPrefix}: configuration file "${file}" is being copied to the nix-store.
+        If you would like to avoid that, please set enableConfigCheck to false.
+      '' /. + file);
+  checkConfigLocation = file:
+    if lib.hasPrefix "/tmp/" file then
+      throw
+      "${logPrefix}: configuration file must not reside within /tmp - it won't be visible to the systemd service."
+    else
+      true;
+  checkConfig = file:
+    pkgs.runCommand "checked-blackbox-exporter.conf" {
+      preferLocalBuild = true;
+      buildInputs = [ pkgs.buildPackages.prometheus-blackbox-exporter ];
+    } ''
+      ln -s ${coerceConfigFile file} $out
+      blackbox_exporter --config.check --config.file $out
+    '';
+in {
+  port = 9115;
+  extraOpts = {
+    configFile = mkOption {
+      type = types.path;
+      description = ''
+        Path to configuration file.
+      '';
+    };
+    enableConfigCheck = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Whether to run a correctness check for the configuration file. This depends
+        on the configuration file residing in the nix-store. Paths passed as strings will
+        be copied to the store.
+      '';
+    };
+  };
+
+  serviceOpts = let
+    adjustedConfigFile = if cfg.enableConfigCheck then
+      checkConfig cfg.configFile
+    else
+      checkConfigLocation cfg.configFile;
+  in {
+    serviceConfig = {
+      AmbientCapabilities = [ "CAP_NET_RAW" ]; # for ping probes
+      ExecStart = ''
+        ${pkgs.prometheus-blackbox-exporter}/bin/blackbox_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --config.file ${escapeShellArg adjustedConfigFile} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix
new file mode 100644
index 000000000000..972104630275
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix
@@ -0,0 +1,77 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.collectd;
+in
+{
+  port = 9103;
+  extraOpts = {
+    collectdBinary = {
+      enable = mkEnableOption "collectd binary protocol receiver";
+
+      authFile = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = "File mapping user names to pre-shared keys (passwords).";
+      };
+
+      port = mkOption {
+        type = types.int;
+        default = 25826;
+        description = ''Port on which to accept collectd binary network packets.'';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = ''
+          Address to listen on for binary network packets.
+        '';
+      };
+
+      securityLevel = mkOption {
+        type = types.enum ["None" "Sign" "Encrypt"];
+        default = "None";
+        description = ''
+          Minimum required security level for accepted packets.
+        '';
+      };
+    };
+
+    logFormat = mkOption {
+      type = types.str;
+      default = "logger:stderr";
+      example = "logger:syslog?appname=bob&local=7 or logger:stdout?json=true";
+      description = ''
+        Set the log target and format.
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum ["debug" "info" "warn" "error" "fatal"];
+      default = "info";
+      description = ''
+        Only log messages with the given severity or above.
+      '';
+    };
+  };
+  serviceOpts = let
+    collectSettingsArgs = if (cfg.collectdBinary.enable) then ''
+      -collectd.listen-address ${cfg.collectdBinary.listenAddress}:${toString cfg.collectdBinary.port} \
+      -collectd.security-level ${cfg.collectdBinary.securityLevel} \
+    '' else "";
+  in {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-collectd-exporter}/bin/collectd_exporter \
+          -log.format ${escapeShellArg cfg.logFormat} \
+          -log.level ${cfg.logLevel} \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          ${collectSettingsArgs} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
new file mode 100644
index 000000000000..68afba21d64a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.dnsmasq;
+in
+{
+  port = 9153;
+  extraOpts = {
+    dnsmasqListenAddress = mkOption {
+      type = types.str;
+      default = "localhost:53";
+      description = ''
+        Address on which dnsmasq listens.
+      '';
+    };
+    leasesPath = mkOption {
+      type = types.path;
+      default = "/var/lib/misc/dnsmasq.leases";
+      example = "/var/lib/dnsmasq/dnsmasq.leases";
+      description = ''
+        Path to the <literal>dnsmasq.leases</literal> file.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-dnsmasq-exporter}/bin/dnsmasq_exporter \
+          --listen ${cfg.listenAddress}:${toString cfg.port} \
+          --dnsmasq ${cfg.dnsmasqListenAddress} \
+          --leases_path ${escapeShellArg cfg.leasesPath} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix
new file mode 100644
index 000000000000..aba3533e4395
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.dovecot;
+in
+{
+  port = 9166;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+    socketPath = mkOption {
+      type = types.path;
+      default = "/var/run/dovecot/stats";
+      example = "/var/run/dovecot2/old-stats";
+      description = ''
+        Path under which the stats socket is placed.
+        The user/group under which the exporter runs
+        should be able to access the socket in order
+        to scrape the metrics successfully.
+
+        Please keep in mind that the stats module has changed in
+        <link xlink:href="https://wiki2.dovecot.org/Upgrading/2.3">Dovecot 2.3+</link> which
+        is not <link xlink:href="https://github.com/kumina/dovecot_exporter/issues/8">compatible with this exporter</link>.
+
+        The following extra config has to be passed to Dovecot to ensure that recent versions
+        work with this exporter:
+        <programlisting>
+        {
+          <xref linkend="opt-services.prometheus.exporters.dovecot.enable" /> = true;
+          <xref linkend="opt-services.prometheus.exporters.dovecot.socketPath" /> = "/var/run/dovecot2/old-stats";
+          <xref linkend="opt-services.dovecot2.extraConfig" /> = '''
+            mail_plugins = $mail_plugins old_stats
+            service old-stats {
+              unix_listener old-stats {
+                user = dovecot-exporter
+                group = dovecot-exporter
+              }
+            }
+          ''';
+        }
+        </programlisting>
+      '';
+    };
+    scopes = mkOption {
+      type = types.listOf types.str;
+      default = [ "user" ];
+      example = [ "user" "global" ];
+      description = ''
+        Stats scopes to query.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-dovecot-exporter}/bin/dovecot_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --dovecot.socket-path ${escapeShellArg cfg.socketPath} \
+          --dovecot.scopes ${concatStringsSep "," cfg.scopes} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix
new file mode 100644
index 000000000000..9526597b8c96
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.fritzbox;
+in
+{
+  port = 9133;
+  extraOpts = {
+    gatewayAddress = mkOption {
+      type = types.str;
+      default = "fritz.box";
+      description = ''
+        The hostname or IP of the FRITZ!Box.
+      '';
+    };
+
+    gatewayPort = mkOption {
+      type = types.int;
+      default = 49000;
+      description = ''
+        The port of the FRITZ!Box UPnP service.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-fritzbox-exporter}/bin/exporter \
+          -listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -gateway-address ${cfg.gatewayAddress} \
+          -gateway-port ${toString cfg.gatewayPort} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix
new file mode 100644
index 000000000000..bd0026b55f72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix
@@ -0,0 +1,35 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.json;
+in
+{
+  port = 7979;
+  extraOpts = {
+    url = mkOption {
+      type = types.str;
+      description = ''
+        URL to scrape JSON from.
+      '';
+    };
+    configFile = mkOption {
+      type = types.path;
+      description = ''
+        Path to configuration file.
+      '';
+    };
+    listenAddress = {}; # not used
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-json-exporter}/bin/prometheus-json-exporter \
+          --port ${toString cfg.port} \
+          ${cfg.url} ${escapeShellArg cfg.configFile} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix
new file mode 100644
index 000000000000..dfa56343b871
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.keylight;
+in
+{
+  port = 9288;
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-keylight-exporter}/bin/keylight_exporter \
+          -metrics.addr ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix
new file mode 100644
index 000000000000..35f972020574
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.lnd;
+in
+{
+  port = 9092;
+  extraOpts = {
+    lndHost = mkOption {
+      type = types.str;
+      default = "localhost:10009";
+      description = ''
+        lnd instance gRPC address:port.
+      '';
+    };
+
+    lndTlsPath = mkOption {
+      type = types.path;
+      description = ''
+        Path to lnd TLS certificate.
+      '';
+    };
+
+    lndMacaroonDir = mkOption {
+      type = types.path;
+      description = ''
+        Path to lnd macaroons.
+      '';
+    };
+  };
+  serviceOpts.serviceConfig = {
+    ExecStart = ''
+      ${pkgs.prometheus-lnd-exporter}/bin/lndmon \
+        --prometheus.listenaddr=${cfg.listenAddress}:${toString cfg.port} \
+        --prometheus.logdir=/var/log/prometheus-lnd-exporter \
+        --lnd.host=${cfg.lndHost} \
+        --lnd.tlspath=${cfg.lndTlsPath} \
+        --lnd.macaroondir=${cfg.lndMacaroonDir} \
+        ${concatStringsSep " \\\n  " cfg.extraFlags}
+    '';
+    LogsDirectory = "prometheus-lnd-exporter";
+    ReadOnlyPaths = [ cfg.lndTlsPath cfg.lndMacaroonDir ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mail.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mail.nix
new file mode 100644
index 000000000000..18c5c4dd1623
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mail.nix
@@ -0,0 +1,158 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.mail;
+
+  configurationFile = pkgs.writeText "prometheus-mail-exporter.conf" (builtins.toJSON (
+    # removes the _module attribute and null values, and converts attribute names to lowercase
+    mapAttrs' (name: value:
+      if name == "servers"
+      then nameValuePair (toLower name)
+        ((map (srv: (mapAttrs' (n: v: nameValuePair (toLower n) v)
+          (filterAttrs (n: v: !(n == "_module" || v == null)) srv)
+        ))) value)
+      else nameValuePair (toLower name) value
+    ) (filterAttrs (n: _: !(n == "_module")) cfg.configuration)
+  ));
+
+  serverOptions.options = {
+    name = mkOption {
+      type = types.str;
+      description = ''
+        Value for label 'configname' which will be added to all metrics.
+      '';
+    };
+    server = mkOption {
+      type = types.str;
+      description = ''
+        Hostname of the server that should be probed.
+      '';
+    };
+    port = mkOption {
+      type = types.int;
+      example = 587;
+      description = ''
+        Port to use for SMTP.
+      '';
+    };
+    from = mkOption {
+      type = types.str;
+      example = "exporteruser@domain.tld";
+      description = ''
+        Content of the 'From' header for probing mails.
+      '';
+    };
+    to = mkOption {
+      type = types.str;
+      example = "exporteruser@domain.tld";
+      description = ''
+        Content of the 'To' header for probing mails.
+      '';
+    };
+    detectionDir = mkOption {
+      type = types.path;
+      example = "/var/spool/mail/exporteruser/new";
+      description = ''
+        Directory in which new mails for the exporter user are placed.
+        Note that this needs to exist when the exporter starts.
+      '';
+    };
+    login = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "exporteruser@domain.tld";
+      description = ''
+        Username to use for SMTP authentication.
+      '';
+    };
+    passphrase = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = ''
+        Password to use for SMTP authentication.
+      '';
+    };
+  };
+
+  exporterOptions.options = {
+    monitoringInterval = mkOption {
+      type = types.str;
+      example = "10s";
+      description = ''
+        Time interval between two probe attempts.
+      '';
+    };
+    mailCheckTimeout = mkOption {
+      type = types.str;
+      description = ''
+        Timeout after which a probe mail is considered lost.
+      '';
+    };
+    disableFileDeletion = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Disables deletion of probing mails by the exporter.
+      '';
+    };
+    servers = mkOption {
+      type = types.listOf (types.submodule serverOptions);
+      default = [];
+      example = literalExample ''
+        [ {
+          name = "testserver";
+          server = "smtp.domain.tld";
+          port = 587;
+          from = "exporteruser@domain.tld";
+          to = "exporteruser@domain.tld";
+          detectionDir = "/path/to/Maildir/new";
+        } ]
+      '';
+      description = ''
+        List of servers that should be probed.
+      '';
+    };
+  };
+in
+{
+  port = 9225;
+  extraOpts = {
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = ''
+        Specify the mailexporter configuration file to use.
+      '';
+    };
+    configuration = mkOption {
+      type = types.nullOr (types.submodule exporterOptions);
+      default = null;
+      description = ''
+        Specify the mailexporter configuration as a Nix attribute set (mutually exclusive with configFile).
+      '';
+    };
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-mail-exporter}/bin/mailexporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --config.file ${
+            if cfg.configuration != null then configurationFile else (escapeShellArg cfg.configFile)
+          } \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix
new file mode 100644
index 000000000000..62c2cc568476
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.mikrotik;
+in
+{
+  port = 9436;
+  extraOpts = {
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = ''
+        Path to a mikrotik exporter configuration file. Mutually exclusive with
+        the <option>configuration</option> option.
+      '';
+      example = literalExample "./mikrotik.yml";
+    };
+
+    configuration = mkOption {
+      type = types.nullOr types.attrs;
+      default = null;
+      description = ''
+        Mikrotik exporter configuration as a Nix attribute set. Mutually exclusive with
+        the <option>configFile</option> option.
+
+        See <link xlink:href="https://github.com/nshttpd/mikrotik-exporter/blob/master/README.md"/>
+        for the description of the configuration file format.
+      '';
+      example = literalExample ''
+        {
+          devices = [
+            {
+              name = "my_router";
+              address = "10.10.0.1";
+              user = "prometheus";
+              password = "changeme";
+            }
+          ];
+          features = {
+            bgp = true;
+            dhcp = true;
+            routes = true;
+            optics = true;
+          };
+        }
+      '';
+    };
+  };
+  serviceOpts = let
+    configFile = if cfg.configFile != null
+                 then cfg.configFile
+                 else "${pkgs.writeText "mikrotik-exporter.yml" (builtins.toJSON cfg.configuration)}";
+    in {
+    serviceConfig = {
+      # -port is a misleading name; it actually accepts an address too
+      ExecStart = ''
+        ${pkgs.prometheus-mikrotik-exporter}/bin/mikrotik-exporter \
+          -config-file=${escapeShellArg configFile} \
+          -port=${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix
new file mode 100644
index 000000000000..d6dd62f871bd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.minio;
+in
+{
+  port = 9290;
+  extraOpts = {
+    minioAddress = mkOption {
+      type = types.str;
+      example = "https://10.0.0.1:9000";
+      description = ''
+        The URL of the minio server.
+        Use HTTPS if Minio accepts secure connections only.
+        By default this connects to the local minio server if enabled.
+      '';
+    };
+
+    minioAccessKey = mkOption {
+      type = types.str;
+      example = "yourMinioAccessKey";
+      description = ''
+        The value of the Minio access key.
+        It is required in order to connect to the server.
+        By default this uses <literal>config.services.minio.accessKey</literal>
+        from the local minio server, if it is enabled.
+      '';
+    };
+
+    minioAccessSecret = mkOption {
+      type = types.str;
+      description = ''
+        The value of the Minio access secret.
+        It is required in order to connect to the server.
+        By default this uses <literal>config.services.minio.secretKey</literal>
+        from the local minio server, if it is enabled.
+      '';
+    };
+
+    minioBucketStats = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Collect statistics about the buckets and files in buckets.
+        This requires more computation; use it carefully with large buckets.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-minio-exporter}/bin/minio-exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -minio.server ${cfg.minioAddress} \
+          -minio.access-key ${escapeShellArg cfg.minioAccessKey} \
+          -minio.access-secret ${escapeShellArg cfg.minioAccessSecret} \
+          ${optionalString cfg.minioBucketStats "-minio.bucket-stats"} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
new file mode 100644
index 000000000000..aee6bd5e66ce
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.nextcloud;
+in
+{
+  port = 9205;
+  extraOpts = {
+    url = mkOption {
+      type = types.str;
+      example = "https://domain.tld";
+      description = ''
+        URL to the Nextcloud serverinfo page.
+        Adding the path to the serverinfo API is optional; it defaults
+        to <literal>/ocs/v2.php/apps/serverinfo/api/v1/info</literal>.
+      '';
+    };
+    username = mkOption {
+      type = types.str;
+      default = "nextcloud-exporter";
+      description = ''
+        Username for connecting to Nextcloud.
+        Note that this account needs to have admin privileges in Nextcloud.
+      '';
+    };
+    passwordFile = mkOption {
+      type = types.path;
+      example = "/path/to/password-file";
+      description = ''
+        File containing the password for connecting to Nextcloud.
+        Make sure that this file is readable by the exporter user.
+      '';
+    };
+    timeout = mkOption {
+      type = types.str;
+      default = "5s";
+      description = ''
+        Timeout for getting server info document.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-nextcloud-exporter}/bin/nextcloud-exporter \
+          -a ${cfg.listenAddress}:${toString cfg.port} \
+          -u ${cfg.username} \
+          -t ${cfg.timeout} \
+          -l ${cfg.url} \
+          -p ${escapeShellArg "@${cfg.passwordFile}"} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
new file mode 100644
index 000000000000..56cddfc55b71
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
@@ -0,0 +1,65 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.nginx;
+in
+{
+  port = 9113;
+  extraOpts = {
+    scrapeUri = mkOption {
+      type = types.str;
+      default = "http://localhost/nginx_status";
+      description = ''
+        Address to access the nginx status page.
+        It can be enabled with <literal>services.nginx.statusPage = true</literal>.
+      '';
+    };
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+    sslVerify = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Whether to perform certificate verification for https.
+      '';
+    };
+    constLabels = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [
+        "label1=value1"
+        "label2=value2"
+      ];
+      description = ''
+        A list of constant labels that will be used in every metric.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-nginx-exporter}/bin/nginx-prometheus-exporter \
+          --nginx.scrape-uri '${cfg.scrapeUri}' \
+          --nginx.ssl-verify ${toString cfg.sslVerify} \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --prometheus.const-labels ${concatStringsSep "," cfg.constLabels} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+  imports = [
+    (mkRenamedOptionModule [ "telemetryEndpoint" ] [ "telemetryPath" ])
+    (mkRemovedOptionModule [ "insecure" ] ''
+      This option was replaced by 'prometheus.exporters.nginx.sslVerify'.
+    '')
+    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix
new file mode 100644
index 000000000000..adc2abe0b91c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix
@@ -0,0 +1,40 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.node;
+in
+{
+  port = 9100;
+  extraOpts = {
+    enabledCollectors = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = ''[ "systemd" ]'';
+      description = ''
+        Collectors to enable. The collectors listed here are enabled in addition to the default ones.
+      '';
+    };
+    disabledCollectors = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = ''[ "timex" ]'';
+      description = ''
+        Collectors to disable which are enabled by default.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      RuntimeDirectory = "prometheus-node-exporter";
+      ExecStart = ''
+        ${pkgs.prometheus-node-exporter}/bin/node_exporter \
+          ${concatMapStringsSep " " (x: "--collector." + x) cfg.enabledCollectors} \
+          ${concatMapStringsSep " " (x: "--no-collector." + x) cfg.disabledCollectors} \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} ${concatStringsSep " " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
new file mode 100644
index 000000000000..3b6ef1631f89
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.postfix;
+in
+{
+  port = 9154;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+    logfilePath = mkOption {
+      type = types.path;
+      default = "/var/log/postfix_exporter_input.log";
+      example = "/var/log/mail.log";
+      description = ''
+        Path where Postfix writes log entries.
+        This file will be truncated by this exporter!
+      '';
+    };
+    showqPath = mkOption {
+      type = types.path;
+      default = "/var/spool/postfix/public/showq";
+      example = "/var/lib/postfix/queue/public/showq";
+      description = ''
+        Path where Postfix places its showq socket.
+      '';
+    };
+    systemd = {
+      enable = mkEnableOption ''
+        reading metrics from the systemd-journal instead of from a logfile
+      '';
+      unit = mkOption {
+        type = types.str;
+        default = "postfix.service";
+        description = ''
+          Name of the postfix systemd unit.
+        '';
+      };
+      slice = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Name of the postfix systemd slice.
+          This overrides the <option>systemd.unit</option>.
+        '';
+      };
+      journalPath = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = ''
+          Path to the systemd journal.
+        '';
+      };
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --postfix.showq_path ${escapeShellArg cfg.showqPath} \
+          ${concatStringsSep " \\\n  " (cfg.extraFlags
+          ++ optional cfg.systemd.enable "--systemd.enable"
+          ++ optional cfg.systemd.enable (if cfg.systemd.slice != null
+                                          then "--systemd.slice ${cfg.systemd.slice}"
+                                          else "--systemd.unit ${cfg.systemd.unit}")
+          ++ optional (cfg.systemd.enable && (cfg.systemd.journalPath != null))
+                       "--systemd.journal_path ${escapeShellArg cfg.systemd.journalPath}"
+          ++ optional (!cfg.systemd.enable) "--postfix.logfile_path ${escapeShellArg cfg.logfilePath}")}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix
new file mode 100644
index 000000000000..1ece73a1159a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.postgres;
+in
+{
+  port = 9187;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+    dataSourceName = mkOption {
+      type = types.str;
+      default = "user=postgres database=postgres host=/run/postgresql sslmode=disable";
+      example = "postgresql://username:password@localhost:5432/postgres?sslmode=disable";
+      description = ''
+        Accepts PostgreSQL URI form and key=value form arguments.
+      '';
+    };
+    runAsLocalSuperUser = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to run the exporter as the local 'postgres' super user.
+      '';
+    };
+  };
+  serviceOpts = {
+    environment.DATA_SOURCE_NAME = cfg.dataSourceName;
+    serviceConfig = {
+      DynamicUser = false;
+      User = mkIf cfg.runAsLocalSuperUser (mkForce "postgres");
+      ExecStart = ''
+        ${pkgs.prometheus-postgres-exporter}/bin/postgres_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix
new file mode 100644
index 000000000000..1f02ae207249
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix
@@ -0,0 +1,92 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.rspamd;
+
+  prettyJSON = conf:
+    pkgs.runCommand "rspamd-exporter-config.yml" { } ''
+      echo '${builtins.toJSON conf}' | ${pkgs.buildPackages.jq}/bin/jq '.' > $out
+    '';
+
+  generateConfig = extraLabels: (map (path: {
+    name = "rspamd_${replaceStrings [ "." " " ] [ "_" "_" ] path}";
+    path = "$.${path}";
+    labels = extraLabels;
+  }) [
+    "actions.'add header'"
+    "actions.'no action'"
+    "actions.'rewrite subject'"
+    "actions.'soft reject'"
+    "actions.greylist"
+    "actions.reject"
+    "bytes_allocated"
+    "chunks_allocated"
+    "chunks_freed"
+    "chunks_oversized"
+    "connections"
+    "control_connections"
+    "ham_count"
+    "learned"
+    "pools_allocated"
+    "pools_freed"
+    "read_only"
+    "scanned"
+    "shared_chunks_allocated"
+    "spam_count"
+    "total_learns"
+  ]) ++ [{
+    name = "rspamd_statfiles";
+    type = "object";
+    path = "$.statfiles[*]";
+    labels = recursiveUpdate {
+      symbol = "$.symbol";
+      type = "$.type";
+    } extraLabels;
+    values = {
+      revision = "$.revision";
+      size = "$.size";
+      total = "$.total";
+      used = "$.used";
+      languages = "$.languages";
+      users = "$.users";
+    };
+  }];
+in
+{
+  port = 7980;
+  extraOpts = {
+    listenAddress = {}; # not used
+
+    url = mkOption {
+      type = types.str;
+      description = ''
+        URL to the rspamd metrics endpoint.
+        Defaults to http://localhost:11334/stat when
+        <option>services.rspamd.enable</option> is true.
+      '';
+    };
+
+    extraLabels = mkOption {
+      type = types.attrsOf types.str;
+      default = {
+        host = config.networking.hostName;
+      };
+      defaultText = "{ host = config.networking.hostName; }";
+      example = literalExample ''
+        {
+          host = config.networking.hostName;
+          custom_label = "some_value";
+        }
+      '';
+      description = "Set of labels added to each metric.";
+    };
+  };
+  serviceOpts.serviceConfig.ExecStart = ''
+    ${pkgs.prometheus-json-exporter}/bin/prometheus-json-exporter \
+      --port ${toString cfg.port} \
+      ${cfg.url} ${prettyJSON (generateConfig cfg.extraLabels)} \
+      ${concatStringsSep " \\\n  " cfg.extraFlags}
+  '';
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
new file mode 100644
index 000000000000..01276366e97b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
@@ -0,0 +1,70 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.snmp;
+in
+{
+  port = 9116;
+  extraOpts = {
+    configurationPath = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = ''
+        Path to an snmp exporter configuration file. Mutually exclusive with the 'configuration' option.
+      '';
+      example = "./snmp.yml";
+    };
+
+    configuration = mkOption {
+      type = types.nullOr types.attrs;
+      default = null;
+      description = ''
+        Snmp exporter configuration as a Nix attribute set. Mutually exclusive with the 'configurationPath' option.
+      '';
+      example = ''
+        {
+          "default" = {
+            "version" = 2;
+            "auth" = {
+              "community" = "public";
+            };
+          };
+        };
+      '';
+    };
+
+    logFormat = mkOption {
+      type = types.enum ["logfmt" "json"];
+      default = "logfmt";
+      description = ''
+        Output format of log messages.
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum ["debug" "info" "warn" "error"];
+      default = "info";
+      description = ''
+        Only log messages with the given severity or above.
+      '';
+    };
+  };
+  serviceOpts = let
+    configFile = if cfg.configurationPath != null
+                 then cfg.configurationPath
+                 else "${pkgs.writeText "snmp-exporter-conf.yml" (builtins.toJSON cfg.configuration)}";
+    in {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-snmp-exporter}/bin/snmp_exporter \
+          --config.file=${escapeShellArg configFile} \
+          --log.format=${escapeShellArg cfg.logFormat} \
+          --log.level=${cfg.logLevel} \
+          --web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix
new file mode 100644
index 000000000000..81c5c70ed93f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.surfboard;
+in
+{
+  port = 9239;
+  extraOpts = {
+    modemAddress = mkOption {
+      type = types.str;
+      default = "192.168.100.1";
+      description = ''
+        The hostname or IP of the cable modem.
+      '';
+    };
+  };
+  serviceOpts = {
+    description = "Prometheus exporter for surfboard cable modem";
+    unitConfig.Documentation = "https://github.com/ipstatic/surfboard_exporter";
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-surfboard-exporter}/bin/surfboard_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --modem-address ${cfg.modemAddress} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix
new file mode 100644
index 000000000000..36c473677efa
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix
@@ -0,0 +1,44 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.tor;
+in
+{
+  port = 9130;
+  extraOpts = {
+    torControlAddress = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = ''
+        Tor control IP address or hostname.
+      '';
+    };
+
+    torControlPort = mkOption {
+      type = types.int;
+      default = 9051;
+      description = ''
+        Tor control port.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-tor-exporter}/bin/prometheus-tor-exporter \
+          -b ${cfg.listenAddress} \
+          -p ${toString cfg.port} \
+          -a ${cfg.torControlAddress} \
+          -c ${toString cfg.torControlPort} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+
+    # CPython requires a process to either have $HOME defined or run as a UID
+    # defined in /etc/passwd. The latter is false with DynamicUser, so define a
+    # dummy $HOME. https://bugs.python.org/issue10496
+    environment = { HOME = "/var/empty"; };
+  };
+}
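
Usage sketch (annotation, not part of the diff): scraping a local Tor daemon. This assumes the Tor control port is already enabled and reachable at the address below; enable comes from the shared exporter framework.

    services.prometheus.exporters.tor = {
      enable = true;
      torControlAddress = "127.0.0.1";
      torControlPort = 9051;
    };
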
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix
new file mode 100644
index 000000000000..8d0e8764001c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.unifi;
+in
+{
+  port = 9130;
+  extraOpts = {
+    unifiAddress = mkOption {
+      type = types.str;
+      example = "https://10.0.0.1:8443";
+      description = ''
+        URL of the UniFi Controller API.
+      '';
+    };
+
+    unifiInsecure = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        If enabled, skip verification of the TLS certificate of the UniFi Controller API.
+        Use with caution.
+      '';
+    };
+
+    unifiUsername = mkOption {
+      type = types.str;
+      example = "ReadOnlyUser";
+      description = ''
+        Username for authentication against the UniFi Controller API.
+      '';
+    };
+
+    unifiPassword = mkOption {
+      type = types.str;
+      description = ''
+        Password for authentication against the UniFi Controller API.
+      '';
+    };
+
+    unifiTimeout = mkOption {
+      type = types.str;
+      default = "5s";
+      example = "2m";
+      description = ''
+        Timeout including unit for UniFi Controller API requests.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-unifi-exporter}/bin/unifi_exporter \
+          -telemetry.addr ${cfg.listenAddress}:${toString cfg.port} \
+          -unifi.addr ${cfg.unifiAddress} \
+          -unifi.username ${escapeShellArg cfg.unifiUsername} \
+          -unifi.password ${escapeShellArg cfg.unifiPassword} \
+          -unifi.timeout ${cfg.unifiTimeout} \
+          ${optionalString cfg.unifiInsecure "-unifi.insecure" } \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
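
Usage sketch (annotation, not part of the diff): pointing the exporter at a UniFi Controller. The address and credentials are placeholders. Note that unifiPassword is interpolated into ExecStart, so it ends up in the generated unit file in the world-readable Nix store; treat it accordingly.

    services.prometheus.exporters.unifi = {
      enable = true;
      unifiAddress = "https://10.0.0.1:8443";
      unifiUsername = "ReadOnlyUser";
      unifiPassword = "changeme";
    };
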
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
new file mode 100644
index 000000000000..5b5a6e18fcd6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
@@ -0,0 +1,88 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.varnish;
+in
+{
+  port = 9131;
+  extraOpts = {
+    noExit = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Do not exit server on Varnish scrape errors.
+      '';
+    };
+    withGoMetrics = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Export go runtime and http handler metrics.
+      '';
+    };
+    verbose = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable verbose logging.
+      '';
+    };
+    raw = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable raw stdout logging without timestamps.
+      '';
+    };
+    varnishStatPath = mkOption {
+      type = types.str;
+      default = "varnishstat";
+      description = ''
+        Path to varnishstat.
+      '';
+    };
+    instance = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = ''
+        varnishstat -n value.
+      '';
+    };
+    healthPath = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = ''
+        Path under which to expose healthcheck. Disabled unless configured.
+      '';
+    };
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+  };
+  serviceOpts = {
+    path = [ pkgs.varnish ];
+    serviceConfig = {
+      RestartSec = mkDefault 1;
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-varnish-exporter}/bin/prometheus_varnish_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --varnishstat-path ${escapeShellArg cfg.varnishStatPath} \
+          ${concatStringsSep " \\\n  " (cfg.extraFlags
+            ++ optional (cfg.healthPath != null) "--web.health-path ${cfg.healthPath}"
+            ++ optional (cfg.instance != null) "-n ${escapeShellArg cfg.instance}"
+            ++ optional cfg.noExit "--no-exit"
+            ++ optional cfg.withGoMetrics "--with-go-metrics"
+            ++ optional cfg.verbose "--verbose"
+            ++ optional cfg.raw "--raw")}
+      '';
+    };
+  };
+}
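
Usage sketch (annotation, not part of the diff): a minimal setup exporting Varnish statistics with Go runtime metrics enabled. The module already puts pkgs.varnish on the unit's PATH and disables DynamicUser so varnishstat can reach the Varnish shared memory.

    services.prometheus.exporters.varnish = {
      enable = true;
      withGoMetrics = true;
    };
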
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix
new file mode 100644
index 000000000000..04421fc2d25a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.wireguard;
+in {
+  port = 9586;
+  imports = [
+    (mkRenamedOptionModule [ "addr" ] [ "listenAddress" ])
+    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+  extraOpts = {
+    verbose = mkEnableOption "Verbose logging mode for prometheus-wireguard-exporter";
+
+    wireguardConfig = mkOption {
+      type = with types; nullOr (either path str);
+      default = null;
+
+      description = ''
+        Path to the WireGuard config used to
+        <link xlink:href="https://github.com/MindFlavor/prometheus_wireguard_exporter/tree/2.0.0#usage">add the peer's name to the stats of a peer</link>.
+
+        Please note that <literal>networking.wg-quick</literal> is required for this feature
+        as <literal>networking.wireguard</literal> uses
+        <citerefentry><refentrytitle>wg</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+        to set the peers up.
+      '';
+    };
+
+    singleSubnetPerField = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        By default, all allowed IPs and subnets are comma-separated in the
+        <literal>allowed_ips</literal> field. With this option enabled,
+        a single IP and subnet will be listed in fields like <literal>allowed_ip_0</literal>,
+        <literal>allowed_ip_1</literal> and so on.
+      '';
+    };
+
+    withRemoteIp = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether or not the remote IP of a WireGuard peer should be exposed via prometheus.
+      '';
+    };
+  };
+  serviceOpts = {
+    path = [ pkgs.wireguard-tools ];
+
+    serviceConfig = {
+      AmbientCapabilities = [ "CAP_NET_ADMIN" ];
+      ExecStart = ''
+        ${pkgs.prometheus-wireguard-exporter}/bin/prometheus_wireguard_exporter \
+          -p ${toString cfg.port} \
+          -l ${cfg.listenAddress} \
+          ${optionalString cfg.verbose "-v"} \
+          ${optionalString cfg.singleSubnetPerField "-s"} \
+          ${optionalString cfg.withRemoteIp "-r"} \
+          ${optionalString (cfg.wireguardConfig != null) "-n ${escapeShellArg cfg.wireguardConfig}"}
+      '';
+    };
+  };
+}
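
Usage sketch (annotation, not part of the diff): exposing WireGuard peer statistics including remote IPs. CAP_NET_ADMIN is granted by the module above so the exporter can read interface state.

    services.prometheus.exporters.wireguard = {
      enable = true;
      withRemoteIp = true;
      singleSubnetPerField = true;
    };
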
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix
new file mode 100644
index 000000000000..f8fcc3eb97ef
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix
@@ -0,0 +1,166 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.pushgateway;
+
+  cmdlineArgs =
+       opt "web.listen-address" cfg.web.listen-address
+    ++ opt "web.telemetry-path" cfg.web.telemetry-path
+    ++ opt "web.external-url" cfg.web.external-url
+    ++ opt "web.route-prefix" cfg.web.route-prefix
+    ++ optional cfg.persistMetrics ''--persistence.file="/var/lib/${cfg.stateDir}/metrics"''
+    ++ opt "persistence.interval" cfg.persistence.interval
+    ++ opt "log.level" cfg.log.level
+    ++ opt "log.format" cfg.log.format
+    ++ cfg.extraFlags;
+
+  opt = k : v : optional (v != null) ''--${k}="${v}"'';
+
+in {
+  options = {
+    services.prometheus.pushgateway = {
+      enable = mkEnableOption "Prometheus Pushgateway";
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.prometheus-pushgateway;
+        defaultText = "pkgs.prometheus-pushgateway";
+        description = ''
+          Package that should be used for the prometheus pushgateway.
+        '';
+      };
+
+      web.listen-address = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Address to listen on for the web interface, API and telemetry.
+
+          <literal>null</literal> will default to <literal>:9091</literal>.
+        '';
+      };
+
+      web.telemetry-path = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Path under which to expose metrics.
+
+          <literal>null</literal> will default to <literal>/metrics</literal>.
+        '';
+      };
+
+      web.external-url = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          The URL under which Pushgateway is externally reachable.
+        '';
+      };
+
+      web.route-prefix = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Prefix for the internal routes of web endpoints.
+
+          Defaults to the path of
+          <option>services.prometheus.pushgateway.web.external-url</option>.
+        '';
+      };
+
+      persistence.interval = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "10m";
+        description = ''
+          The minimum interval at which to write out the persistence file.
+
+          <literal>null</literal> will default to <literal>5m</literal>.
+        '';
+      };
+
+      log.level = mkOption {
+        type = types.nullOr (types.enum ["debug" "info" "warn" "error" "fatal"]);
+        default = null;
+        description = ''
+          Only log messages with the given severity or above.
+
+          <literal>null</literal> will default to <literal>info</literal>.
+        '';
+      };
+
+      log.format = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "logger:syslog?appname=bob&local=7";
+        description = ''
+          Set the log target and format.
+
+          <literal>null</literal> will default to <literal>logger:stderr</literal>.
+        '';
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Extra commandline options when launching the Pushgateway.
+        '';
+      };
+
+      persistMetrics = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to persist metrics to a file.
+
+          When enabled metrics will be saved to a file called
+          <literal>metrics</literal> in the directory
+          <literal>/var/lib/pushgateway</literal>. The directory below
+          <literal>/var/lib</literal> can be set using
+          <option>services.prometheus.pushgateway.stateDir</option>.
+        '';
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "pushgateway";
+        description = ''
+          Directory below <literal>/var/lib</literal> to store metrics.
+
+          This directory will be created automatically using systemd's
+          StateDirectory mechanism when
+          <option>services.prometheus.pushgateway.persistMetrics</option>
+          is enabled.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !hasPrefix "/" cfg.stateDir;
+        message =
+          "The option services.prometheus.pushgateway.stateDir" +
+          " shouldn't be an absolute directory." +
+          " It should be a directory relative to /var/lib.";
+      }
+    ];
+    systemd.services.pushgateway = {
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" ];
+      serviceConfig = {
+        Restart  = "always";
+        DynamicUser = true;
+        ExecStart = "${cfg.package}/bin/pushgateway" +
+          optionalString (length cmdlineArgs != 0) (" \\\n  " +
+            concatStringsSep " \\\n  " cmdlineArgs);
+        StateDirectory = if cfg.persistMetrics then cfg.stateDir else null;
+      };
+    };
+  };
+}
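
Usage sketch (annotation, not part of the diff): a Pushgateway that persists pushed metrics across restarts. With persistMetrics enabled, the metrics file lives under /var/lib/pushgateway, created via systemd's StateDirectory.

    services.prometheus.pushgateway = {
      enable = true;
      web.listen-address = ":9091";
      persistMetrics = true;
      persistence.interval = "10m";
    };
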
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix
new file mode 100644
index 000000000000..44b15cb2034c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.xmpp-alerts;
+
+  configFile = pkgs.writeText "prometheus-xmpp-alerts.yml" (builtins.toJSON cfg.configuration);
+
+in
+
+{
+  options.services.prometheus.xmpp-alerts = {
+
+    enable = mkEnableOption "XMPP Web hook service for Alertmanager";
+
+    configuration = mkOption {
+      type = types.attrs;
+      description = "Configuration as attribute set which will be converted to YAML";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.prometheus-xmpp-alerts = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.prometheus-xmpp-alerts}/bin/prometheus-xmpp-alerts --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHome = true;
+        ProtectSystem = "strict";
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        NoNewPrivileges = true;
+        SystemCallArchitectures = "native";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        SystemCallFilter = [ "@system-service" ];
+      };
+    };
+  };
+}
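
Usage sketch (annotation, not part of the diff): wiring Alertmanager webhooks to XMPP. The configuration attribute set is passed through to prometheus-xmpp-alerts unchanged, so the keys below are assumptions about the upstream configuration format; consult its documentation for the authoritative key names.

    services.prometheus.xmpp-alerts = {
      enable = true;
      configuration = {
        jid = "alertbot@example.org";     # assumed upstream key
        password = "changeme";            # assumed upstream key
        to_jid = "admin@example.org";     # assumed upstream key
        listen_address = "127.0.0.1";     # assumed upstream key
        listen_port = 9199;               # assumed upstream key
      };
    };
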
diff --git a/nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix b/nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix
new file mode 100644
index 000000000000..16eb83008509
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix
@@ -0,0 +1,81 @@
+{ config, pkgs, lib, ... }:
+
+with pkgs;
+with lib;
+
+let
+
+  cfg = config.services.riemann-dash;
+
+  conf = writeText "config.rb" ''
+    riemann_base = "${cfg.dataDir}"
+    config.store[:ws_config] = "#{riemann_base}/config/config.json"
+    ${cfg.config}
+  '';
+
+  launcher = writeScriptBin "riemann-dash" ''
+    #!/bin/sh
+    exec ${pkgs.riemann-dash}/bin/riemann-dash ${conf}
+  '';
+
+in {
+
+  options = {
+
+    services.riemann-dash = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable the riemann-dash dashboard daemon.
+        '';
+      };
+      config = mkOption {
+        type = types.lines;
+        description = ''
+          Contents added to the end of the riemann-dash configuration file.
+        '';
+      };
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/riemann-dash";
+        description = ''
+          Location of the riemann-base dir. The dashboard configuration file
+          is stored in this directory. The directory is created automatically on
+          service start, and its owner is set to the riemanndash user.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    users.groups.riemanndash.gid = config.ids.gids.riemanndash;
+
+    users.users.riemanndash = {
+      description = "riemann-dash daemon user";
+      uid = config.ids.uids.riemanndash;
+      group = "riemanndash";
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - riemanndash riemanndash - -"
+    ];
+
+    systemd.services.riemann-dash = {
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "riemann.service" ];
+      after = [ "riemann.service" ];
+      preStart = ''
+        mkdir -p '${cfg.dataDir}/config'
+      '';
+      serviceConfig = {
+        User = "riemanndash";
+        ExecStart = "${launcher}/bin/riemann-dash";
+      };
+    };
+
+  };
+
+}
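
Usage sketch (annotation, not part of the diff): the generated config.rb already sets riemann_base and the ws_config path, so the config option only needs whatever extra riemann-dash settings should be appended.

    services.riemann-dash = {
      enable = true;
      dataDir = "/var/riemann-dash";
      config = ''
        # extra riemann-dash config.rb settings go here
      '';
    };
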
diff --git a/nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix b/nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix
new file mode 100644
index 000000000000..86a11694e7b4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix
@@ -0,0 +1,70 @@
+{ config, pkgs, lib, ... }:
+
+with pkgs;
+with lib;
+
+let
+
+  cfg = config.services.riemann-tools;
+
+  riemannHost = "${cfg.riemannHost}";
+
+  healthLauncher = writeScriptBin "riemann-health" ''
+    #!/bin/sh
+    exec ${pkgs.riemann-tools}/bin/riemann-health ${builtins.concatStringsSep " " cfg.extraArgs} --host ${riemannHost}
+  '';
+
+
+in {
+
+  options = {
+
+    services.riemann-tools = {
+      enableHealth = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable the riemann-health daemon.
+        '';
+      };
+      riemannHost = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = ''
+          Address of the host riemann node. Defaults to localhost.
+        '';
+      };
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          A list of commandline-switches forwarded to a riemann-tool.
+          See for example `riemann-health --help` for available options.
+        '';
+        example = ["-p 5555" "--timeout=30" "--attribute=myattribute=42"];
+      };
+    };
+  };
+
+  config = mkIf cfg.enableHealth {
+
+    users.groups.riemanntools.gid = config.ids.gids.riemanntools;
+
+    users.users.riemanntools = {
+      description = "riemann-tools daemon user";
+      uid = config.ids.uids.riemanntools;
+      group = "riemanntools";
+    };
+
+    systemd.services.riemann-health = {
+      wantedBy = [ "multi-user.target" ];
+      path = [ procps ];
+      serviceConfig = {
+        User = "riemanntools";
+        ExecStart = "${healthLauncher}/bin/riemann-health";
+      };
+    };
+
+  };
+
+}
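
Usage sketch (annotation, not part of the diff): running riemann-health against a remote Riemann server, reusing a flag from the module's own example.

    services.riemann-tools = {
      enableHealth = true;
      riemannHost = "192.0.2.10";
      extraArgs = [ "--timeout=30" ];
    };
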
diff --git a/nixpkgs/nixos/modules/services/monitoring/riemann.nix b/nixpkgs/nixos/modules/services/monitoring/riemann.nix
new file mode 100644
index 000000000000..13d2b1cc0602
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/riemann.nix
@@ -0,0 +1,105 @@
+{ config, pkgs, lib, ... }:
+
+with pkgs;
+with lib;
+
+let
+
+  cfg = config.services.riemann;
+
+  classpath = concatStringsSep ":" (
+    cfg.extraClasspathEntries ++ [ "${riemann}/share/java/riemann.jar" ]
+  );
+
+  riemannConfig = concatStringsSep "\n" (
+    [cfg.config] ++ (map (f: ''(load-file "${f}")'') cfg.configFiles)
+  );
+
+  launcher = writeScriptBin "riemann" ''
+    #!/bin/sh
+    exec ${jdk}/bin/java ${concatStringsSep " " cfg.extraJavaOpts} \
+      -cp ${classpath} \
+      riemann.bin ${cfg.configFile}
+  '';
+
+in {
+
+  options = {
+
+    services.riemann = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable the Riemann network monitoring daemon.
+        '';
+      };
+      config = mkOption {
+        type = types.lines;
+        description = ''
+          Contents of the Riemann configuration file. For more complicated
+          config you should use configFile.
+        '';
+      };
+      configFiles = mkOption {
+        type = with types; listOf path;
+        default = [];
+        description = ''
+          Extra files containing Riemann configuration. These files will be
+          loaded at runtime by Riemann (with Clojure's
+          <literal>load-file</literal> function) at the end of the
+          configuration if you use the config option; they are ignored if you
+          set configFile directly.
+        '';
+      };
+      configFile = mkOption {
+        type = types.str;
+        description = ''
+          A Riemann config file. Any files in the same directory as this file
+          will be added to the classpath by Riemann.
+        '';
+      };
+      extraClasspathEntries = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = ''
+          Extra entries added to the Java classpath when running Riemann.
+        '';
+      };
+      extraJavaOpts = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = ''
+          Extra Java options used when launching Riemann.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users.groups.riemann.gid = config.ids.gids.riemann;
+
+    users.users.riemann = {
+      description = "riemann daemon user";
+      uid = config.ids.uids.riemann;
+      group = "riemann";
+    };
+
+    services.riemann.configFile = mkDefault (
+      writeText "riemann-config.clj" riemannConfig
+    );
+
+    systemd.services.riemann = {
+      wantedBy = [ "multi-user.target" ];
+      path = [ inetutils ];
+      serviceConfig = {
+        User = "riemann";
+        ExecStart = "${launcher}/bin/riemann";
+      };
+      serviceConfig.LimitNOFILE = 65536;
+    };
+
+  };
+
+}
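
Usage sketch (annotation, not part of the diff): a small inline Riemann config that opens a TCP server and prints incoming events; the Clojure forms are standard Riemann configuration. Anything more involved can go into configFiles or configFile instead.

    services.riemann = {
      enable = true;
      config = ''
        (tcp-server {:host "127.0.0.1"})
        (streams prn)
      '';
    };
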
diff --git a/nixpkgs/nixos/modules/services/monitoring/scollector.nix b/nixpkgs/nixos/modules/services/monitoring/scollector.nix
new file mode 100644
index 000000000000..6f13ce889cba
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/scollector.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scollector;
+
+  collectors = pkgs.runCommand "collectors" { preferLocalBuild = true; }
+    ''
+    mkdir -p $out
+    ${lib.concatStringsSep
+        "\n"
+        (lib.mapAttrsToList
+          (frequency: binaries:
+            "mkdir -p $out/${frequency}\n" +
+            (lib.concatStringsSep
+              "\n"
+              (map (path: "ln -s ${path} $out/${frequency}/$(basename ${path})")
+                   binaries)))
+          cfg.collectors)}
+    '';
+
+  conf = pkgs.writeText "scollector.toml" ''
+    Host = "${cfg.bosunHost}"
+    ColDir = "${collectors}"
+    ${cfg.extraConfig}
+  '';
+
+in {
+
+  options = {
+
+    services.scollector = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to run scollector.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.scollector;
+        defaultText = "pkgs.scollector";
+        example = literalExample "pkgs.scollector";
+        description = ''
+          scollector binary to use.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "scollector";
+        description = ''
+          User account under which scollector runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "scollector";
+        description = ''
+          Group account under which scollector runs.
+        '';
+      };
+
+      bosunHost = mkOption {
+        type = types.str;
+        default = "localhost:8070";
+        description = ''
+          Host and port of the bosun server that will store the collected
+          data.
+        '';
+      };
+
+      collectors = mkOption {
+        type = with types; attrsOf (listOf path);
+        default = {};
+        example = literalExample "{ \"0\" = [ \"\${postgresStats}/bin/collect-stats\" ]; }";
+        description = ''
+          An attribute set mapping the frequency of collection to a list of
+          binaries that should be executed at that frequency. You can use "0"
+          to run a binary forever.
+        '';
+      };
+
+      extraOpts = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "-d" ];
+        description = ''
+          Extra scollector command-line options.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra scollector configuration added to the end of scollector.toml.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf config.services.scollector.enable {
+
+    systemd.services.scollector = {
+      description = "scollector metrics collector (part of Bosun)";
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ pkgs.coreutils pkgs.iproute ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/scollector -conf=${conf} ${lib.concatStringsSep " " cfg.extraOpts}";
+      };
+    };
+
+    users.users.scollector = {
+      description = "scollector user";
+      group = "scollector";
+      uid = config.ids.uids.scollector;
+    };
+
+    users.groups.scollector.gid = config.ids.gids.scollector;
+
+  };
+
+}
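
Usage sketch (annotation, not part of the diff): sending metrics to a Bosun instance and adding one custom collector that runs continuously (frequency "0"). postgresStats is a hypothetical package providing the collector binary.

    services.scollector = {
      enable = true;
      bosunHost = "bosun.example.org:8070";
      collectors."0" = [ "${postgresStats}/bin/collect-stats" ];
    };
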
diff --git a/nixpkgs/nixos/modules/services/monitoring/smartd.nix b/nixpkgs/nixos/modules/services/monitoring/smartd.nix
new file mode 100644
index 000000000000..c345ec48a018
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/smartd.nix
@@ -0,0 +1,242 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  host = config.networking.hostName or "unknown"
+       + optionalString (config.networking.domain != null) ".${config.networking.domain}";
+
+  cfg = config.services.smartd;
+
+  nm = cfg.notifications.mail;
+  nw = cfg.notifications.wall;
+  nx = cfg.notifications.x11;
+
+  smartdNotify = pkgs.writeScript "smartd-notify.sh" ''
+    #! ${pkgs.runtimeShell}
+    ${optionalString nm.enable ''
+      {
+      ${pkgs.coreutils}/bin/cat << EOF
+      From: smartd on ${host} <root>
+      To: undisclosed-recipients:;
+      Subject: SMART error on $SMARTD_DEVICESTRING: $SMARTD_FAILTYPE
+
+      $SMARTD_FULLMESSAGE
+      EOF
+
+      ${pkgs.smartmontools}/sbin/smartctl -a -d "$SMARTD_DEVICETYPE" "$SMARTD_DEVICE"
+      } | ${nm.mailer} -i "${nm.recipient}"
+    ''}
+    ${optionalString nw.enable ''
+      {
+      ${pkgs.coreutils}/bin/cat << EOF
+      Problem detected with disk: $SMARTD_DEVICESTRING
+      Warning message from smartd is:
+
+      $SMARTD_MESSAGE
+      EOF
+      } | ${pkgs.utillinux}/bin/wall 2>/dev/null
+    ''}
+    ${optionalString nx.enable ''
+      export DISPLAY=${nx.display}
+      {
+      ${pkgs.coreutils}/bin/cat << EOF
+      Problem detected with disk: $SMARTD_DEVICESTRING
+      Warning message from smartd is:
+
+      $SMARTD_FULLMESSAGE
+      EOF
+      } | ${pkgs.xorg.xmessage}/bin/xmessage -file - 2>/dev/null &
+    ''}
+  '';
+
+  notifyOpts = optionalString (nm.enable || nw.enable || nx.enable)
+    ("-m <nomailer> -M exec ${smartdNotify} " + optionalString cfg.notifications.test "-M test ");
+
+  smartdConf = pkgs.writeText "smartd.conf" ''
+    # Autogenerated smartd startup config file
+    DEFAULT ${notifyOpts}${cfg.defaults.monitored}
+
+    ${concatMapStringsSep "\n" (d: "${d.device} ${d.options}") cfg.devices}
+
+    ${optionalString cfg.autodetect
+       "DEVICESCAN ${notifyOpts}${cfg.defaults.autodetected}"}
+  '';
+
+  smartdDeviceOpts = { ... }: {
+
+    options = {
+
+      device = mkOption {
+        example = "/dev/sda";
+        type = types.str;
+        description = "Location of the device.";
+      };
+
+      options = mkOption {
+        default = "";
+        example = "-d sat";
+        type = types.separatedString " ";
+        description = "Options that determine how smartd monitors the device.";
+      };
+
+    };
+
+  };
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    services.smartd = {
+
+      enable = mkEnableOption "smartd daemon from <literal>smartmontools</literal> package";
+
+      autodetect = mkOption {
+        default = true;
+        type = types.bool;
+        description = ''
+          Whether smartd should monitor all devices connected to the
+          machine at the time it's being started (the default).
+
+          Set to false to monitor the devices listed in
+          <option>services.smartd.devices</option> only.
+        '';
+      };
+
+      extraOptions = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = ["-A /var/log/smartd/" "--interval=3600"];
+        description = ''
+          Extra command-line options passed to the <literal>smartd</literal>
+          daemon on startup.
+
+          (See <literal>man 8 smartd</literal>.)
+        '';
+      };
+
+      notifications = {
+
+        mail = {
+          enable = mkOption {
+            default = config.services.mail.sendmailSetuidWrapper != null;
+            type = types.bool;
+            description = "Whether to send e-mail notifications.";
+          };
+
+          recipient = mkOption {
+            default = "root";
+            type = types.str;
+            description = "Recipient of the notification messages.";
+          };
+
+          mailer = mkOption {
+            default = "/run/wrappers/bin/sendmail";
+            type = types.path;
+            description = ''
+              Sendmail-compatible binary to be used to send the messages.
+
+              You should probably enable
+              <option>services.postfix</option> or some other MTA for
+              this to work.
+            '';
+          };
+        };
+
+        wall = {
+          enable = mkOption {
+            default = true;
+            type = types.bool;
+            description = "Whether to send wall notifications to all users.";
+          };
+        };
+
+        x11 = {
+          enable = mkOption {
+            default = config.services.xserver.enable;
+            type = types.bool;
+            description = "Whether to send X11 xmessage notifications.";
+          };
+
+          display = mkOption {
+            default = ":${toString config.services.xserver.display}";
+            type = types.str;
+            description = "DISPLAY to send X11 notifications to.";
+          };
+        };
+
+        test = mkOption {
+          default = false;
+          type = types.bool;
+          description = "Whether to send a test notification on startup.";
+        };
+
+      };
+
+      defaults = {
+        monitored = mkOption {
+          default = "-a";
+          type = types.separatedString " ";
+          example = "-a -o on -s (S/../.././02|L/../../7/04)";
+          description = ''
+            Common default options for explicitly monitored (listed in
+            <option>services.smartd.devices</option>) devices.
+
+            The default value turns on monitoring of all the things (see
+            <literal>man 5 smartd.conf</literal>).
+
+            The example also turns on SMART Automatic Offline Testing on
+            startup, and schedules short self-tests daily, and long
+            self-tests weekly.
+          '';
+        };
+
+        autodetected = mkOption {
+          default = cfg.defaults.monitored;
+          type = types.separatedString " ";
+          description = ''
+            Like <option>services.smartd.defaults.monitored</option>, but for the
+            autodetected devices.
+          '';
+        };
+      };
+
+      devices = mkOption {
+        default = [];
+        example = [ { device = "/dev/sda"; } { device = "/dev/sdb"; options = "-d sat"; } ];
+        type = with types; listOf (submodule smartdDeviceOpts);
+        description = "List of devices to monitor.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [ {
+      assertion = cfg.autodetect || cfg.devices != [];
+      message = "smartd can't run with both disabled autodetect and an empty list of devices to monitor.";
+    } ];
+
+    systemd.services.smartd = {
+      description = "S.M.A.R.T. Daemon";
+
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ pkgs.nettools ]; # for hostname and dnsdomainname calls in smartd
+
+      serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd ${lib.concatStringsSep " " cfg.extraOptions} --no-fork --configfile=${smartdConf}";
+    };
+
+  };
+
+}
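
Usage sketch (annotation, not part of the diff): monitoring an explicit list of disks instead of autodetecting, with mail notifications. Mail delivery assumes a working sendmail wrapper, e.g. from a configured MTA.

    services.smartd = {
      enable = true;
      autodetect = false;
      devices = [
        { device = "/dev/sda"; }
        { device = "/dev/sdb"; options = "-d sat"; }
      ];
      notifications.mail = {
        enable = true;
        recipient = "admin@example.org";
      };
    };
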
diff --git a/nixpkgs/nixos/modules/services/monitoring/statsd.nix b/nixpkgs/nixos/modules/services/monitoring/statsd.nix
new file mode 100644
index 000000000000..30b2916a9928
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/statsd.nix
@@ -0,0 +1,149 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.statsd;
+
+  isBuiltinBackend = name:
+    builtins.elem name [ "graphite" "console" "repeater" ];
+
+  backendsToPackages = let
+    mkMap = list: name:
+      if isBuiltinBackend name then list
+      else list ++ [ pkgs.nodePackages.${name} ];
+  in foldl mkMap [];
+
+  configFile = pkgs.writeText "statsd.conf" ''
+    {
+      address: "${cfg.listenAddress}",
+      port: "${toString cfg.port}",
+      mgmt_address: "${cfg.mgmt_address}",
+      mgmt_port: "${toString cfg.mgmt_port}",
+      backends: [${
+        concatMapStringsSep "," (name:
+          if (isBuiltinBackend name)
+          then ''"./backends/${name}"''
+          else ''"${name}"''
+        ) cfg.backends}],
+      ${optionalString (cfg.graphiteHost!=null) ''graphiteHost: "${cfg.graphiteHost}",''}
+      ${optionalString (cfg.graphitePort!=null) ''graphitePort: "${toString cfg.graphitePort}",''}
+      console: {
+        prettyprint: false
+      },
+      log: {
+        backend: "stdout"
+      },
+      automaticConfigReload: false${optionalString (cfg.extraConfig != null) ","}
+      ${cfg.extraConfig}
+    }
+  '';
+
+  deps = pkgs.buildEnv {
+    name = "statsd-runtime-deps";
+    pathsToLink = [ "/lib" ];
+    ignoreCollisions = true;
+
+    paths = backendsToPackages cfg.backends;
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options.services.statsd = {
+
+    enable = mkEnableOption "statsd";
+
+    listenAddress = mkOption {
+      description = "Address that statsd listens on over UDP";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    port = mkOption {
+      description = "Port that statsd listens for messages on over UDP";
+      default = 8125;
+      type = types.int;
+    };
+
+    mgmt_address = mkOption {
+      description = "Address to run management TCP interface on";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    mgmt_port = mkOption {
+      description = "Port to run the management TCP interface on";
+      default = 8126;
+      type = types.int;
+    };
+
+    backends = mkOption {
+      description = "List of backends statsd will use for data persistence";
+      default = [];
+      example = [
+        "graphite"
+        "console"
+        "repeater"
+        "statsd-librato-backend"
+        "stackdriver-statsd-backend"
+        "statsd-influxdb-backend"
+      ];
+      type = types.listOf types.str;
+    };
+
+    graphiteHost = mkOption {
+      description = "Hostname or IP of Graphite server";
+      default = null;
+      type = types.nullOr types.str;
+    };
+
+    graphitePort = mkOption {
+      description = "Port of Graphite server (i.e. carbon-cache).";
+      default = null;
+      type = types.nullOr types.int;
+    };
+
+    extraConfig = mkOption {
+      description = "Extra configuration options for statsd";
+      default = "";
+      type = types.nullOr types.str;
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = map (backend: {
+      assertion = !isBuiltinBackend backend -> hasAttrByPath [ backend ] pkgs.nodePackages;
+      message = "Only builtin backends (graphite, console, repeater) or backends enumerated in `pkgs.nodePackages` are allowed!";
+    }) cfg.backends;
+
+    users.users.statsd = {
+      uid = config.ids.uids.statsd;
+      description = "Statsd daemon user";
+    };
+
+    systemd.services.statsd = {
+      description = "Statsd Server";
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        NODE_PATH = "${deps}/lib/node_modules";
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.statsd}/bin/statsd ${configFile}";
+        User = "statsd";
+      };
+    };
+
+    environment.systemPackages = [ pkgs.statsd ];
+
+  };
+
+}
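
Usage sketch (annotation, not part of the diff): forwarding aggregated metrics to a local Graphite carbon-cache. Only builtin backends or packages found in pkgs.nodePackages are accepted, per the assertion above; the port below is only an illustrative carbon line-protocol port.

    services.statsd = {
      enable = true;
      backends = [ "graphite" ];
      graphiteHost = "127.0.0.1";
      graphitePort = 2003;
    };
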
diff --git a/nixpkgs/nixos/modules/services/monitoring/sysstat.nix b/nixpkgs/nixos/modules/services/monitoring/sysstat.nix
new file mode 100644
index 000000000000..ca2cff827232
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/sysstat.nix
@@ -0,0 +1,76 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.sysstat;
+in {
+  options = {
+    services.sysstat = {
+      enable = mkEnableOption "sar system activity collection";
+
+      collect-frequency = mkOption {
+        type = types.str;
+        default = "*:00/10";
+        description = ''
+          OnCalendar specification for sysstat-collect
+        '';
+      };
+
+      collect-args = mkOption {
+        type = types.str;
+        default = "1 1";
+        description = ''
+          Arguments to pass to sa1 when collecting statistics.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.sysstat = {
+      description = "Resets System Activity Logs";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = "root";
+        RemainAfterExit = true;
+        Type = "oneshot";
+        ExecStart = "${pkgs.sysstat}/lib/sa/sa1 --boot";
+        LogsDirectory = "sa";
+      };
+    };
+
+    systemd.services.sysstat-collect = {
+      description = "system activity accounting tool";
+      unitConfig.Documentation = "man:sa1(8)";
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "root";
+        ExecStart = "${pkgs.sysstat}/lib/sa/sa1 ${cfg.collect-args}";
+      };
+    };
+
+    systemd.timers.sysstat-collect = {
+      description = "Run system activity accounting tool on a regular basis";
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = cfg.collect-frequency;
+    };
+
+    systemd.services.sysstat-summary = {
+      description = "Generate a daily summary of process accounting";
+      unitConfig.Documentation = "man:sa2(8)";
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "root";
+        ExecStart = "${pkgs.sysstat}/lib/sa/sa2 -A";
+      };
+    };
+
+    systemd.timers.sysstat-summary = {
+      description = "Generate a summary of yesterday's process accounting";
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = "00:07:00";
+    };
+  };
+}
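
Usage sketch (annotation, not part of the diff): collecting system activity every five minutes instead of the default ten.

    services.sysstat = {
      enable = true;
      collect-frequency = "*:00/5";
    };
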
diff --git a/nixpkgs/nixos/modules/services/monitoring/teamviewer.nix b/nixpkgs/nixos/modules/services/monitoring/teamviewer.nix
new file mode 100644
index 000000000000..dd98ecab828d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/teamviewer.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.teamviewer;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.teamviewer.enable = mkEnableOption "TeamViewer daemon";
+      
+  };
+
+  ###### implementation
+
+  config = mkIf (cfg.enable) {
+
+    environment.systemPackages = [ pkgs.teamviewer ];
+
+    systemd.services.teamviewerd = {
+      description = "TeamViewer remote control daemon";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "NetworkManager-wait-online.service" "network.target" ];
+      preStart = "mkdir -pv /var/lib/teamviewer /var/log/teamviewer";
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.teamviewer}/bin/teamviewerd -d";
+        PIDFile = "/run/teamviewerd.pid";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "on-abort";
+        StartLimitInterval = "60";
+        StartLimitBurst = "10";
+      };
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/telegraf.nix b/nixpkgs/nixos/modules/services/monitoring/telegraf.nix
new file mode 100644
index 000000000000..5d131557e8be
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/telegraf.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.telegraf;
+
+  configFile = pkgs.runCommand "config.toml" {
+    buildInputs = [ pkgs.remarshal ];
+    preferLocalBuild = true;
+  } ''
+    remarshal -if json -of toml \
+      < ${pkgs.writeText "config.json" (builtins.toJSON cfg.extraConfig)} \
+      > $out
+  '';
+in {
+  ###### interface
+  options = {
+    services.telegraf = {
+      enable = mkEnableOption "telegraf server";
+
+      package = mkOption {
+        default = pkgs.telegraf;
+        defaultText = "pkgs.telegraf";
+        description = "Which telegraf derivation to use";
+        type = types.package;
+      };
+
+      extraConfig = mkOption {
+        default = {};
+        description = "Extra configuration options for telegraf";
+        type = types.attrs;
+        example = {
+          outputs = {
+            influxdb = {
+              urls = ["http://localhost:8086"];
+              database = "telegraf";
+            };
+          };
+          inputs = {
+            statsd = {
+              service_address = ":8125";
+              delete_timings = true;
+            };
+          };
+        };
+      };
+    };
+  };
+
+
+  ###### implementation
+  config = mkIf config.services.telegraf.enable {
+    systemd.services.telegraf = {
+      description = "Telegraf Agent";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      serviceConfig = {
+        ExecStart=''${cfg.package}/bin/telegraf -config "${configFile}"'';
+        ExecReload="${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        User = "telegraf";
+        Restart = "on-failure";
+      };
+    };
+
+    users.users.telegraf = {
+      uid = config.ids.uids.telegraf;
+      description = "telegraf daemon user";
+    };
+  };
+}
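
Usage sketch (annotation, not part of the diff): the extraConfig attribute set is converted to TOML, so the structure below mirrors telegraf's usual inputs/outputs sections; it reuses the module's own example values.

    services.telegraf = {
      enable = true;
      extraConfig = {
        inputs.statsd = { service_address = ":8125"; delete_timings = true; };
        outputs.influxdb = {
          urls = [ "http://localhost:8086" ];
          database = "telegraf";
        };
      };
    };
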
diff --git a/nixpkgs/nixos/modules/services/monitoring/thanos.nix b/nixpkgs/nixos/modules/services/monitoring/thanos.nix
new file mode 100644
index 000000000000..52dab28cf72f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/thanos.nix
@@ -0,0 +1,835 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.thanos;
+
+  nullOpt = type: description: mkOption {
+    type = types.nullOr type;
+    default = null;
+    inherit description;
+  };
+
+  optionToArgs = opt: v  : optional (v != null)  ''--${opt}="${toString v}"'';
+  flagToArgs   = opt: v  : optional v            ''--${opt}'';
+  listToArgs   = opt: vs : map               (v: ''--${opt}="${v}"'') vs;
+  attrsToArgs  = opt: kvs: mapAttrsToList (k: v: ''--${opt}=${k}=\"${v}\"'') kvs;
+
+  mkParamDef = type: default: description: mkParam type (description + ''
+
+    Defaults to <literal>${toString default}</literal> in Thanos
+    when set to <literal>null</literal>.
+  '');
+
+  mkParam = type: description: {
+    toArgs = optionToArgs;
+    option = nullOpt type description;
+  };
+
+  mkFlagParam = description: {
+    toArgs = flagToArgs;
+    option = mkOption {
+      type = types.bool;
+      default = false;
+      inherit description;
+    };
+  };
+
+  mkListParam = opt: description: {
+    toArgs = _opt: listToArgs opt;
+    option = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      inherit description;
+    };
+  };
+
+  mkAttrsParam = opt: description: {
+    toArgs = _opt: attrsToArgs opt;
+    option = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      inherit description;
+    };
+  };
+
+  mkStateDirParam = opt: default: description: {
+    toArgs = _opt: stateDir: optionToArgs opt "/var/lib/${stateDir}";
+    option = mkOption {
+      type = types.str;
+      inherit default;
+      inherit description;
+    };
+  };
+
+  toYAML = name: attrs: pkgs.runCommandNoCC name {
+    preferLocalBuild = true;
+    json = builtins.toFile "${name}.json" (builtins.toJSON attrs);
+    nativeBuildInputs = [ pkgs.remarshal ];
+  } ''json2yaml -i $json -o $out'';
+
+  thanos = cmd: "${cfg.package}/bin/thanos ${cmd}" +
+    (let args = cfg.${cmd}.arguments;
+     in optionalString (length args != 0) (" \\\n  " +
+         concatStringsSep " \\\n  " args));
+
+  argumentsOf = cmd: concatLists (collect isList
+    (flip mapParamsRecursive params.${cmd} (path: param:
+      let opt = concatStringsSep "." path;
+          v = getAttrFromPath path cfg.${cmd};
+      in param.toArgs opt v)));
+
+  mkArgumentsOption = cmd: mkOption {
+    type = types.listOf types.str;
+    default = argumentsOf cmd;
+    description = ''
+      Arguments to the <literal>thanos ${cmd}</literal> command.
+
+      Defaults to a list of arguments formed by converting the structured
+      options of <option>services.thanos.${cmd}</option> to a list of arguments.
+
+      Overriding this option will cause none of the structured options to have
+      any effect. So only set this if you know what you're doing!
+    '';
+  };
+
+  mapParamsRecursive =
+    let noParam = attr: !(attr ? toArgs && attr ? option);
+    in mapAttrsRecursiveCond noParam;
+
+  paramsToOptions = mapParamsRecursive (_path: param: param.option);
+
+  params = {
+
+    log = {
+
+      log.level = mkParamDef (types.enum ["debug" "info" "warn" "error" "fatal"]) "info" ''
+        Log filtering level.
+      '';
+
+      log.format = mkParam types.str ''
+        Log format to use.
+      '';
+    };
+
+    tracing = cfg: {
+      tracing.config-file = {
+        toArgs = _opt: path: optionToArgs "tracing.config-file" path;
+        option = mkOption {
+          type = with types; nullOr str;
+          default = if cfg.tracing.config == null then null
+                    else toString (toYAML "tracing.yaml" cfg.tracing.config);
+          defaultText = ''
+            if config.services.thanos.<cmd>.tracing.config == null then null
+            else toString (toYAML "tracing.yaml" config.services.thanos.<cmd>.tracing.config);
+          '';
+          description = ''
+            Path to YAML file that contains tracing configuration.
+
+            See format details: <link xlink:href="https://thanos.io/tracing.md/#configuration"/>
+          '';
+        };
+      };
+
+      tracing.config =
+        {
+          toArgs = _opt: _attrs: [];
+          option = nullOpt types.attrs ''
+            Tracing configuration.
+
+            When not <literal>null</literal> the attribute set gets converted to
+            a YAML file and stored in the Nix store. The option
+            <option>tracing.config-file</option> will default to its path.
+
+            If <option>tracing.config-file</option> is set this option has no effect.
+
+            See format details: <link xlink:href="https://thanos.io/tracing.md/#configuration"/>
+          '';
+        };
+    };
+
+    common = cfg: params.log // params.tracing cfg // {
+
+      http-address = mkParamDef types.str "0.0.0.0:10902" ''
+        Listen <literal>host:port</literal> for HTTP endpoints.
+      '';
+
+      grpc-address = mkParamDef types.str "0.0.0.0:10901" ''
+        Listen <literal>ip:port</literal> address for gRPC endpoints (StoreAPI).
+
+        Make sure this address is routable from other components.
+      '';
+
+      grpc-server-tls-cert = mkParam types.str ''
+        TLS Certificate for gRPC server, leave blank to disable TLS
+      '';
+
+      grpc-server-tls-key = mkParam types.str ''
+        TLS Key for the gRPC server, leave blank to disable TLS
+      '';
+
+      grpc-server-tls-client-ca = mkParam types.str ''
+        TLS CA to verify clients against.
+
+        If no client CA is specified, there is no client verification on server side.
+        (tls.NoClientCert)
+      '';
+    };
+
+    objstore = cfg: {
+
+      objstore.config-file = {
+        toArgs = _opt: path: optionToArgs "objstore.config-file" path;
+        option = mkOption {
+          type = with types; nullOr str;
+          default = if cfg.objstore.config == null then null
+                    else toString (toYAML "objstore.yaml" cfg.objstore.config);
+          defaultText = ''
+            if config.services.thanos.<cmd>.objstore.config == null then null
+            else toString (toYAML "objstore.yaml" config.services.thanos.<cmd>.objstore.config);
+          '';
+          description = ''
+            Path to YAML file that contains object store configuration.
+
+            See format details: <link xlink:href="https://thanos.io/storage.md/#configuration"/>
+          '';
+        };
+      };
+
+      objstore.config =
+        {
+          toArgs = _opt: _attrs: [];
+          option = nullOpt types.attrs ''
+            Object store configuration.
+
+            When not <literal>null</literal> the attribute set gets converted to
+            a YAML file and stored in the Nix store. The option
+            <option>objstore.config-file</option> will default to its path.
+
+            If <option>objstore.config-file</option> is set this option has no effect.
+
+            See format details: <link xlink:href="https://thanos.io/storage.md/#configuration"/>
+          '';
+        };
+    };
+
+    sidecar = params.common cfg.sidecar // params.objstore cfg.sidecar // {
+
+      prometheus.url = mkParamDef types.str "http://localhost:9090" ''
+        URL at which to reach Prometheus's API.
+
+        For better performance use local network.
+      '';
+
+      tsdb.path = {
+        toArgs = optionToArgs;
+        option = mkOption {
+          type = types.str;
+          default = "/var/lib/${config.services.prometheus.stateDir}/data";
+          defaultText = "/var/lib/\${config.services.prometheus.stateDir}/data";
+          description = ''
+            Data directory of TSDB.
+          '';
+        };
+      };
+
+      reloader.config-file = mkParam types.str ''
+        Config file watched by the reloader.
+      '';
+
+      reloader.config-envsubst-file = mkParam types.str ''
+        Output file for environment variable substituted config file.
+      '';
+
+      reloader.rule-dirs = mkListParam "reloader.rule-dir" ''
+        Rule directories for the reloader to refresh.
+      '';
+
+    };
+
+    store = params.common cfg.store // params.objstore cfg.store // {
+
+      stateDir = mkStateDirParam "data-dir" "thanos-store" ''
+        Data directory relative to <literal>/var/lib</literal>
+        in which to cache remote blocks.
+      '';
+
+      index-cache-size = mkParamDef types.str "250MB" ''
+        Maximum size of items held in the index cache.
+      '';
+
+      chunk-pool-size = mkParamDef types.str "2GB" ''
+        Maximum size of concurrently allocatable bytes for chunks.
+      '';
+
+      store.grpc.series-sample-limit = mkParamDef types.int 0 ''
+        Maximum amount of samples returned via a single Series call.
+
+        <literal>0</literal> means no limit.
+
+        NOTE: for efficiency we take 120 as the number of samples in chunk (it
+        cannot be bigger than that), so the actual number of samples might be
+        lower, even though the maximum could be hit.
+      '';
+
+      store.grpc.series-max-concurrency = mkParamDef types.int 20 ''
+        Maximum number of concurrent Series calls.
+      '';
+
+      sync-block-duration = mkParamDef types.str "3m" ''
+        Repeat interval for syncing the blocks between local and remote view.
+      '';
+
+      block-sync-concurrency = mkParamDef types.int 20 ''
+        Number of goroutines to use when syncing blocks from object storage.
+      '';
+
+      min-time = mkParamDef types.str "0000-01-01T00:00:00Z" ''
+        Start of time range limit to serve.
+
+        Thanos Store serves only metrics that happened later than this
+        value. Option can be a constant time in RFC3339 format or time duration
+        relative to current time, such as -1d or 2h45m. Valid duration units are
+        ms, s, m, h, d, w, y.
+      '';
+
+      max-time = mkParamDef types.str "9999-12-31T23:59:59Z" ''
+        End of time range limit to serve.
+
+        Thanos Store serves only blocks that happened earlier than this
+        value. Option can be a constant time in RFC3339 format or time duration
+        relative to current time, such as -1d or 2h45m. Valid duration units are
+        ms, s, m, h, d, w, y.
+      '';
+    };
+
+    query = params.common cfg.query // {
+
+      grpc-client-tls-secure = mkFlagParam ''
+        Use TLS when talking to the gRPC server
+      '';
+
+      grpc-client-tls-cert = mkParam types.str ''
+        TLS Certificates to use to identify this client to the server
+      '';
+
+      grpc-client-tls-key = mkParam types.str ''
+        TLS Key for the client's certificate
+      '';
+
+      grpc-client-tls-ca = mkParam types.str ''
+        TLS CA Certificates to use to verify gRPC servers
+      '';
+
+      grpc-client-server-name = mkParam types.str ''
+        Server name to verify the hostname on the returned gRPC certificates.
+        See <link xlink:href="https://tools.ietf.org/html/rfc4366#section-3.1"/>
+      '';
+
+      web.route-prefix = mkParam types.str ''
+        Prefix for API and UI endpoints.
+
+        This allows thanos UI to be served on a sub-path. This option is
+        analogous to <option>web.route-prefix</option> of Prometheus.
+      '';
+
+      web.external-prefix = mkParam types.str ''
+        Static prefix for all HTML links and redirect URLs in the UI query web
+        interface.
+
+        Actual endpoints are still served on / or the
+        <option>web.route-prefix</option>. This allows thanos UI to be served
+        behind a reverse proxy that strips a URL sub-path.
+      '';
+
+      web.prefix-header = mkParam types.str ''
+        Name of HTTP request header used for dynamic prefixing of UI links and
+        redirects.
+
+        This option is ignored if the option
+        <literal>web.external-prefix</literal> is set.
+
+        Security risk: enable this option only if a reverse proxy in front of
+        thanos is resetting the header.
+
+        The setting <literal>web.prefix-header="X-Forwarded-Prefix"</literal>
+        can be useful, for example, if Thanos UI is served via Traefik reverse
+        proxy with <literal>PathPrefixStrip</literal> option enabled, which
+        sends the stripped prefix value in <literal>X-Forwarded-Prefix</literal>
+        header. This allows thanos UI to be served on a sub-path.
+      '';
+
+      query.timeout = mkParamDef types.str "2m" ''
+        Maximum time to process query by query node.
+      '';
+
+      query.max-concurrent = mkParamDef types.int 20 ''
+        Maximum number of queries processed concurrently by query node.
+      '';
+
+      query.replica-label = mkParam types.str ''
+        Label to treat as a replica indicator along which data is
+        deduplicated.
+
+        Still you will be able to query without deduplication using
+        <literal>dedup=false</literal> parameter.
+      '';
+
+      selector-labels = mkAttrsParam "selector-label" ''
+        Query selector labels that will be exposed in info endpoint.
+      '';
+
+      store.addresses = mkListParam "store" ''
+        Addresses of statically configured store API servers.
+
+        The scheme may be prefixed with <literal>dns+</literal> or
+        <literal>dnssrv+</literal> to detect store API servers through
+        respective DNS lookups.
+      '';
+
+      store.sd-files = mkListParam "store.sd-files" ''
+        Path to files that contain addresses of store API servers. The path
+        can be a glob pattern.
+      '';
+
+      store.sd-interval = mkParamDef types.str "5m" ''
+        Refresh interval to re-read file SD files. It is used as a resync fallback.
+      '';
+
+      store.sd-dns-interval = mkParamDef types.str "30s" ''
+        Interval between DNS resolutions.
+      '';
+
+      store.unhealthy-timeout = mkParamDef types.str "5m" ''
+        Timeout before an unhealthy store is cleaned from the store UI page.
+      '';
+
+      query.auto-downsampling = mkFlagParam ''
+        Enable automatic adjustment (step / 5) to what source of data should
+        be used in store gateways if no
+        <literal>max_source_resolution</literal> param is specified.
+      '';
+
+      query.partial-response = mkFlagParam ''
+        Enable partial response for queries if no
+        <literal>partial_response</literal> param is specified.
+      '';
+
+      query.default-evaluation-interval = mkParamDef types.str "1m" ''
+        Set default evaluation interval for sub queries.
+      '';
+
+      store.response-timeout = mkParamDef types.str "0ms" ''
+        If a store does not send any data within this duration, it will be
+        ignored and partial data will be returned (if partial response is
+        enabled). <literal>0</literal> disables the timeout.
+      '';
+    };
+
+    rule = params.common cfg.rule // params.objstore cfg.rule // {
+
+      labels = mkAttrsParam "label" ''
+        Labels to be applied to all generated metrics.
+
+        Similar to external labels for Prometheus,
+        used to identify ruler and its blocks as unique source.
+      '';
+
+      stateDir = mkStateDirParam "data-dir" "thanos-rule" ''
+        Data directory relative to <literal>/var/lib</literal>.
+      '';
+
+      rule-files = mkListParam "rule-file" ''
+        Rule files that should be used by rule manager. Can be in glob format.
+      '';
+
+      eval-interval = mkParamDef types.str "30s" ''
+        The default evaluation interval to use.
+      '';
+
+      tsdb.block-duration = mkParamDef types.str "2h" ''
+        Block duration for TSDB block.
+      '';
+
+      tsdb.retention = mkParamDef types.str "48h" ''
+        Block retention time on local disk.
+      '';
+
+      alertmanagers.urls = mkListParam "alertmanagers.url" ''
+        Alertmanager replica URLs to push firing alerts.
+
+        Ruler claims success if push to at least one alertmanager from
+        discovered succeeds. The scheme may be prefixed with
+        <literal>dns+</literal> or <literal>dnssrv+</literal> to detect
+        Alertmanager IPs through respective DNS lookups. The port defaults to
+        <literal>9093</literal> or the SRV record's value. The URL path is
+        used as a prefix for the regular Alertmanager API path.
+      '';
+
+      alertmanagers.send-timeout = mkParamDef types.str "10s" ''
+        Timeout for sending alerts to alertmanager.
+      '';
+
+      alert.query-url = mkParam types.str ''
+        The external Thanos Query URL to be set in the 'Source' field of all alerts.
+      '';
+
+      alert.label-drop = mkListParam "alert.label-drop" ''
+        Labels by name to drop before sending to alertmanager.
+
+        This allows alerts to be deduplicated on the replica label.
+
+        Similar to Prometheus alert relabelling.
+      '';
+
+      web.route-prefix = mkParam types.str ''
+        Prefix for API and UI endpoints.
+
+        This allows Thanos UI to be served on a sub-path.
+
+        This option is analogous to <literal>--web.route-prefix</literal> of Prometheus.
+      '';
+
+      web.external-prefix = mkParam types.str ''
+        Static prefix for all HTML links and redirect URLs in the UI query web
+        interface.
+
+        Actual endpoints are still served on / or the
+        <option>web.route-prefix</option>. This allows thanos UI to be served
+        behind a reverse proxy that strips a URL sub-path.
+      '';
+
+      web.prefix-header = mkParam types.str ''
+        Name of HTTP request header used for dynamic prefixing of UI links and
+        redirects.
+
+        This option is ignored if the option
+        <option>web.external-prefix</option> is set.
+
+        Security risk: enable this option only if a reverse proxy in front of
+        thanos is resetting the header.
+
+        The header <literal>X-Forwarded-Prefix</literal> can be useful, for
+        example, if Thanos UI is served via Traefik reverse proxy with
+        <literal>PathPrefixStrip</literal> option enabled, which sends the
+        stripped prefix value in <literal>X-Forwarded-Prefix</literal>
+        header. This allows thanos UI to be served on a sub-path.
+      '';
+
+      query.addresses = mkListParam "query" ''
+        Addresses of statically configured query API servers.
+
+        The scheme may be prefixed with <literal>dns+</literal> or
+        <literal>dnssrv+</literal> to detect query API servers through
+        respective DNS lookups.
+      '';
+
+      query.sd-files = mkListParam "query.sd-files" ''
+        Path to files that contain addresses of query peers.
+        The path can be a glob pattern.
+      '';
+
+      query.sd-interval = mkParamDef types.str "5m" ''
+        Refresh interval to re-read file SD files. (used as a fallback)
+      '';
+
+      query.sd-dns-interval = mkParamDef types.str "30s" ''
+        Interval between DNS resolutions.
+      '';
+    };
+
+    compact = params.log // params.tracing cfg.compact // params.objstore cfg.compact // {
+
+      http-address = mkParamDef types.str "0.0.0.0:10902" ''
+        Listen <literal>host:port</literal> for HTTP endpoints.
+      '';
+
+      stateDir = mkStateDirParam "data-dir" "thanos-compact" ''
+        Data directory relative to <literal>/var/lib</literal>
+        in which to cache blocks and process compactions.
+      '';
+
+      consistency-delay = mkParamDef types.str "30m" ''
+        Minimum age of fresh (non-compacted) blocks before they are
+        processed. Malformed blocks older than the maximum of consistency-delay
+        and 30m0s will be removed.
+      '';
+
+      retention.resolution-raw = mkParamDef types.str "0d" ''
+        How long to retain raw samples in bucket.
+
+        <literal>0d</literal> - disables this retention
+      '';
+
+      retention.resolution-5m = mkParamDef types.str "0d" ''
+        How long to retain samples of resolution 1 (5 minutes) in bucket.
+
+        <literal>0d</literal> - disables this retention
+      '';
+
+      retention.resolution-1h = mkParamDef types.str "0d" ''
+        How long to retain samples of resolution 2 (1 hour) in bucket.
+
+        <literal>0d</literal> - disables this retention
+      '';
+
+      startAt = {
+        toArgs = _opt: startAt: flagToArgs "wait" (startAt == null);
+        option = nullOpt types.str ''
+          When this option is set to a <literal>systemd.time</literal>
+          specification, the Thanos compactor will run at the specified period.
+
+          When this option is <literal>null</literal>, the Thanos compactor
+          service will run continuously; it will not exit after all compactions
+          have been processed but will wait for new work.
+        '';
+      };
+
+      downsampling.disable = mkFlagParam ''
+        Disables downsampling.
+
+        This is not recommended, as querying long time ranges without
+        downsampled data is neither efficient nor useful, e.g. it is not
+        possible to render all samples for a human eye anyway.
+      '';
+
+      block-sync-concurrency = mkParamDef types.int 20 ''
+        Number of goroutines to use when syncing block metadata from object storage.
+      '';
+
+      compact.concurrency = mkParamDef types.int 1 ''
+        Number of goroutines to use when compacting groups.
+      '';
+    };
+
+    downsample = params.log // params.tracing cfg.downsample // params.objstore cfg.downsample // {
+
+      stateDir = mkStateDirParam "data-dir" "thanos-downsample" ''
+        Data directory relative to <literal>/var/lib</literal>
+        in which to cache blocks and process downsamplings.
+      '';
+
+    };
+
+    receive = params.common cfg.receive // params.objstore cfg.receive // {
+
+      remote-write.address = mkParamDef types.str "0.0.0.0:19291" ''
+        Address to listen on for remote write requests.
+      '';
+
+      stateDir = mkStateDirParam "tsdb.path" "thanos-receive" ''
+        Data directory relative to <literal>/var/lib</literal> of TSDB.
+      '';
+
+      labels = mkAttrsParam "labels" ''
+        External labels to announce.
+
+        This flag will be removed in the future when handling multiple tsdb
+        instances is added.
+      '';
+
+      tsdb.retention = mkParamDef types.str "15d" ''
+        How long to retain raw samples on local storage.
+
+        <literal>0d</literal> - disables this retention
+      '';
+    };
+
+  };
+
+  assertRelativeStateDir = cmd: {
+    assertions = [
+      {
+        assertion = !hasPrefix "/" cfg.${cmd}.stateDir;
+        message =
+          "The option services.thanos.${cmd}.stateDir should not be an absolute directory." +
+          " It should be a directory relative to /var/lib.";
+      }
+    ];
+  };
+
+in {
+
+  options.services.thanos = {
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.thanos;
+      defaultText = "pkgs.thanos";
+      description = ''
+        The thanos package that should be used.
+      '';
+    };
+
+    sidecar = paramsToOptions params.sidecar // {
+      enable = mkEnableOption
+        "the Thanos sidecar for Prometheus server";
+      arguments = mkArgumentsOption "sidecar";
+    };
+
+    store = paramsToOptions params.store // {
+      enable = mkEnableOption
+        "the Thanos store node giving access to blocks in a bucket provider.";
+      arguments = mkArgumentsOption "store";
+    };
+
+    query = paramsToOptions params.query // {
+      enable = mkEnableOption
+        ("the Thanos query node exposing PromQL enabled Query API " +
+         "with data retrieved from multiple store nodes");
+      arguments = mkArgumentsOption "query";
+    };
+
+    rule = paramsToOptions params.rule // {
+      enable = mkEnableOption
+        ("the Thanos ruler service which evaluates Prometheus rules against" +
+         " given query nodes, exposing the Store API and storing old blocks in a bucket");
+      arguments = mkArgumentsOption "rule";
+    };
+
+    compact = paramsToOptions params.compact // {
+      enable = mkEnableOption
+        "the Thanos compactor which continuously compacts blocks in an object store bucket";
+      arguments = mkArgumentsOption "compact";
+    };
+
+    downsample = paramsToOptions params.downsample // {
+      enable = mkEnableOption
+        "the Thanos downsampler which continuously downsamples blocks in an object store bucket";
+      arguments = mkArgumentsOption "downsample";
+    };
+
+    receive = paramsToOptions params.receive // {
+      enable = mkEnableOption
+        ("the Thanos receiver which accepts Prometheus remote write API requests " +
+         "and writes to a local TSDB (EXPERIMENTAL, this may change drastically without notice)");
+      arguments = mkArgumentsOption "receive";
+    };
+  };
+
+  config = mkMerge [
+
+    (mkIf cfg.sidecar.enable {
+      assertions = [
+        {
+          assertion = config.services.prometheus.enable;
+          message =
+            "Please enable services.prometheus when enabling services.thanos.sidecar.";
+        }
+        {
+          assertion = !(config.services.prometheus.globalConfig.external_labels == null ||
+                        config.services.prometheus.globalConfig.external_labels == {});
+          message =
+            "services.thanos.sidecar requires uniquely identifying external labels " +
+            "to be configured in the Prometheus server. " +
+            "Please set services.prometheus.globalConfig.external_labels.";
+        }
+      ];
+      systemd.services.thanos-sidecar = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network.target" "prometheus.service" ];
+        serviceConfig = {
+          User = "prometheus";
+          Restart = "always";
+          ExecStart = thanos "sidecar";
+        };
+      };
+    })
+
+    (mkIf cfg.store.enable (mkMerge [
+      (assertRelativeStateDir "store")
+      {
+        systemd.services.thanos-store = {
+          wantedBy = [ "multi-user.target" ];
+          after    = [ "network.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            StateDirectory = cfg.store.stateDir;
+            Restart = "always";
+            ExecStart = thanos "store";
+          };
+        };
+      }
+    ]))
+
+    (mkIf cfg.query.enable {
+      systemd.services.thanos-query = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network.target" ];
+        serviceConfig = {
+          DynamicUser = true;
+          Restart = "always";
+          ExecStart = thanos "query";
+        };
+      };
+    })
+
+    (mkIf cfg.rule.enable (mkMerge [
+      (assertRelativeStateDir "rule")
+      {
+        systemd.services.thanos-rule = {
+          wantedBy = [ "multi-user.target" ];
+          after    = [ "network.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            StateDirectory = cfg.rule.stateDir;
+            Restart = "always";
+            ExecStart = thanos "rule";
+          };
+        };
+      }
+    ]))
+
+    (mkIf cfg.compact.enable (mkMerge [
+      (assertRelativeStateDir "compact")
+      {
+        systemd.services.thanos-compact =
+          let wait = cfg.compact.startAt == null; in {
+            wantedBy = [ "multi-user.target" ];
+            after    = [ "network.target" ];
+            serviceConfig = {
+              Type    = if wait then "simple" else "oneshot";
+              Restart = if wait then "always" else "no";
+              DynamicUser = true;
+              StateDirectory = cfg.compact.stateDir;
+              ExecStart = thanos "compact";
+            };
+          } // optionalAttrs (!wait) { inherit (cfg.compact) startAt; };
+      }
+    ]))
+
+    (mkIf cfg.downsample.enable (mkMerge [
+      (assertRelativeStateDir "downsample")
+      {
+        systemd.services.thanos-downsample = {
+          wantedBy = [ "multi-user.target" ];
+          after    = [ "network.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            StateDirectory = cfg.downsample.stateDir;
+            Restart = "always";
+            ExecStart = thanos "downsample";
+          };
+        };
+      }
+    ]))
+
+    (mkIf cfg.receive.enable (mkMerge [
+      (assertRelativeStateDir "receive")
+      {
+        systemd.services.thanos-receive = {
+          wantedBy = [ "multi-user.target" ];
+          after    = [ "network.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            StateDirectory = cfg.receive.stateDir;
+            Restart = "always";
+            ExecStart = thanos "receive";
+          };
+        };
+      }
+    ]))
+
+  ];
+}
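
A minimal usage sketch for the Thanos module above, wiring the sidecar and the query
component together. The store address assumes the sidecar's default gRPC listen port
(10901) and the external label value is only illustrative:

  services.prometheus = {
    enable = true;
    # the sidecar asserts that uniquely identifying external labels are set
    globalConfig.external_labels = { replica = "replica-1"; };
  };

  services.thanos = {
    sidecar.enable = true;
    query = {
      enable = true;
      # assumed sidecar gRPC endpoint; adjust if grpc-address was changed
      store.addresses = [ "localhost:10901" ];
    };
  };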
diff --git a/nixpkgs/nixos/modules/services/monitoring/tuptime.nix b/nixpkgs/nixos/modules/services/monitoring/tuptime.nix
new file mode 100644
index 000000000000..731260a5c20a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/tuptime.nix
@@ -0,0 +1,84 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.tuptime;
+
+in {
+
+  options.services.tuptime = {
+
+    enable = mkEnableOption "the total uptime service";
+
+    timer = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = "Whether to regularly log uptime to detect bad shutdowns.";
+      };
+
+      period = mkOption {
+        type = types.str;
+        default = "*:0/5";
+        description = "The systemd calendar event at which uptime is logged.";
+      };
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.tuptime ];
+
+    users.users.tuptime.description = "tuptime database owner";
+
+    systemd = {
+      services = {
+
+        tuptime = {
+          description = "the total uptime service";
+          documentation = [ "man:tuptime(1)" ];
+          after = [ "time-sync.target" ];
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            StateDirectory = "tuptime";
+            Type = "oneshot";
+            User = "tuptime";
+            RemainAfterExit = true;
+            ExecStart = "${pkgs.tuptime}/bin/tuptime -x";
+            ExecStop = "${pkgs.tuptime}/bin/tuptime -xg";
+          };
+        };
+
+        tuptime-oneshot = mkIf cfg.timer.enable {
+          description = "the tuptime scheduled execution unit";
+          serviceConfig = {
+            StateDirectory = "tuptime";
+            Type = "oneshot";
+            User = "tuptime";
+            ExecStart = "${pkgs.tuptime}/bin/tuptime -x";
+          };
+        };
+      };
+
+      timers.tuptime = mkIf cfg.timer.enable {
+        description = "the tuptime scheduled execution timer";
+        # this timer should be started if the service is started
+        # even if the timer was previously stopped
+        wantedBy = [ "tuptime.service" "timers.target" ];
+        # this timer should be stopped if the service is stopped
+        partOf = [ "tuptime.service" ];
+        timerConfig = {
+          OnBootSec = "1min";
+          OnCalendar = cfg.timer.period;
+          Unit = "tuptime-oneshot.service";
+        };
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.evils ];
+
+}
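
A minimal usage sketch for the tuptime module above; the timer period shown is an
example override of the "*:0/5" default:

  services.tuptime = {
    enable = true;
    timer = {
      enable = true;        # default; logs uptime regularly to detect bad shutdowns
      period = "*:0/10";    # example: every 10 minutes instead of every 5
    };
  };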
diff --git a/nixpkgs/nixos/modules/services/monitoring/ups.nix b/nixpkgs/nixos/modules/services/monitoring/ups.nix
new file mode 100644
index 000000000000..a45e806d4ad8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/ups.nix
@@ -0,0 +1,263 @@
+{ config, lib, pkgs, ... }:
+
+# TODO: This is not secure, have a look at the file docs/security.txt inside
+# the project sources.
+with lib;
+
+let
+  cfg = config.power.ups;
+in
+
+let
+  upsOptions = {name, config, ...}:
+  {
+    options = {
+      # This can be inferred from the UPS model by looking at
+      # /nix/store/nut/share/driver.list
+      driver = mkOption {
+        type = types.str;
+        description = ''
+          Specify the program to run to talk to this UPS.  apcsmart,
+          bestups, and sec are some examples.
+        '';
+      };
+
+      port = mkOption {
+        type = types.str;
+        description = ''
+          The serial port to which your UPS is connected.  /dev/ttyS0 is
+          usually the first port on Linux boxes, for example.
+        '';
+      };
+
+      shutdownOrder = mkOption {
+        default = 0;
+        type = types.int;
+        description = ''
+          When you have multiple UPSes on your system, you usually need to
+          turn them off in a certain order.  upsdrvctl shuts down all the
+          0s, then the 1s, 2s, and so on.  To exclude a UPS from the
+          shutdown sequence, set this to -1.
+        '';
+      };
+
+      maxStartDelay = mkOption {
+        default = null;
+        type = types.uniq (types.nullOr types.int);
+        description = ''
+          This can be set as a global variable above your first UPS
+          definition and it can also be set in a UPS section.  This value
+          controls how long upsdrvctl will wait for the driver to finish
+          starting.  This keeps your system from getting stuck due to a
+          broken driver or UPS.
+        '';
+      };
+
+      description = mkOption {
+        default = "";
+        type = types.str;
+        description = ''
+          Description of the UPS.
+        '';
+      };
+
+      directives = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = ''
+          List of configuration directives for this UPS.
+        '';
+      };
+
+      summary = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Lines which would be added inside ups.conf for handling this UPS.
+        '';
+      };
+
+    };
+
+    config = {
+      directives = mkOrder 10 ([
+        "driver = ${config.driver}"
+        "port = ${config.port}"
+        ''desc = "${config.description}"''
+        "sdorder = ${toString config.shutdownOrder}"
+      ] ++ (optional (config.maxStartDelay != null)
+            "maxstartdelay = ${toString config.maxStartDelay}")
+      );
+
+      summary =
+        concatStringsSep "\n      "
+          (["[${name}]"] ++ config.directives);
+    };
+  };
+
+in
+
+
+{
+  options = {
+    # powerManagement.powerDownCommands
+
+    power.ups = {
+      enable = mkOption {
+        default = false;
+        type = with types; bool;
+        description = ''
+          Enables support for Power Devices, such as Uninterruptible Power
+          Supplies, Power Distribution Units and Solar Controllers.
+        '';
+      };
+
+      # This option is not used yet.
+      mode = mkOption {
+        default = "standalone";
+        type = types.str;
+        description = ''
+          The MODE determines which part of the NUT is to be started, and
+          which configuration files must be modified.
+
+          The values of MODE can be:
+
+          - none: NUT is not configured, or the Integrated Power Management
+            or some external system is used to start up the NUT components,
+            so nothing is to be started.
+
+          - standalone: This mode addresses a local-only configuration, with
+            one UPS protecting the local system. This implies starting the
+            three NUT layers (driver, upsd and upsmon) and the matching
+            configuration files. This mode can also address UPS redundancy.
+
+          - netserver: Same as the standalone configuration, but it also
+            needs some more ACLs and possibly a specific LISTEN directive in
+            upsd.conf. Since this MODE is open to the network, special care
+            should be applied to security concerns.
+
+          - netclient: This mode only requires upsmon.
+        '';
+      };
+
+      schedulerRules = mkOption {
+        example = "/etc/nixos/upssched.conf";
+        type = types.str;
+        description = ''
+          File which contains the rules to handle UPS events.
+        '';
+      };
+
+
+      maxStartDelay = mkOption {
+        default = 45;
+        type = types.int;
+        description = ''
+          This can be set as a global variable above your first UPS
+          definition and it can also be set in a UPS section.  This value
+          controls how long upsdrvctl will wait for the driver to finish
+          starting.  This keeps your system from getting stuck due to a
+          broken driver or UPS.
+        '';
+      };
+
+      ups = mkOption {
+        default = {};
+        # see nut/etc/ups.conf.sample
+        description = ''
+          This is where you configure all the UPSes that this system will be
+          monitoring directly.  These are usually attached to serial ports,
+          but USB devices are also supported.
+        '';
+        type = with types; attrsOf (submodule upsOptions);
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.nut ];
+
+    systemd.services.upsmon = {
+      description = "Uninterruptible Power Supplies (Monitor)";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.Type = "forking";
+      script = "${pkgs.nut}/sbin/upsmon";
+      environment.NUT_CONFPATH = "/etc/nut/";
+      environment.NUT_STATEPATH = "/var/lib/nut/";
+    };
+
+    systemd.services.upsd = {
+      description = "Uninterruptible Power Supplies (Daemon)";
+      after = [ "network.target" "upsmon.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.Type = "forking";
+      # TODO: replace 'root' by another username.
+      script = "${pkgs.nut}/sbin/upsd -u root";
+      environment.NUT_CONFPATH = "/etc/nut/";
+      environment.NUT_STATEPATH = "/var/lib/nut/";
+    };
+
+    systemd.services.upsdrv = {
+      description = "Uninterruptible Power Supplies (Register all UPS)";
+      after = [ "upsd.service" ];
+      wantedBy = [ "multi-user.target" ];
+      # TODO: replace 'root' by another username.
+      script = ''${pkgs.nut}/bin/upsdrvctl -u root start'';
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+      environment.NUT_CONFPATH = "/etc/nut/";
+      environment.NUT_STATEPATH = "/var/lib/nut/";
+    };
+
+    environment.etc = {
+      "nut/nut.conf".source = pkgs.writeText "nut.conf"
+        ''
+          MODE = ${cfg.mode}
+        '';
+      "nut/ups.conf".source = pkgs.writeText "ups.conf"
+        ''
+          maxstartdelay = ${toString cfg.maxStartDelay}
+
+          ${flip concatStringsSep (forEach (attrValues cfg.ups) (ups: ups.summary)) "
+
+          "}
+        '';
+      "nut/upssched.conf".source = cfg.schedulerRules;
+      # These files contain private information and thus should not
+      # be stored inside the Nix store.
+      /*
+      "nut/upsd.conf".source = "";
+      "nut/upsd.users".source = "";
+      "nut/upsmon.conf".source = "";
+      */
+    };
+
+    power.ups.schedulerRules = mkDefault "${pkgs.nut}/etc/upssched.conf.sample";
+
+    system.activationScripts.upsSetup = stringAfter [ "users" "groups" ]
+      ''
+        # Used to store pid files of drivers.
+        mkdir -p /var/state/ups
+      '';
+
+
+/*
+    users.users.nut =
+      { uid = 84;
+        home = "/var/lib/nut";
+        createHome = true;
+        group = "nut";
+        description = "Network UPS Tools (NUT) user";
+      };
+
+    users.groups."nut" =
+      { gid = 84; };
+*/
+
+  };
+}
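
A usage sketch for the NUT module above; the driver and port values are assumptions
for a typical USB-connected UPS, not defaults of this module:

  power.ups = {
    enable = true;
    mode = "standalone";
    ups.myups = {
      driver = "usbhid-ups";   # assumed driver, check nut's driver.list for your model
      port = "auto";           # assumed; a serial UPS would use e.g. "/dev/ttyS0"
      description = "Workstation UPS";
    };
  };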
diff --git a/nixpkgs/nixos/modules/services/monitoring/uptime.nix b/nixpkgs/nixos/modules/services/monitoring/uptime.nix
new file mode 100644
index 000000000000..245badc3e44f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/uptime.nix
@@ -0,0 +1,96 @@
+{ config, pkgs, lib, ... }:
+let
+  inherit (lib) mkOption mkEnableOption mkIf mkMerge types optional;
+
+  cfg = config.services.uptime;
+
+  configDir = pkgs.runCommand "config" { preferLocalBuild = true; }
+  (if cfg.configFile != null then ''
+    mkdir $out
+    ext=`echo ${cfg.configFile} | grep -o \\..*`
+    ln -sv ${cfg.configFile} $out/default$ext
+    ln -sv /var/lib/uptime/runtime.json $out/runtime.json
+  '' else ''
+    mkdir $out
+    cat ${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/config/default.yaml > $out/default.yaml
+    cat >> $out/default.yaml <<EOF
+
+    autoStartMonitor: false
+
+    mongodb:
+      connectionString: 'mongodb://localhost/uptime'
+    EOF
+    ln -sv /var/lib/uptime/runtime.json $out/runtime.json
+  '');
+in {
+  options.services.uptime = {
+    configFile = mkOption {
+      description = ''
+        The uptime configuration file
+
+        If mongodb: server != localhost, please set usesRemoteMongo = true
+
+        If you only want to run the monitor, please set enableWebService = false
+        and enableSeparateMonitoringService = true
+
+        If autoStartMonitor: false (recommended) and you want to run both
+        services, please set enableSeparateMonitoringService = true
+      '';
+
+      type = types.nullOr types.path;
+
+      default = null;
+    };
+
+    usesRemoteMongo = mkOption {
+      description = "Whether the configuration file specifies a remote mongo instance";
+
+      default = false;
+
+      type = types.bool;
+    };
+
+    enableWebService = mkEnableOption "the uptime monitoring program web service";
+
+    enableSeparateMonitoringService = mkEnableOption "the uptime monitoring service" // { default = cfg.enableWebService; };
+
+    nodeEnv = mkOption {
+      description = "The node environment to run in (development, production, etc.)";
+
+      type = types.str;
+
+      default = "production";
+    };
+  };
+
+  config = mkMerge [ (mkIf cfg.enableWebService {
+    systemd.services.uptime = {
+      description = "uptime web service";
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        NODE_CONFIG_DIR = configDir;
+        NODE_ENV = cfg.nodeEnv;
+        NODE_PATH = "${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/node_modules";
+      };
+      preStart = "mkdir -p /var/lib/uptime";
+      serviceConfig.ExecStart = "${pkgs.nodejs}/bin/node ${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/app.js";
+    };
+
+    services.mongodb.enable = mkIf (!cfg.usesRemoteMongo) true;
+  }) (mkIf cfg.enableSeparateMonitoringService {
+    systemd.services.uptime-monitor = {
+      description = "uptime monitoring service";
+      wantedBy = [ "multi-user.target" ];
+      requires = optional cfg.enableWebService "uptime.service";
+      after = optional cfg.enableWebService "uptime.service";
+      environment = {
+        NODE_CONFIG_DIR = configDir;
+        NODE_ENV = cfg.nodeEnv;
+        NODE_PATH = "${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/node_modules";
+      };
+      # Ugh, need to wait for web service to be up
+      preStart = if cfg.enableWebService then "sleep 1s" else "mkdir -p /var/lib/uptime";
+      serviceConfig.ExecStart = "${pkgs.nodejs}/bin/node ${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/monitor.js";
+    };
+  }) ];
+}
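
A usage sketch for the uptime module above, running the web service together with the
monitor against the local MongoDB instance that the module enables by default:

  services.uptime = {
    enableWebService = true;
    # defaults to the value of enableWebService, shown here for clarity
    enableSeparateMonitoringService = true;
    nodeEnv = "production";
  };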
diff --git a/nixpkgs/nixos/modules/services/monitoring/vnstat.nix b/nixpkgs/nixos/modules/services/monitoring/vnstat.nix
new file mode 100644
index 000000000000..e9bedb704a43
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/vnstat.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.vnstat;
+in {
+  options.services.vnstat = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to enable update of network usage statistics via vnstatd.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.vnstatd = {
+      isSystemUser = true;
+      description = "vnstat daemon user";
+      home = "/var/lib/vnstat";
+      createHome = true;
+    };
+
+    systemd.services.vnstat = {
+      description = "vnStat network traffic monitor";
+      path = [ pkgs.coreutils ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      documentation = [
+        "man:vnstatd(1)"
+        "man:vnstat(1)"
+        "man:vnstat.conf(5)"
+      ];
+      preStart = "chmod 755 /var/lib/vnstat";
+      serviceConfig = {
+        ExecStart = "${pkgs.vnstat}/bin/vnstatd -n";
+        ExecReload = "${pkgs.procps}/bin/kill -HUP $MAINPID";
+
+        # Hardening (from upstream example service)
+        ProtectSystem = "strict";
+        StateDirectory = "vnstat";
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectKernelModules = true;
+        PrivateTmp = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+
+        User = "vnstatd";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix
new file mode 100644
index 000000000000..b3383ed628b2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix
@@ -0,0 +1,159 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.zabbixAgent;
+
+  inherit (lib) mkDefault mkEnableOption mkIf mkOption;
+  inherit (lib) attrValues concatMapStringsSep literalExample optionalString types;
+
+  user = "zabbix-agent";
+  group = "zabbix-agent";
+
+  moduleEnv = pkgs.symlinkJoin {
+    name = "zabbix-agent-module-env";
+    paths = attrValues cfg.modules;
+  };
+
+  configFile = pkgs.writeText "zabbix_agent.conf" ''
+    LogType = console
+    Server = ${cfg.server}
+    ListenIP = ${cfg.listen.ip}
+    ListenPort = ${toString cfg.listen.port}
+    ${optionalString (cfg.modules != {}) "LoadModulePath = ${moduleEnv}/lib"}
+    ${concatMapStringsSep "\n" (name: "LoadModule = ${name}") (builtins.attrNames cfg.modules)}
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+  # interface
+
+  options = {
+
+    services.zabbixAgent = {
+      enable = mkEnableOption "the Zabbix Agent";
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.zabbix.agent;
+        defaultText = "pkgs.zabbix.agent";
+        description = "The Zabbix package to use.";
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = with pkgs; [ nettools ];
+        defaultText = "[ nettools ]";
+        example = "[ nettools mysql ]";
+        description = ''
+          Packages to be added to the Zabbix <envar>PATH</envar>.
+          Typically used to add executables for scripts, but can be anything.
+        '';
+      };
+
+      modules = mkOption {
+        type = types.attrsOf types.package;
+        description = "A set of modules to load.";
+        default = {};
+        example = literalExample ''
+          {
+            "dummy.so" = pkgs.stdenv.mkDerivation {
+              name = "zabbix-dummy-module-''${cfg.package.version}";
+              src = cfg.package.src;
+              buildInputs = [ cfg.package ];
+              sourceRoot = "zabbix-''${cfg.package.version}/src/modules/dummy";
+              installPhase = '''
+                mkdir -p $out/lib
+                cp dummy.so $out/lib/
+              ''';
+            };
+          }
+        '';
+      };
+
+      server = mkOption {
+        type = types.str;
+        description = ''
+          The IP address or hostname of the Zabbix server to connect to.
+        '';
+      };
+
+      listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = ''
+            List of comma delimited IP addresses that the agent should listen on.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 10050;
+          description = ''
+            Agent will listen on this port for connections from the server.
+          '';
+        };
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Open ports in the firewall for the Zabbix Agent.
+        '';
+      };
+
+      # TODO: for bonus points migrate this to https://github.com/NixOS/rfcs/pull/42
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Configuration that is injected verbatim into the configuration file. Refer to
+          <link xlink:href="https://www.zabbix.com/documentation/current/manual/appendix/config/zabbix_agentd"/>
+          for details on supported values.
+        '';
+      };
+
+    };
+
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ];
+    };
+
+    users.users.${user} = {
+      description = "Zabbix Agent daemon user";
+      inherit group;
+      isSystemUser = true;
+    };
+
+    users.groups.${group} = { };
+
+    systemd.services.zabbix-agent = {
+      description = "Zabbix Agent";
+
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ "/run/wrappers" ] ++ cfg.extraPackages;
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/sbin/zabbix_agentd zabbix_agentd -f --config ${configFile}";
+        Restart = "always";
+        RestartSec = 2;
+
+        User = user;
+        Group = group;
+        PrivateTmp = true;
+      };
+    };
+
+  };
+
+}
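
A usage sketch for the Zabbix agent module above; the server address and hostname are
placeholders:

  services.zabbixAgent = {
    enable = true;
    server = "zabbix.example.org";   # placeholder Zabbix server/proxy address
    openFirewall = true;             # allow the server to reach the agent on port 10050
    extraConfig = ''
      Hostname = myhost.example.org
    '';
  };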
diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-proxy.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-proxy.nix
new file mode 100644
index 000000000000..9d214469c3b3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-proxy.nix
@@ -0,0 +1,299 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.zabbixProxy;
+  pgsql = config.services.postgresql;
+  mysql = config.services.mysql;
+
+  inherit (lib) mkDefault mkEnableOption mkIf mkOption;
+  inherit (lib) attrValues concatMapStringsSep literalExample optional optionalAttrs optionalString types;
+
+  user = "zabbix";
+  group = "zabbix";
+  runtimeDir = "/run/zabbix";
+  stateDir = "/var/lib/zabbix";
+  passwordFile = "${runtimeDir}/zabbix-dbpassword.conf";
+
+  moduleEnv = pkgs.symlinkJoin {
+    name = "zabbix-proxy-module-env";
+    paths = attrValues cfg.modules;
+  };
+
+  configFile = pkgs.writeText "zabbix_proxy.conf" ''
+    LogType = console
+    ListenIP = ${cfg.listen.ip}
+    ListenPort = ${toString cfg.listen.port}
+    Server = ${cfg.server}
+    # TODO: set to cfg.database.socket if database type is pgsql?
+    DBHost = ${optionalString (cfg.database.createLocally != true) cfg.database.host}
+    ${optionalString (cfg.database.createLocally != true) "DBPort = ${cfg.database.port}"}
+    DBName = ${cfg.database.name}
+    DBUser = ${cfg.database.user}
+    ${optionalString (cfg.database.passwordFile != null) "Include ${passwordFile}"}
+    ${optionalString (mysqlLocal && cfg.database.socket != null) "DBSocket = ${cfg.database.socket}"}
+    SocketDir = ${runtimeDir}
+    FpingLocation = /run/wrappers/bin/fping
+    ${optionalString (cfg.modules != {}) "LoadModulePath = ${moduleEnv}/lib"}
+    ${concatMapStringsSep "\n" (name: "LoadModule = ${name}") (builtins.attrNames cfg.modules)}
+    ${cfg.extraConfig}
+  '';
+
+  mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql";
+  pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql";
+
+in
+
+{
+  # interface
+
+  options = {
+
+    services.zabbixProxy = {
+      enable = mkEnableOption "the Zabbix Proxy";
+
+      server = mkOption {
+        type = types.str;
+        description = ''
+          The IP address or hostname of the Zabbix server to connect to.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default =
+          if cfg.database.type == "mysql" then pkgs.zabbix.proxy-mysql
+          else if cfg.database.type == "pgsql" then pkgs.zabbix.proxy-pgsql
+          else pkgs.zabbix.proxy-sqlite;
+        defaultText = "pkgs.zabbix.proxy-pgsql";
+        description = "The Zabbix package to use.";
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = with pkgs; [ nettools nmap traceroute ];
+        defaultText = "[ nettools nmap traceroute ]";
+        description = ''
+          Packages to be added to the Zabbix <envar>PATH</envar>.
+          Typically used to add executables for scripts, but can be anything.
+        '';
+      };
+
+      modules = mkOption {
+        type = types.attrsOf types.package;
+        description = "A set of modules to load.";
+        default = {};
+        example = literalExample ''
+          {
+            "dummy.so" = pkgs.stdenv.mkDerivation {
+              name = "zabbix-dummy-module-''${cfg.package.version}";
+              src = cfg.package.src;
+              buildInputs = [ cfg.package ];
+              sourceRoot = "zabbix-''${cfg.package.version}/src/modules/dummy";
+              installPhase = '''
+                mkdir -p $out/lib
+                cp dummy.so $out/lib/
+              ''';
+            };
+          }
+        '';
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "mysql" "pgsql" "sqlite" ];
+          example = "mysql";
+          default = "pgsql";
+          description = "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.int;
+          default = if cfg.database.type == "mysql" then mysql.port else pgsql.port;
+          description = "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = if cfg.database.type == "sqlite" then "${stateDir}/zabbix.db" else "zabbix";
+          defaultText = "zabbix";
+          description = "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "zabbix";
+          description = "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/zabbix-dbpassword";
+          description = ''
+            A file containing the password corresponding to
+            <option>database.user</option>.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/postgresql";
+          description = "Path to the unix socket file to use for authentication.";
+        };
+
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = "Whether to create a local database automatically.";
+        };
+      };
+
+      listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = ''
+            List of comma delimited IP addresses that the trapper should listen on.
+            Trapper will listen on all network interfaces if this parameter is missing.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 10051;
+          description = ''
+            Listen port for trapper.
+          '';
+        };
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Open ports in the firewall for the Zabbix Proxy.
+        '';
+      };
+
+      # TODO: for bonus points migrate this to https://github.com/NixOS/rfcs/pull/42
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Configuration that is injected verbatim into the configuration file. Refer to
+          <link xlink:href="https://www.zabbix.com/documentation/current/manual/appendix/config/zabbix_proxy"/>
+          for details on supported values.
+        '';
+      };
+
+    };
+
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = !config.services.zabbixServer.enable;
+        message = "Please choose one of services.zabbixServer or services.zabbixProxy.";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.user == user;
+        message = "services.zabbixProxy.database.user must be set to ${user} if services.zabbixProxy.database.createLocally is set to true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "a password cannot be specified if services.zabbixProxy.database.createLocally is set to true";
+      }
+    ];
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ];
+    };
+
+    services.mysql = optionalAttrs mysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    services.postgresql = optionalAttrs pgsqlLocal {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    users.users.${user} = {
+      description = "Zabbix daemon user";
+      uid = config.ids.uids.zabbix;
+      inherit group;
+    };
+
+    users.groups.${group} = {
+      gid = config.ids.gids.zabbix;
+    };
+
+    security.wrappers = {
+      fping.source = "${pkgs.fping}/bin/fping";
+    };
+
+    systemd.services.zabbix-proxy = {
+      description = "Zabbix Proxy";
+
+      wantedBy = [ "multi-user.target" ];
+      after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+
+      path = [ "/run/wrappers" ] ++ cfg.extraPackages;
+      preStart = optionalString pgsqlLocal ''
+        if ! test -e "${stateDir}/db-created"; then
+          cat ${cfg.package}/share/zabbix/database/postgresql/schema.sql | ${pgsql.package}/bin/psql ${cfg.database.name}
+          touch "${stateDir}/db-created"
+        fi
+      '' + optionalString mysqlLocal ''
+        if ! test -e "${stateDir}/db-created"; then
+          cat ${cfg.package}/share/zabbix/database/mysql/schema.sql | ${mysql.package}/bin/mysql ${cfg.database.name}
+          touch "${stateDir}/db-created"
+        fi
+      '' + optionalString (cfg.database.type == "sqlite") ''
+        if ! test -e "${cfg.database.name}"; then
+          ${pkgs.sqlite}/bin/sqlite3 "${cfg.database.name}" < ${cfg.package}/share/zabbix/database/sqlite3/schema.sql
+        fi
+      '' + optionalString (cfg.database.passwordFile != null) ''
+        # create a copy of the supplied password file in a format zabbix can consume
+        touch ${passwordFile}
+        chmod 0600 ${passwordFile}
+        echo -n "DBPassword = " > ${passwordFile}
+        cat ${cfg.database.passwordFile} >> ${passwordFile}
+      '';
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/sbin/zabbix_proxy zabbix_proxy -f --config ${configFile}";
+        Restart = "always";
+        RestartSec = 2;
+
+        User = user;
+        Group = group;
+        RuntimeDirectory = "zabbix";
+        StateDirectory = "zabbix";
+        PrivateTmp = true;
+      };
+    };
+
+  };
+
+}
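
A usage sketch for the Zabbix proxy module above, keeping the default locally created
PostgreSQL database; the upstream server address is a placeholder:

  services.zabbixProxy = {
    enable = true;
    server = "zabbix.example.org";   # placeholder upstream Zabbix server
    database.type = "pgsql";         # default; createLocally provisions the database
    openFirewall = true;             # open the trapper port (10051)
  };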
diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix
new file mode 100644
index 000000000000..b4e4378ce1e7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix
@@ -0,0 +1,299 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.zabbixServer;
+  pgsql = config.services.postgresql;
+  mysql = config.services.mysql;
+
+  inherit (lib) mkDefault mkEnableOption mkIf mkOption;
+  inherit (lib) attrValues concatMapStringsSep literalExample optional optionalAttrs optionalString types;
+
+  user = "zabbix";
+  group = "zabbix";
+  runtimeDir = "/run/zabbix";
+  stateDir = "/var/lib/zabbix";
+  passwordFile = "${runtimeDir}/zabbix-dbpassword.conf";
+
+  moduleEnv = pkgs.symlinkJoin {
+    name = "zabbix-server-module-env";
+    paths = attrValues cfg.modules;
+  };
+
+  configFile = pkgs.writeText "zabbix_server.conf" ''
+    LogType = console
+    ListenIP = ${cfg.listen.ip}
+    ListenPort = ${toString cfg.listen.port}
+    # TODO: set to cfg.database.socket if database type is pgsql?
+    DBHost = ${optionalString (cfg.database.createLocally != true) cfg.database.host}
+    ${optionalString (cfg.database.createLocally != true) "DBPort = ${cfg.database.port}"}
+    DBName = ${cfg.database.name}
+    DBUser = ${cfg.database.user}
+    ${optionalString (cfg.database.passwordFile != null) "Include ${passwordFile}"}
+    ${optionalString (mysqlLocal && cfg.database.socket != null) "DBSocket = ${cfg.database.socket}"}
+    PidFile = ${runtimeDir}/zabbix_server.pid
+    SocketDir = ${runtimeDir}
+    FpingLocation = /run/wrappers/bin/fping
+    ${optionalString (cfg.modules != {}) "LoadModulePath = ${moduleEnv}/lib"}
+    ${concatMapStringsSep "\n" (name: "LoadModule = ${name}") (builtins.attrNames cfg.modules)}
+    ${cfg.extraConfig}
+  '';
+
+  mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql";
+  pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql";
+
+in
+
+{
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "zabbixServer" "dbServer" ] [ "services" "zabbixServer" "database" "host" ])
+    (lib.mkRemovedOptionModule [ "services" "zabbixServer" "dbPassword" ] "Use services.zabbixServer.database.passwordFile instead.")
+  ];
+
+  # interface
+
+  options = {
+
+    services.zabbixServer = {
+      enable = mkEnableOption "the Zabbix Server";
+
+      package = mkOption {
+        type = types.package;
+        default = if cfg.database.type == "mysql" then pkgs.zabbix.server-mysql else pkgs.zabbix.server-pgsql;
+        defaultText = "pkgs.zabbix.server-pgsql";
+        description = "The Zabbix package to use.";
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = with pkgs; [ nettools nmap traceroute ];
+        defaultText = "[ nettools nmap traceroute ]";
+        description = ''
+          Packages to be added to the Zabbix <envar>PATH</envar>.
+          Typically used to add executables for scripts, but can be anything.
+        '';
+      };
+
+      modules = mkOption {
+        type = types.attrsOf types.package;
+        description = "A set of modules to load.";
+        default = {};
+        example = literalExample ''
+          {
+            "dummy.so" = pkgs.stdenv.mkDerivation {
+              name = "zabbix-dummy-module-''${cfg.package.version}";
+              src = cfg.package.src;
+              buildInputs = [ cfg.package ];
+              sourceRoot = "zabbix-''${cfg.package.version}/src/modules/dummy";
+              installPhase = '''
+                mkdir -p $out/lib
+                cp dummy.so $out/lib/
+              ''';
+            };
+          }
+        '';
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "mysql" "pgsql" ];
+          example = "mysql";
+          default = "pgsql";
+          description = "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.int;
+          default = if cfg.database.type == "mysql" then mysql.port else pgsql.port;
+          description = "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "zabbix";
+          description = "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "zabbix";
+          description = "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/zabbix-dbpassword";
+          description = ''
+            A file containing the password corresponding to
+            <option>database.user</option>.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/postgresql";
+          description = "Path to the unix socket file to use for authentication.";
+        };
+
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = "Whether to create a local database automatically.";
+        };
+      };
+
+      listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = ''
+            List of comma delimited IP addresses that the trapper should listen on.
+            Trapper will listen on all network interfaces if this parameter is missing.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 10051;
+          description = ''
+            Listen port for trapper.
+          '';
+        };
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Open ports in the firewall for the Zabbix Server.
+        '';
+      };
+
+      # TODO: for bonus points migrate this to https://github.com/NixOS/rfcs/pull/42
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Configuration that is injected verbatim into the configuration file. Refer to
+          <link xlink:href="https://www.zabbix.com/documentation/current/manual/appendix/config/zabbix_server"/>
+          for details on supported values.
+        '';
+      };
+
+    };
+
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.database.createLocally -> cfg.database.user == user;
+        message = "services.zabbixServer.database.user must be set to ${user} if services.zabbixServer.database.createLocally is set to true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "a password cannot be specified if services.zabbixServer.database.createLocally is set to true";
+      }
+    ];
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ];
+    };
+
+    services.mysql = optionalAttrs mysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    services.postgresql = optionalAttrs pgsqlLocal {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    users.users.${user} = {
+      description = "Zabbix daemon user";
+      uid = config.ids.uids.zabbix;
+      inherit group;
+    };
+
+    users.groups.${group} = {
+      gid = config.ids.gids.zabbix;
+    };
+
+    security.wrappers = {
+      fping.source = "${pkgs.fping}/bin/fping";
+    };
+
+    systemd.services.zabbix-server = {
+      description = "Zabbix Server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+
+      path = [ "/run/wrappers" ] ++ cfg.extraPackages;
+      preStart = ''
+        # pre 19.09 compatibility
+        if test -e "${runtimeDir}/db-created"; then
+          mv "${runtimeDir}/db-created" "${stateDir}/"
+        fi
+      '' + optionalString pgsqlLocal ''
+        if ! test -e "${stateDir}/db-created"; then
+          cat ${cfg.package}/share/zabbix/database/postgresql/schema.sql | ${pgsql.package}/bin/psql ${cfg.database.name}
+          cat ${cfg.package}/share/zabbix/database/postgresql/images.sql | ${pgsql.package}/bin/psql ${cfg.database.name}
+          cat ${cfg.package}/share/zabbix/database/postgresql/data.sql | ${pgsql.package}/bin/psql ${cfg.database.name}
+          touch "${stateDir}/db-created"
+        fi
+      '' + optionalString mysqlLocal ''
+        if ! test -e "${stateDir}/db-created"; then
+          cat ${cfg.package}/share/zabbix/database/mysql/schema.sql | ${mysql.package}/bin/mysql ${cfg.database.name}
+          cat ${cfg.package}/share/zabbix/database/mysql/images.sql | ${mysql.package}/bin/mysql ${cfg.database.name}
+          cat ${cfg.package}/share/zabbix/database/mysql/data.sql | ${mysql.package}/bin/mysql ${cfg.database.name}
+          touch "${stateDir}/db-created"
+        fi
+      '' + optionalString (cfg.database.passwordFile != null) ''
+        # create a copy of the supplied password file in a format zabbix can consume
+        touch ${passwordFile}
+        chmod 0600 ${passwordFile}
+        echo -n "DBPassword = " > ${passwordFile}
+        cat ${cfg.database.passwordFile} >> ${passwordFile}
+      '';
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/sbin/zabbix_server zabbix_server -f --config ${configFile}";
+        Restart = "always";
+        RestartSec = 2;
+
+        User = user;
+        Group = group;
+        RuntimeDirectory = "zabbix";
+        StateDirectory = "zabbix";
+        PrivateTmp = true;
+      };
+    };
+
+    systemd.services.httpd.after =
+      optional (config.services.zabbixWeb.enable && mysqlLocal) "mysql.service" ++
+      optional (config.services.zabbixWeb.enable && pgsqlLocal) "postgresql.service";
+
+  };
+
+}
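
A usage sketch for the Zabbix server module above, with the default locally created
PostgreSQL database:

  services.zabbixServer = {
    enable = true;
    database.type = "pgsql";   # default; createLocally provisions the database and user
    openFirewall = true;       # open the trapper port (10051) for agents and proxies
  };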