diff options
Diffstat (limited to 'nixpkgs/nixos/modules/services/monitoring')
59 files changed, 7336 insertions, 0 deletions
diff --git a/nixpkgs/nixos/modules/services/monitoring/alerta.nix b/nixpkgs/nixos/modules/services/monitoring/alerta.nix new file mode 100644 index 000000000000..8f4258e26ded --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/alerta.nix @@ -0,0 +1,116 @@ +{ options, config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.alerta; + + alertaConf = pkgs.writeTextFile { + name = "alertad.conf"; + text = '' + DATABASE_URL = '${cfg.databaseUrl}' + DATABASE_NAME = '${cfg.databaseName}' + LOG_FILE = '${cfg.logDir}/alertad.log' + LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + CORS_ORIGINS = [ ${concatMapStringsSep ", " (s: "\"" + s + "\"") cfg.corsOrigins} ]; + AUTH_REQUIRED = ${if cfg.authenticationRequired then "True" else "False"} + SIGNUP_ENABLED = ${if cfg.signupEnabled then "True" else "False"} + ${cfg.extraConfig} + ''; + }; +in +{ + options.services.alerta = { + enable = mkEnableOption "alerta"; + + port = mkOption { + type = types.int; + default = 5000; + description = "Port of Alerta"; + }; + + bind = mkOption { + type = types.str; + default = "0.0.0.0"; + example = literalExample "0.0.0.0"; + description = "Address to bind to. 
The default is to bind to all addresses"; + }; + + logDir = mkOption { + type = types.path; + description = "Location where the logfiles are stored"; + default = "/var/log/alerta"; + }; + + databaseUrl = mkOption { + type = types.str; + description = "URL of the MongoDB or PostgreSQL database to connect to"; + default = "mongodb://localhost"; + example = "mongodb://localhost"; + }; + + databaseName = mkOption { + type = types.str; + description = "Name of the database instance to connect to"; + default = "monitoring"; + example = "monitoring"; + }; + + corsOrigins = mkOption { + type = types.listOf types.str; + description = "List of URLs that can access the API for Cross-Origin Resource Sharing (CORS)"; + example = [ "http://localhost" "http://localhost:5000" ]; + default = [ "http://localhost" "http://localhost:5000" ]; + }; + + authenticationRequired = mkOption { + type = types.bool; + description = "Whether users must authenticate when using the web UI or command-line tool"; + default = false; + }; + + signupEnabled = mkOption { + type = types.bool; + description = "Whether to prevent sign-up of new users via the web UI"; + default = true; + }; + + extraConfig = mkOption { + description = "These lines go into alertad.conf verbatim."; + default = ""; + type = types.lines; + }; + }; + + config = mkIf cfg.enable { + systemd.services.alerta = { + description = "Alerta Monitoring System"; + wantedBy = [ "multi-user.target" ]; + after = [ "networking.target" ]; + environment = { + ALERTA_SVR_CONF_FILE = alertaConf; + }; + serviceConfig = { + ExecStart = "${pkgs.python36Packages.alerta-server}/bin/alertad run --port ${toString cfg.port} --host ${cfg.bind}"; + User = "alerta"; + Group = "alerta"; + PermissionsStartOnly = true; + }; + preStart = '' + mkdir -p ${cfg.logDir} + chown alerta:alerta ${cfg.logDir} + ''; + }; + + environment.systemPackages = [ pkgs.python36Packages.alerta ]; + + users.users.alerta = { + uid = config.ids.uids.alerta; + description = "Alerta 
user"; + }; + + users.groups.alerta = { + gid = config.ids.gids.alerta; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/apcupsd.nix b/nixpkgs/nixos/modules/services/monitoring/apcupsd.nix new file mode 100644 index 000000000000..7ee870183cac --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/apcupsd.nix @@ -0,0 +1,191 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.apcupsd; + + configFile = pkgs.writeText "apcupsd.conf" '' + ## apcupsd.conf v1.1 ## + # apcupsd complains if the first line is not like above. + ${cfg.configText} + SCRIPTDIR ${toString scriptDir} + ''; + + # List of events from "man apccontrol" + eventList = [ + "annoyme" + "battattach" + "battdetach" + "changeme" + "commfailure" + "commok" + "doreboot" + "doshutdown" + "emergency" + "failing" + "killpower" + "loadlimit" + "mainsback" + "onbattery" + "offbattery" + "powerout" + "remotedown" + "runlimit" + "timeout" + "startselftest" + "endselftest" + ]; + + shellCmdsForEventScript = eventname: commands: '' + echo "#!${pkgs.runtimeShell}" > "$out/${eventname}" + echo '${commands}' >> "$out/${eventname}" + chmod a+x "$out/${eventname}" + ''; + + eventToShellCmds = event: if builtins.hasAttr event cfg.hooks then (shellCmdsForEventScript event (builtins.getAttr event cfg.hooks)) else ""; + + scriptDir = pkgs.runCommand "apcupsd-scriptdir" {} ('' + mkdir "$out" + # Copy SCRIPTDIR from apcupsd package + cp -r ${pkgs.apcupsd}/etc/apcupsd/* "$out"/ + # Make the files writeable (nix will unset the write bits afterwards) + chmod u+w "$out"/* + # Remove the sample event notification scripts, because they don't work + # anyways (they try to send mail to "root" with the "mail" command) + (cd "$out" && rm changeme commok commfailure onbattery offbattery) + # Remove the sample apcupsd.conf file (we're generating our own) + rm "$out/apcupsd.conf" + # Set the SCRIPTDIR= line in apccontrol to the dir we're creating now + sed -i -e 
"s|^SCRIPTDIR=.*|SCRIPTDIR=$out|" "$out/apccontrol" + '' + concatStringsSep "\n" (map eventToShellCmds eventList) + + ); + +in + +{ + + ###### interface + + options = { + + services.apcupsd = { + + enable = mkOption { + default = false; + type = types.bool; + description = '' + Whether to enable the APC UPS daemon. apcupsd monitors your UPS and + permits orderly shutdown of your computer in the event of a power + failure. User manual: http://www.apcupsd.com/manual/manual.html. + Note that apcupsd runs as root (to allow shutdown of computer). + You can check the status of your UPS with the "apcaccess" command. + ''; + }; + + configText = mkOption { + default = '' + UPSTYPE usb + NISIP 127.0.0.1 + BATTERYLEVEL 50 + MINUTES 5 + ''; + type = types.string; + description = '' + Contents of the runtime configuration file, apcupsd.conf. The default + settings makes apcupsd autodetect USB UPSes, limit network access to + localhost and shutdown the system when the battery level is below 50 + percent, or when the UPS has calculated that it has 5 minutes or less + of remaining power-on time. See man apcupsd.conf for details. + ''; + }; + + hooks = mkOption { + default = {}; + example = { + doshutdown = ''# shell commands to notify that the computer is shutting down''; + }; + type = types.attrsOf types.string; + description = '' + Each attribute in this option names an apcupsd event and the string + value it contains will be executed in a shell, in response to that + event (prior to the default action). See "man apccontrol" for the + list of events and what they represent. + + A hook script can stop apccontrol from doing its default action by + exiting with value 99. Do not do this unless you know what you're + doing. 
+ ''; + }; + + }; + + }; + + + ###### implementation + + config = mkIf cfg.enable { + + assertions = [ { + assertion = let hooknames = builtins.attrNames cfg.hooks; in all (x: elem x eventList) hooknames; + message = '' + One (or more) attribute names in services.apcupsd.hooks are invalid. + Current attribute names: ${toString (builtins.attrNames cfg.hooks)} + Valid attribute names : ${toString eventList} + ''; + } ]; + + # Give users access to the "apcaccess" tool + environment.systemPackages = [ pkgs.apcupsd ]; + + # NOTE 1: apcupsd runs as root because it needs permission to run + # "shutdown" + # + # NOTE 2: When apcupsd calls "wall", it prints an error because stdout is + # not connected to a tty (it is connected to the journal): + # wall: cannot get tty name: Inappropriate ioctl for device + # The message still gets through. + systemd.services.apcupsd = { + description = "APC UPS Daemon"; + wantedBy = [ "multi-user.target" ]; + preStart = "mkdir -p /run/apcupsd/"; + serviceConfig = { + ExecStart = "${pkgs.apcupsd}/bin/apcupsd -b -f ${configFile} -d1"; + # TODO: When apcupsd has initiated a shutdown, systemd always ends up + # waiting for it to stop ("A stop job is running for UPS daemon"). This + # is weird, because in the journal one can clearly see that apcupsd has + # received the SIGTERM signal and has already quit (or so it seems). + # This reduces the wait time from 90 seconds (default) to just 5. Then + # systemd kills it with SIGKILL. + TimeoutStopSec = 5; + }; + unitConfig.Documentation = "man:apcupsd(8)"; + }; + + # A special service to tell the UPS to power down/hibernate just before the + # computer shuts down. (The UPS has a built in delay before it actually + # shuts off power.) 
Copied from here: + # http://forums.opensuse.org/english/get-technical-help-here/applications/479499-apcupsd-systemd-killpower-issues.html + systemd.services.apcupsd-killpower = { + description = "APC UPS Kill Power"; + after = [ "shutdown.target" ]; # append umount.target? + before = [ "final.target" ]; + wantedBy = [ "shutdown.target" ]; + unitConfig = { + ConditionPathExists = "/run/apcupsd/powerfail"; + DefaultDependencies = "no"; + }; + serviceConfig = { + Type = "oneshot"; + ExecStart = "${pkgs.apcupsd}/bin/apcupsd --killpower -f ${configFile}"; + TimeoutSec = "infinity"; + StandardOutput = "tty"; + RemainAfterExit = "yes"; + }; + }; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/arbtt.nix b/nixpkgs/nixos/modules/services/monitoring/arbtt.nix new file mode 100644 index 000000000000..b41a3c7b5016 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/arbtt.nix @@ -0,0 +1,63 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.arbtt; +in { + options = { + services.arbtt = { + enable = mkOption { + type = types.bool; + default = false; + description = '' + Enable the arbtt statistics capture service. + ''; + }; + + package = mkOption { + type = types.package; + default = pkgs.haskellPackages.arbtt; + defaultText = "pkgs.haskellPackages.arbtt"; + example = literalExample "pkgs.haskellPackages.arbtt"; + description = '' + The package to use for the arbtt binaries. + ''; + }; + + logFile = mkOption { + type = types.str; + default = "%h/.arbtt/capture.log"; + example = "/home/username/.arbtt-capture.log"; + description = '' + The log file for captured samples. + ''; + }; + + sampleRate = mkOption { + type = types.int; + default = 60; + example = 120; + description = '' + The sampling interval in seconds. 
+ ''; + }; + }; + }; + + config = mkIf cfg.enable { + systemd.user.services.arbtt = { + description = "arbtt statistics capture service"; + wantedBy = [ "graphical-session.target" ]; + partOf = [ "graphical-session.target" ]; + + serviceConfig = { + Type = "simple"; + ExecStart = "${cfg.package}/bin/arbtt-capture --logfile=${cfg.logFile} --sample-rate=${toString cfg.sampleRate}"; + Restart = "always"; + }; + }; + }; + + meta.maintainers = [ maintainers.michaelpj ]; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/bosun.nix b/nixpkgs/nixos/modules/services/monitoring/bosun.nix new file mode 100644 index 000000000000..8bf741adb6e3 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/bosun.nix @@ -0,0 +1,166 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.bosun; + + configFile = pkgs.writeText "bosun.conf" '' + ${optionalString (cfg.opentsdbHost !=null) "tsdbHost = ${cfg.opentsdbHost}"} + ${optionalString (cfg.influxHost !=null) "influxHost = ${cfg.influxHost}"} + httpListen = ${cfg.listenAddress} + stateFile = ${cfg.stateFile} + ledisDir = ${cfg.ledisDir} + checkFrequency = ${cfg.checkFrequency} + + ${cfg.extraConfig} + ''; + +in { + + options = { + + services.bosun = { + + enable = mkOption { + type = types.bool; + default = false; + description = '' + Whether to run bosun. + ''; + }; + + package = mkOption { + type = types.package; + default = pkgs.bosun; + defaultText = "pkgs.bosun"; + example = literalExample "pkgs.bosun"; + description = '' + bosun binary to use. + ''; + }; + + user = mkOption { + type = types.string; + default = "bosun"; + description = '' + User account under which bosun runs. + ''; + }; + + group = mkOption { + type = types.string; + default = "bosun"; + description = '' + Group account under which bosun runs. + ''; + }; + + opentsdbHost = mkOption { + type = types.nullOr types.string; + default = "localhost:4242"; + description = '' + Host and port of the OpenTSDB database that stores bosun data. 
+ To disable opentsdb you can pass null as parameter. + ''; + }; + + influxHost = mkOption { + type = types.nullOr types.string; + default = null; + example = "localhost:8086"; + description = '' + Host and port of the influxdb database. + ''; + }; + + listenAddress = mkOption { + type = types.string; + default = ":8070"; + description = '' + The host address and port that bosun's web interface will listen on. + ''; + }; + + stateFile = mkOption { + type = types.path; + default = "/var/lib/bosun/bosun.state"; + description = '' + Path to bosun's state file. + ''; + }; + + ledisDir = mkOption { + type = types.path; + default = "/var/lib/bosun/ledis_data"; + description = '' + Path to bosun's ledis data dir + ''; + }; + + checkFrequency = mkOption { + type = types.str; + default = "5m"; + description = '' + Bosun's check frequency + ''; + }; + + extraConfig = mkOption { + type = types.lines; + default = ""; + description = '' + Extra configuration options for Bosun. You should describe your + desired templates, alerts, macros, etc through this configuration + option. 
+ + A detailed description of the supported syntax can be found at-spi2-atk + http://bosun.org/configuration.html + ''; + }; + + }; + + }; + + config = mkIf cfg.enable { + + systemd.services.bosun = { + description = "bosun metrics collector (part of Bosun)"; + wantedBy = [ "multi-user.target" ]; + + preStart = '' + mkdir -p "$(dirname "${cfg.stateFile}")"; + touch "${cfg.stateFile}" + touch "${cfg.stateFile}.tmp" + + mkdir -p "${cfg.ledisDir}"; + + if [ "$(id -u)" = 0 ]; then + chown ${cfg.user}:${cfg.group} "${cfg.stateFile}" + chown ${cfg.user}:${cfg.group} "${cfg.stateFile}.tmp" + chown ${cfg.user}:${cfg.group} "${cfg.ledisDir}" + fi + ''; + + serviceConfig = { + PermissionsStartOnly = true; + User = cfg.user; + Group = cfg.group; + ExecStart = '' + ${cfg.package.bin}/bin/bosun -c ${configFile} + ''; + }; + }; + + users.users.bosun = { + description = "bosun user"; + group = "bosun"; + uid = config.ids.uids.bosun; + }; + + users.groups.bosun.gid = config.ids.gids.bosun; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/cadvisor.nix b/nixpkgs/nixos/modules/services/monitoring/cadvisor.nix new file mode 100644 index 000000000000..6ca420a05b23 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/cadvisor.nix @@ -0,0 +1,130 @@ +{ config, pkgs, lib, ... 
}: + +with lib; + +let + cfg = config.services.cadvisor; + +in { + options = { + services.cadvisor = { + enable = mkOption { + default = false; + type = types.bool; + description = "Whether to enable cadvisor service."; + }; + + listenAddress = mkOption { + default = "127.0.0.1"; + type = types.str; + description = "Cadvisor listening host"; + }; + + port = mkOption { + default = 8080; + type = types.int; + description = "Cadvisor listening port"; + }; + + storageDriver = mkOption { + default = null; + type = types.nullOr types.str; + example = "influxdb"; + description = "Cadvisor storage driver."; + }; + + storageDriverHost = mkOption { + default = "localhost:8086"; + type = types.str; + description = "Cadvisor storage driver host."; + }; + + storageDriverDb = mkOption { + default = "root"; + type = types.str; + description = "Cadvisord storage driver database name."; + }; + + storageDriverUser = mkOption { + default = "root"; + type = types.str; + description = "Cadvisor storage driver username."; + }; + + storageDriverPassword = mkOption { + default = "root"; + type = types.str; + description = '' + Cadvisor storage driver password. + + Warning: this password is stored in the world-readable Nix store. It's + recommended to use the <option>storageDriverPasswordFile</option> option + since that gives you control over the security of the password. + <option>storageDriverPasswordFile</option> also takes precedence over <option>storageDriverPassword</option>. + ''; + }; + + storageDriverPasswordFile = mkOption { + type = types.str; + description = '' + File that contains the cadvisor storage driver password. + + <option>storageDriverPasswordFile</option> takes precedence over <option>storageDriverPassword</option> + + Warning: when <option>storageDriverPassword</option> is non-empty this defaults to a file in the + world-readable Nix store that contains the value of <option>storageDriverPassword</option>. 
+ + It's recommended to override this with a path not in the Nix store. + Tip: use <link xlink:href='https://nixos.org/nixops/manual/#idm140737318306400'>nixops key management</link> + ''; + }; + + storageDriverSecure = mkOption { + default = false; + type = types.bool; + description = "Cadvisor storage driver, enable secure communication."; + }; + }; + }; + + config = mkMerge [ + { services.cadvisor.storageDriverPasswordFile = mkIf (cfg.storageDriverPassword != "") ( + mkDefault (toString (pkgs.writeTextFile { + name = "cadvisor-storage-driver-password"; + text = cfg.storageDriverPassword; + })) + ); + } + + (mkIf cfg.enable { + systemd.services.cadvisor = { + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" "docker.service" "influxdb.service" ]; + + postStart = mkBefore '' + until ${pkgs.curl.bin}/bin/curl -s -o /dev/null 'http://${cfg.listenAddress}:${toString cfg.port}/containers/'; do + sleep 1; + done + ''; + + script = '' + exec ${pkgs.cadvisor}/bin/cadvisor \ + -logtostderr=true \ + -listen_ip="${cfg.listenAddress}" \ + -port="${toString cfg.port}" \ + ${optionalString (cfg.storageDriver != null) '' + -storage_driver "${cfg.storageDriver}" \ + -storage_driver_user "${cfg.storageDriverHost}" \ + -storage_driver_db "${cfg.storageDriverDb}" \ + -storage_driver_user "${cfg.storageDriverUser}" \ + -storage_driver_password "$(cat "${cfg.storageDriverPasswordFile}")" \ + ${optionalString cfg.storageDriverSecure "-storage_driver_secure"} + ''} + ''; + + serviceConfig.TimeoutStartSec=300; + }; + virtualisation.docker.enable = mkDefault true; + }) + ]; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/collectd.nix b/nixpkgs/nixos/modules/services/monitoring/collectd.nix new file mode 100644 index 000000000000..6606980cdad8 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/collectd.nix @@ -0,0 +1,104 @@ +{ config, pkgs, lib, ... 
}: + +with lib; + +let + cfg = config.services.collectd; + + conf = pkgs.writeText "collectd.conf" '' + BaseDir "${cfg.dataDir}" + AutoLoadPlugin ${boolToString cfg.autoLoadPlugin} + Hostname "${config.networking.hostName}" + + LoadPlugin syslog + <Plugin "syslog"> + LogLevel "info" + NotifyLevel "OKAY" + </Plugin> + + ${concatMapStrings (f: '' + Include "${f}" + '') cfg.include} + + ${cfg.extraConfig} + ''; + +in { + options.services.collectd = with types; { + enable = mkEnableOption "collectd agent"; + + package = mkOption { + default = pkgs.collectd; + defaultText = "pkgs.collectd"; + description = '' + Which collectd package to use. + ''; + type = package; + }; + + user = mkOption { + default = "collectd"; + description = '' + User under which to run collectd. + ''; + type = nullOr str; + }; + + dataDir = mkOption { + default = "/var/lib/collectd"; + description = '' + Data directory for collectd agent. + ''; + type = path; + }; + + autoLoadPlugin = mkOption { + default = false; + description = '' + Enable plugin autoloading. + ''; + type = bool; + }; + + include = mkOption { + default = []; + description = '' + Additional paths to load config from. + ''; + type = listOf str; + }; + + extraConfig = mkOption { + default = ""; + description = '' + Extra configuration for collectd. 
+ ''; + type = lines; + }; + + }; + + config = mkIf cfg.enable { + systemd.services.collectd = { + description = "Collectd Monitoring Agent"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + + serviceConfig = { + ExecStart = "${cfg.package}/sbin/collectd -C ${conf} -f"; + User = cfg.user; + PermissionsStartOnly = true; + }; + + preStart = '' + mkdir -p "${cfg.dataDir}" + chmod 755 "${cfg.dataDir}" + chown -R ${cfg.user} "${cfg.dataDir}" + ''; + }; + + users.users = optional (cfg.user == "collectd") { + name = "collectd"; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix b/nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix new file mode 100644 index 000000000000..88ca3a9227d2 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix @@ -0,0 +1,34 @@ +# A general watchdog for the linux operating system that should run in the +# background at all times to ensure a realtime process won't hang the machine +{ config, lib, pkgs, ... }: + +with lib; + +let + + inherit (pkgs) das_watchdog; + +in { + ###### interface + + options = { + services.das_watchdog.enable = mkEnableOption "realtime watchdog"; + }; + + ###### implementation + + config = mkIf config.services.das_watchdog.enable { + environment.systemPackages = [ das_watchdog ]; + systemd.services.das_watchdog = { + description = "Watchdog to ensure a realtime process won't hang the machine"; + after = [ "multi-user.target" "sound.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + User = "root"; + Type = "simple"; + ExecStart = "${das_watchdog}/bin/das_watchdog"; + RemainAfterExit = true; + }; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix b/nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix new file mode 100644 index 000000000000..5434fe99347d --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix @@ -0,0 +1,271 @@ +{ config, lib, pkgs, ... 
}: + +with lib; + +let + cfg = config.services.datadog-agent; + + ddConf = { + dd_url = "https://app.datadoghq.com"; + skip_ssl_validation = false; + confd_path = "/etc/datadog-agent/conf.d"; + additional_checksd = "/etc/datadog-agent/checks.d"; + use_dogstatsd = true; + } + // optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; } + // optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; } + // optionalAttrs (cfg.tags != null ) { tags = concatStringsSep ", " cfg.tags; } + // optionalAttrs (cfg.enableLiveProcessCollection) { process_config = { enabled = "true"; }; } + // optionalAttrs (cfg.enableTraceAgent) { apm_config = { enabled = true; }; } + // cfg.extraConfig; + + # Generate Datadog configuration files for each configured checks. + # This works because check configurations have predictable paths, + # and because JSON is a valid subset of YAML. + makeCheckConfigs = entries: mapAttrsToList (name: conf: { + source = pkgs.writeText "${name}-check-conf.yaml" (builtins.toJSON conf); + target = "datadog-agent/conf.d/${name}.d/conf.yaml"; + }) entries; + + defaultChecks = { + disk = cfg.diskCheck; + network = cfg.networkCheck; + }; + + # Assemble all check configurations and the top-level agent + # configuration. + etcfiles = with pkgs; with builtins; [{ + source = writeText "datadog.yaml" (toJSON ddConf); + target = "datadog-agent/datadog.yaml"; + }] ++ makeCheckConfigs (cfg.checks // defaultChecks); + + # Apply the configured extraIntegrations to the provided agent + # package. See the documentation of `dd-agent/integrations-core.nix` + # for detailed information on this. 
+ datadogPkg = cfg.package.overrideAttrs(_: { + python = (pkgs.datadog-integrations-core cfg.extraIntegrations).python; + }); +in { + options.services.datadog-agent = { + enable = mkOption { + description = '' + Whether to enable the datadog-agent v6 monitoring service + ''; + default = false; + type = types.bool; + }; + + package = mkOption { + default = pkgs.datadog-agent; + defaultText = "pkgs.datadog-agent"; + description = '' + Which DataDog v6 agent package to use. Note that the provided + package is expected to have an overridable `python`-attribute + which configures the Python environment with the Datadog + checks. + ''; + type = types.package; + }; + + apiKeyFile = mkOption { + description = '' + Path to a file containing the Datadog API key to associate the + agent with your account. + ''; + example = "/run/keys/datadog_api_key"; + type = types.path; + }; + + tags = mkOption { + description = "The tags to mark this Datadog agent"; + example = [ "test" "service" ]; + default = null; + type = types.nullOr (types.listOf types.str); + }; + + hostname = mkOption { + description = "The hostname to show in the Datadog dashboard (optional)"; + default = null; + example = "mymachine.mydomain"; + type = types.uniq (types.nullOr types.string); + }; + + logLevel = mkOption { + description = "Logging verbosity."; + default = null; + type = types.nullOr (types.enum ["DEBUG" "INFO" "WARN" "ERROR"]); + }; + + extraIntegrations = mkOption { + default = {}; + type = types.attrs; + + description = '' + Extra integrations from the Datadog core-integrations + repository that should be built and included. + + By default the included integrations are disk, mongo, network, + nginx and postgres. + + To include additional integrations the name of the derivation + and a function to filter its dependencies from the Python + package set must be provided. 
+ ''; + + example = { + ntp = (pythonPackages: [ pythonPackages.ntplib ]); + }; + }; + + extraConfig = mkOption { + default = {}; + type = types.attrs; + description = '' + Extra configuration options that will be merged into the + main config file <filename>datadog.yaml</filename>. + ''; + }; + + enableLiveProcessCollection = mkOption { + description = '' + Whether to enable the live process collection agent. + ''; + default = false; + type = types.bool; + }; + + enableTraceAgent = mkOption { + description = '' + Whether to enable the trace agent. + ''; + default = false; + type = types.bool; + }; + + checks = mkOption { + description = '' + Configuration for all Datadog checks. Keys of this attribute + set will be used as the name of the check to create the + appropriate configuration in `conf.d/$check.d/conf.yaml`. + + The configuration is converted into JSON from the plain Nix + language configuration, meaning that you should write + configuration adhering to Datadog's documentation - but in Nix + language. + + Refer to the implementation of this module (specifically the + definition of `defaultChecks`) for an example. + + Note: The 'disk' and 'network' check are configured in + separate options because they exist by default. Attempting to + override their configuration here will have no effect. + ''; + + example = { + http_check = { + init_config = null; # sic! + instances = [ + { + name = "some-service"; + url = "http://localhost:1337/healthz"; + tags = [ "some-service" ]; + } + ]; + }; + }; + + default = {}; + + # sic! The structure of the values is up to the check, so we can + # not usefully constrain the type further. 
+ type = with types; attrsOf attrs; + }; + + diskCheck = mkOption { + description = "Disk check config"; + type = types.attrs; + default = { + init_config = {}; + instances = [ { use-mount = "no"; } ]; + }; + }; + + networkCheck = mkOption { + description = "Network check config"; + type = types.attrs; + default = { + init_config = {}; + # Network check only supports one configured instance + instances = [ { collect_connection_state = false; + excluded_interfaces = [ "lo" "lo0" ]; } ]; + }; + }; + }; + config = mkIf cfg.enable { + environment.systemPackages = [ datadogPkg pkgs.sysstat pkgs.procps ]; + + users.extraUsers.datadog = { + description = "Datadog Agent User"; + uid = config.ids.uids.datadog; + group = "datadog"; + home = "/var/log/datadog/"; + createHome = true; + }; + + users.extraGroups.datadog.gid = config.ids.gids.datadog; + + systemd.services = let + makeService = attrs: recursiveUpdate { + path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + User = "datadog"; + Group = "datadog"; + Restart = "always"; + RestartSec = 2; + }; + restartTriggers = [ datadogPkg ] ++ map (etc: etc.source) etcfiles; + } attrs; + in { + datadog-agent = makeService { + description = "Datadog agent monitor"; + preStart = '' + chown -R datadog: /etc/datadog-agent + rm -f /etc/datadog-agent/auth_token + ''; + script = '' + export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile}) + exec ${datadogPkg}/bin/agent start -c /etc/datadog-agent/datadog.yaml + ''; + serviceConfig.PermissionsStartOnly = true; + }; + + dd-jmxfetch = lib.mkIf (lib.hasAttr "jmx" cfg.checks) (makeService { + description = "Datadog JMX Fetcher"; + path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ]; + serviceConfig.ExecStart = "${datadogPkg}/bin/dd-jmxfetch"; + }); + + datadog-process-agent = lib.mkIf cfg.enableLiveProcessCollection (makeService { + description = "Datadog Live Process Agent"; + path = [ ]; + script = '' + export 
DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile}) + ${pkgs.datadog-process-agent}/bin/agent --config /etc/datadog-agent/datadog.yaml + ''; + }); + + datadog-trace-agent = lib.mkIf cfg.enableTraceAgent (makeService { + description = "Datadog Trace Agent"; + path = [ ]; + script = '' + export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile}) + ${pkgs.datadog-trace-agent}/bin/trace-agent -config /etc/datadog-agent/datadog.yaml + ''; + }); + + }; + + environment.etc = etcfiles; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent-defaults.nix b/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent-defaults.nix new file mode 100644 index 000000000000..045128197421 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent-defaults.nix @@ -0,0 +1,8 @@ +# Generated using update-dd-agent-default, please re-run after updating dd-agent. DO NOT EDIT MANUALLY. +[ + "auto_conf" + "agent_metrics.yaml.default" + "disk.yaml.default" + "network.yaml.default" + "ntp.yaml.default" +] diff --git a/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent.nix b/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent.nix new file mode 100644 index 000000000000..abc8d65d58f2 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/dd-agent/dd-agent.nix @@ -0,0 +1,239 @@ +{ config, lib, pkgs, ... 
}: + +with lib; + +let + cfg = config.services.dd-agent; + + ddConf = pkgs.writeText "datadog.conf" '' + [Main] + dd_url: https://app.datadoghq.com + skip_ssl_validation: no + api_key: ${cfg.api_key} + ${optionalString (cfg.hostname != null) "hostname: ${cfg.hostname}"} + + collector_log_file: /var/log/datadog/collector.log + forwarder_log_file: /var/log/datadog/forwarder.log + dogstatsd_log_file: /var/log/datadog/dogstatsd.log + pup_log_file: /var/log/datadog/pup.log + + # proxy_host: my-proxy.com + # proxy_port: 3128 + # proxy_user: user + # proxy_password: password + + # tags: mytag0, mytag1 + ${optionalString (cfg.tags != null ) "tags: ${concatStringsSep ", " cfg.tags }"} + + # collect_ec2_tags: no + # recent_point_threshold: 30 + # use_mount: no + # listen_port: 17123 + # graphite_listen_port: 17124 + # non_local_traffic: no + # use_curl_http_client: False + # bind_host: localhost + + # use_pup: no + # pup_port: 17125 + # pup_interface: localhost + # pup_url: http://localhost:17125 + + # dogstatsd_port : 8125 + # dogstatsd_interval : 10 + # dogstatsd_normalize : yes + # statsd_forward_host: address_of_own_statsd_server + # statsd_forward_port: 8125 + + # device_blacklist_re: .*\/dev\/mapper\/lxc-box.* + + # ganglia_host: localhost + # ganglia_port: 8651 + ''; + + diskConfig = pkgs.writeText "disk.yaml" '' + init_config: + + instances: + - use_mount: no + ''; + + networkConfig = pkgs.writeText "network.yaml" '' + init_config: + + instances: + # Network check only supports one configured instance + - collect_connection_state: false + excluded_interfaces: + - lo + - lo0 + ''; + + postgresqlConfig = pkgs.writeText "postgres.yaml" cfg.postgresqlConfig; + nginxConfig = pkgs.writeText "nginx.yaml" cfg.nginxConfig; + mongoConfig = pkgs.writeText "mongo.yaml" cfg.mongoConfig; + jmxConfig = pkgs.writeText "jmx.yaml" cfg.jmxConfig; + processConfig = pkgs.writeText "process.yaml" cfg.processConfig; + + etcfiles = + let + defaultConfd = import ./dd-agent-defaults.nix; + in 
(map (f: { source = "${pkgs.dd-agent}/agent/conf.d-system/${f}"; + target = "dd-agent/conf.d/${f}"; + }) defaultConfd) ++ [ + { source = ddConf; + target = "dd-agent/datadog.conf"; + } + { source = diskConfig; + target = "dd-agent/conf.d/disk.yaml"; + } + { source = networkConfig; + target = "dd-agent/conf.d/network.yaml"; + } ] ++ + (optional (cfg.postgresqlConfig != null) + { source = postgresqlConfig; + target = "dd-agent/conf.d/postgres.yaml"; + }) ++ + (optional (cfg.nginxConfig != null) + { source = nginxConfig; + target = "dd-agent/conf.d/nginx.yaml"; + }) ++ + (optional (cfg.mongoConfig != null) + { source = mongoConfig; + target = "dd-agent/conf.d/mongo.yaml"; + }) ++ + (optional (cfg.processConfig != null) + { source = processConfig; + target = "dd-agent/conf.d/process.yaml"; + }) ++ + (optional (cfg.jmxConfig != null) + { source = jmxConfig; + target = "dd-agent/conf.d/jmx.yaml"; + }); + +in { + options.services.dd-agent = { + enable = mkOption { + description = '' + Whether to enable the dd-agent v5 monitoring service. + For datadog-agent v6, see <option>services.datadog-agent.enable</option>. + ''; + default = false; + type = types.bool; + }; + + api_key = mkOption { + description = '' + The Datadog API key to associate the agent with your account. + + Warning: this key is stored in cleartext within the world-readable + Nix store! Consider using the new v6 + <option>services.datadog-agent</option> module instead. 
+ ''; + example = "ae0aa6a8f08efa988ba0a17578f009ab"; + type = types.str; + }; + + tags = mkOption { + description = "The tags to mark this Datadog agent"; + example = [ "test" "service" ]; + default = null; + type = types.nullOr (types.listOf types.str); + }; + + hostname = mkOption { + description = "The hostname to show in the Datadog dashboard (optional)"; + default = null; + example = "mymachine.mydomain"; + type = types.uniq (types.nullOr types.string); + }; + + postgresqlConfig = mkOption { + description = "Datadog PostgreSQL integration configuration"; + default = null; + type = types.uniq (types.nullOr types.string); + }; + + nginxConfig = mkOption { + description = "Datadog nginx integration configuration"; + default = null; + type = types.uniq (types.nullOr types.string); + }; + + mongoConfig = mkOption { + description = "MongoDB integration configuration"; + default = null; + type = types.uniq (types.nullOr types.string); + }; + + jmxConfig = mkOption { + description = "JMX integration configuration"; + default = null; + type = types.uniq (types.nullOr types.string); + }; + + processConfig = mkOption { + description = '' + Process integration configuration + + See http://docs.datadoghq.com/integrations/process/ + ''; + default = null; + type = types.uniq (types.nullOr types.string); + }; + + }; + + config = mkIf cfg.enable { + environment.systemPackages = [ pkgs."dd-agent" pkgs.sysstat pkgs.procps ]; + + users.users.datadog = { + description = "Datadog Agent User"; + uid = config.ids.uids.datadog; + group = "datadog"; + home = "/var/log/datadog/"; + createHome = true; + }; + + users.groups.datadog.gid = config.ids.gids.datadog; + + systemd.services = let + makeService = attrs: recursiveUpdate { + path = [ pkgs.dd-agent pkgs.python pkgs.sysstat pkgs.procps pkgs.gohai ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + User = "datadog"; + Group = "datadog"; + Restart = "always"; + RestartSec = 2; + PrivateTmp = true; + }; + restartTriggers = [ 
pkgs.dd-agent ddConf diskConfig networkConfig postgresqlConfig nginxConfig mongoConfig jmxConfig processConfig ]; + } attrs; + in { + dd-agent = makeService { + description = "Datadog agent monitor"; + serviceConfig.ExecStart = "${pkgs.dd-agent}/bin/dd-agent foreground"; + }; + + dogstatsd = makeService { + description = "Datadog statsd"; + environment.TMPDIR = "/run/dogstatsd"; + serviceConfig = { + ExecStart = "${pkgs.dd-agent}/bin/dogstatsd start"; + Type = "forking"; + PIDFile = "/run/dogstatsd/dogstatsd.pid"; + RuntimeDirectory = "dogstatsd"; + }; + }; + + dd-jmxfetch = lib.mkIf (cfg.jmxConfig != null) { + description = "Datadog JMX Fetcher"; + path = [ pkgs.dd-agent pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ]; + serviceConfig.ExecStart = "${pkgs.dd-agent}/bin/dd-jmxfetch"; + }; + }; + + environment.etc = etcfiles; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/dd-agent/update-dd-agent-defaults b/nixpkgs/nixos/modules/services/monitoring/dd-agent/update-dd-agent-defaults new file mode 100755 index 000000000000..76724173171a --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/dd-agent/update-dd-agent-defaults @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +dd=$(nix-build --no-out-link -A dd-agent ../../../..) +echo '# Generated using update-dd-agent-default, please re-run after updating dd-agent. DO NOT EDIT MANUALLY.' > dd-agent-defaults.nix +echo '[' >> dd-agent-defaults.nix +echo ' "auto_conf"' >> dd-agent-defaults.nix +for f in $(find $dd/agent/conf.d-system -maxdepth 1 -type f | grep -v '\.example' | sort); do + echo " \"$(basename $f)\"" >> dd-agent-defaults.nix +done +echo ']' >> dd-agent-defaults.nix diff --git a/nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix b/nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix new file mode 100644 index 000000000000..9c976c65ea49 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix @@ -0,0 +1,63 @@ +# Fusion Inventory daemon. 
+{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.fusionInventory; + + configFile = pkgs.writeText "fusion_inventory.conf" '' + server = ${concatStringsSep ", " cfg.servers} + + logger = stderr + + ${cfg.extraConfig} + ''; + +in { + + ###### interface + + options = { + + services.fusionInventory = { + + enable = mkEnableOption "Fusion Inventory Agent"; + + servers = mkOption { + type = types.listOf types.str; + description = '' + The urls of the OCS/GLPI servers to connect to. + ''; + }; + + extraConfig = mkOption { + default = ""; + type = types.lines; + description = '' + Configuration that is injected verbatim into the configuration file. + ''; + }; + }; + }; + + + ###### implementation + + config = mkIf cfg.enable { + + users.users = singleton { + name = "fusion-inventory"; + description = "FusionInventory user"; + }; + + systemd.services."fusion-inventory" = { + description = "Fusion Inventory Agent"; + wantedBy = [ "multi-user.target" ]; + + serviceConfig = { + ExecStart = "${pkgs.fusionInventory}/bin/fusioninventory-agent --conf-file=${configFile} --daemon --no-fork"; + }; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix b/nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix new file mode 100644 index 000000000000..149026d20188 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix @@ -0,0 +1,66 @@ +{ options, config, lib, pkgs, ... 
}:

with lib;

let
  cfg = config.services.grafana_reporter;

in {
  options.services.grafana_reporter = {
    enable = mkEnableOption "grafana_reporter";

    grafana = {
      protocol = mkOption {
        description = "Grafana protocol.";
        default = "http";
        type = types.enum ["http" "https"];
      };
      addr = mkOption {
        description = "Grafana address.";
        default = "127.0.0.1";
        type = types.str;
      };
      port = mkOption {
        description = "Grafana port.";
        default = 3000;
        type = types.int;
      };

    };
    addr = mkOption {
      description = "Listening address.";
      default = "127.0.0.1";
      type = types.str;
    };

    port = mkOption {
      description = "Listening port.";
      default = 8686;
      type = types.int;
    };

    templateDir = mkOption {
      description = "Optional template directory to use custom tex templates";
      default = "${pkgs.grafana_reporter}";
      type = types.str;
    };
  };

  config = mkIf cfg.enable {
    systemd.services.grafana_reporter = {
      description = "Grafana Reporter Service Daemon";
      wantedBy = ["multi-user.target"];
      after = ["network.target"];
      serviceConfig = let
        # Fixed: nixpkgs lib has no `concatSepString`; the correct helper is
        # `concatStringsSep` (separator first, then the list of strings).
        args = lib.concatStringsSep " " [
          "-proto ${cfg.grafana.protocol}://"
          "-ip ${cfg.grafana.addr}:${toString cfg.grafana.port}"
          "-port :${toString cfg.port}"
          "-templates ${cfg.templateDir}"
        ];
      in {
        ExecStart = "${pkgs.grafana_reporter.bin}/bin/grafana-reporter ${args}";
      };
    };
  };
}
diff --git a/nixpkgs/nixos/modules/services/monitoring/grafana.nix b/nixpkgs/nixos/modules/services/monitoring/grafana.nix
new file mode 100644
index 000000000000..5fb3e3771221
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/grafana.nix
@@ -0,0 +1,381 @@
{ options, config, lib, pkgs, ...
}: + +with lib; + +let + cfg = config.services.grafana; + opt = options.services.grafana; + + envOptions = { + PATHS_DATA = cfg.dataDir; + PATHS_PLUGINS = "${cfg.dataDir}/plugins"; + PATHS_LOGS = "${cfg.dataDir}/log"; + + SERVER_PROTOCOL = cfg.protocol; + SERVER_HTTP_ADDR = cfg.addr; + SERVER_HTTP_PORT = cfg.port; + SERVER_DOMAIN = cfg.domain; + SERVER_ROOT_URL = cfg.rootUrl; + SERVER_STATIC_ROOT_PATH = cfg.staticRootPath; + SERVER_CERT_FILE = cfg.certFile; + SERVER_CERT_KEY = cfg.certKey; + + DATABASE_TYPE = cfg.database.type; + DATABASE_HOST = cfg.database.host; + DATABASE_NAME = cfg.database.name; + DATABASE_USER = cfg.database.user; + DATABASE_PASSWORD = cfg.database.password; + DATABASE_PATH = cfg.database.path; + DATABASE_CONN_MAX_LIFETIME = cfg.database.connMaxLifetime; + + SECURITY_ADMIN_USER = cfg.security.adminUser; + SECURITY_ADMIN_PASSWORD = cfg.security.adminPassword; + SECURITY_SECRET_KEY = cfg.security.secretKey; + + USERS_ALLOW_SIGN_UP = boolToString cfg.users.allowSignUp; + USERS_ALLOW_ORG_CREATE = boolToString cfg.users.allowOrgCreate; + USERS_AUTO_ASSIGN_ORG = boolToString cfg.users.autoAssignOrg; + USERS_AUTO_ASSIGN_ORG_ROLE = cfg.users.autoAssignOrgRole; + + AUTH_ANONYMOUS_ENABLED = boolToString cfg.auth.anonymous.enable; + AUTH_ANONYMOUS_ORG_NAME = cfg.auth.anonymous.org_name; + AUTH_ANONYMOUS_ORG_ROLE = cfg.auth.anonymous.org_role; + + ANALYTICS_REPORTING_ENABLED = boolToString cfg.analytics.reporting.enable; + + SMTP_ENABLE = boolToString cfg.smtp.enable; + SMTP_HOST = cfg.smtp.host; + SMTP_USER = cfg.smtp.user; + SMTP_PASSWORD = cfg.smtp.password; + SMTP_FROM_ADDRESS = cfg.smtp.fromAddress; + } // cfg.extraOptions; + +in { + options.services.grafana = { + enable = mkEnableOption "grafana"; + + protocol = mkOption { + description = "Which protocol to listen."; + default = "http"; + type = types.enum ["http" "https" "socket"]; + }; + + addr = mkOption { + description = "Listening address."; + default = "127.0.0.1"; + type = types.str; + }; + 
+ port = mkOption { + description = "Listening port."; + default = 3000; + type = types.int; + }; + + domain = mkOption { + description = "The public facing domain name used to access grafana from a browser."; + default = "localhost"; + type = types.str; + }; + + rootUrl = mkOption { + description = "Full public facing url."; + default = "%(protocol)s://%(domain)s:%(http_port)s/"; + type = types.str; + }; + + certFile = mkOption { + description = "Cert file for ssl."; + default = ""; + type = types.str; + }; + + certKey = mkOption { + description = "Cert key for ssl."; + default = ""; + type = types.str; + }; + + staticRootPath = mkOption { + description = "Root path for static assets."; + default = "${cfg.package}/share/grafana/public"; + type = types.str; + }; + + package = mkOption { + description = "Package to use."; + default = pkgs.grafana; + defaultText = "pkgs.grafana"; + type = types.package; + }; + + dataDir = mkOption { + description = "Data directory."; + default = "/var/lib/grafana"; + type = types.path; + }; + + database = { + type = mkOption { + description = "Database type."; + default = "sqlite3"; + type = types.enum ["mysql" "sqlite3" "postgres"]; + }; + + host = mkOption { + description = "Database host."; + default = "127.0.0.1:3306"; + type = types.str; + }; + + name = mkOption { + description = "Database name."; + default = "grafana"; + type = types.str; + }; + + user = mkOption { + description = "Database user."; + default = "root"; + type = types.str; + }; + + password = mkOption { + description = '' + Database password. + This option is mutual exclusive with the passwordFile option. + ''; + default = ""; + type = types.str; + }; + + passwordFile = mkOption { + description = '' + File that containts the database password. + This option is mutual exclusive with the password option. 
+ ''; + default = null; + type = types.nullOr types.path; + }; + + path = mkOption { + description = "Database path."; + default = "${cfg.dataDir}/data/grafana.db"; + type = types.path; + }; + + connMaxLifetime = mkOption { + description = '' + Sets the maximum amount of time (in seconds) a connection may be reused. + For MySQL this setting should be shorter than the `wait_timeout' variable. + ''; + default = "unlimited"; + example = 14400; + type = types.either types.int (types.enum [ "unlimited" ]); + }; + }; + + security = { + adminUser = mkOption { + description = "Default admin username."; + default = "admin"; + type = types.str; + }; + + adminPassword = mkOption { + description = '' + Default admin password. + This option is mutual exclusive with the adminPasswordFile option. + ''; + default = "admin"; + type = types.str; + }; + + adminPasswordFile = mkOption { + description = '' + Default admin password. + This option is mutual exclusive with the <literal>adminPassword</literal> option. + ''; + default = null; + type = types.nullOr types.path; + }; + + secretKey = mkOption { + description = "Secret key used for signing."; + default = "SW2YcwTIb9zpOOhoPsMm"; + type = types.str; + }; + + secretKeyFile = mkOption { + description = "Secret key used for signing."; + default = null; + type = types.nullOr types.path; + }; + }; + + smtp = { + enable = mkEnableOption "smtp"; + host = mkOption { + description = "Host to connect to"; + default = "localhost:25"; + type = types.str; + }; + user = mkOption { + description = "User used for authentication"; + default = ""; + type = types.str; + }; + password = mkOption { + description = '' + Password used for authentication. + This option is mutual exclusive with the passwordFile option. + ''; + default = ""; + type = types.str; + }; + passwordFile = mkOption { + description = '' + Password used for authentication. + This option is mutual exclusive with the password option. 
+ ''; + default = null; + type = types.nullOr types.path; + }; + fromAddress = mkOption { + description = "Email address used for sending"; + default = "admin@grafana.localhost"; + type = types.str; + }; + }; + + users = { + allowSignUp = mkOption { + description = "Disable user signup / registration"; + default = false; + type = types.bool; + }; + + allowOrgCreate = mkOption { + description = "Whether user is allowed to create organizations."; + default = false; + type = types.bool; + }; + + autoAssignOrg = mkOption { + description = "Whether to automatically assign new users to default org."; + default = true; + type = types.bool; + }; + + autoAssignOrgRole = mkOption { + description = "Default role new users will be auto assigned."; + default = "Viewer"; + type = types.enum ["Viewer" "Editor"]; + }; + }; + + auth.anonymous = { + enable = mkOption { + description = "Whether to allow anonymous access"; + default = false; + type = types.bool; + }; + org_name = mkOption { + description = "Which organization to allow anonymous access to"; + default = "Main Org."; + type = types.str; + }; + org_role = mkOption { + description = "Which role anonymous users have in the organization"; + default = "Viewer"; + type = types.str; + }; + + }; + + analytics.reporting = { + enable = mkOption { + description = "Whether to allow anonymous usage reporting to stats.grafana.net"; + default = true; + type = types.bool; + }; + }; + + extraOptions = mkOption { + description = '' + Extra configuration options passed as env variables as specified in + <link xlink:href="http://docs.grafana.org/installation/configuration/">documentation</link>, + but without GF_ prefix + ''; + default = {}; + type = with types; attrsOf (either str path); + }; + }; + + config = mkIf cfg.enable { + warnings = optional ( + cfg.database.password != opt.database.password.default || + cfg.security.adminPassword != opt.security.adminPassword.default + ) "Grafana passwords will be stored as plaintext in the Nix 
store!";

    environment.systemPackages = [ cfg.package ];

    assertions = [
      {
        assertion = cfg.database.password != opt.database.password.default -> cfg.database.passwordFile == null;
        message = "Cannot set both password and passwordFile";
      }
      {
        assertion = cfg.security.adminPassword != opt.security.adminPassword.default -> cfg.security.adminPasswordFile == null;
        message = "Cannot set both adminPassword and adminPasswordFile";
      }
      {
        # Fixed: the original compared secretKeyFile against its own default
        # and then required secretKeyFile == null, so it could never fire.
        # The intent is: a non-default secretKey excludes secretKeyFile.
        assertion = cfg.security.secretKey != opt.security.secretKey.default -> cfg.security.secretKeyFile == null;
        message = "Cannot set both secretKey and secretKeyFile";
      }
      {
        assertion = cfg.smtp.password != opt.smtp.password.default -> cfg.smtp.passwordFile == null;
        # Fixed: the message previously named the unrelated secretKeyFile option.
        message = "Cannot set both password and passwordFile";
      }
    ];

    systemd.services.grafana = {
      description = "Grafana Service Daemon";
      wantedBy = ["multi-user.target"];
      # Fixed: "networking.target" is not a systemd unit; the special unit is
      # "network.target" (systemd.special(7)).
      after = ["network.target"];
      environment = {
        QT_QPA_PLATFORM = "offscreen";
      } // mapAttrs' (n: v: nameValuePair "GF_${n}" (toString v)) envOptions;
      script = ''
        ${optionalString (cfg.database.passwordFile != null) ''
          export GF_DATABASE_PASSWORD="$(cat ${escapeShellArg cfg.database.passwordFile})"
        ''}
        ${optionalString (cfg.security.adminPasswordFile != null) ''
          export GF_SECURITY_ADMIN_PASSWORD="$(cat ${escapeShellArg cfg.security.adminPasswordFile})"
        ''}
        ${optionalString (cfg.security.secretKeyFile != null) ''
          export GF_SECURITY_SECRET_KEY="$(cat ${escapeShellArg cfg.security.secretKeyFile})"
        ''}
        ${optionalString (cfg.smtp.passwordFile != null) ''
          export GF_SMTP_PASSWORD="$(cat ${escapeShellArg cfg.smtp.passwordFile})"
        ''}
        exec ${cfg.package.bin}/bin/grafana-server -homepath ${cfg.dataDir}
      '';
      serviceConfig = {
        WorkingDirectory = cfg.dataDir;
        User = "grafana";
      };
      preStart = ''
        ln -fs ${cfg.package}/share/grafana/conf ${cfg.dataDir}
        ln -fs ${cfg.package}/share/grafana/tools ${cfg.dataDir}
      '';
    };

    users.users.grafana
= { + uid = config.ids.uids.grafana; + description = "Grafana user"; + home = cfg.dataDir; + createHome = true; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/graphite.nix b/nixpkgs/nixos/modules/services/monitoring/graphite.nix new file mode 100644 index 000000000000..cdc98b407e90 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/graphite.nix @@ -0,0 +1,642 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.graphite; + writeTextOrNull = f: t: mapNullable (pkgs.writeTextDir f) t; + + dataDir = cfg.dataDir; + staticDir = cfg.dataDir + "/static"; + + graphiteLocalSettingsDir = pkgs.runCommand "graphite_local_settings" + {inherit graphiteLocalSettings;} '' + mkdir -p $out + ln -s $graphiteLocalSettings $out/graphite_local_settings.py + ''; + + graphiteLocalSettings = pkgs.writeText "graphite_local_settings.py" ( + "STATIC_ROOT = '${staticDir}'\n" + + optionalString (! isNull config.time.timeZone) "TIME_ZONE = '${config.time.timeZone}'\n" + + cfg.web.extraConfig + ); + + graphiteApiConfig = pkgs.writeText "graphite-api.yaml" '' + search_index: ${dataDir}/index + ${optionalString (!isNull config.time.timeZone) ''time_zone: ${config.time.timeZone}''} + ${optionalString (cfg.api.finders != []) ''finders:''} + ${concatMapStringsSep "\n" (f: " - " + f.moduleName) cfg.api.finders} + ${optionalString (cfg.api.functions != []) ''functions:''} + ${concatMapStringsSep "\n" (f: " - " + f) cfg.api.functions} + ${cfg.api.extraConfig} + ''; + + seyrenConfig = { + SEYREN_URL = cfg.seyren.seyrenUrl; + MONGO_URL = cfg.seyren.mongoUrl; + GRAPHITE_URL = cfg.seyren.graphiteUrl; + } // cfg.seyren.extraConfig; + + pagerConfig = pkgs.writeText "alarms.yaml" cfg.pager.alerts; + + configDir = pkgs.buildEnv { + name = "graphite-config"; + paths = lists.filter (el: el != null) [ + (writeTextOrNull "carbon.conf" cfg.carbon.config) + (writeTextOrNull "storage-aggregation.conf" cfg.carbon.storageAggregation) + (writeTextOrNull 
"storage-schemas.conf" cfg.carbon.storageSchemas) + (writeTextOrNull "blacklist.conf" cfg.carbon.blacklist) + (writeTextOrNull "whitelist.conf" cfg.carbon.whitelist) + (writeTextOrNull "rewrite-rules.conf" cfg.carbon.rewriteRules) + (writeTextOrNull "relay-rules.conf" cfg.carbon.relayRules) + (writeTextOrNull "aggregation-rules.conf" cfg.carbon.aggregationRules) + ]; + }; + + carbonOpts = name: with config.ids; '' + --nodaemon --syslog --prefix=${name} --pidfile /run/${name}/${name}.pid ${name} + ''; + + carbonEnv = { + PYTHONPATH = let + cenv = pkgs.python.buildEnv.override { + extraLibs = [ pkgs.python27Packages.carbon ]; + }; + cenvPack = "${cenv}/${pkgs.python.sitePackages}"; + # opt/graphite/lib contains twisted.plugins.carbon-cache + in "${cenvPack}/opt/graphite/lib:${cenvPack}"; + GRAPHITE_ROOT = dataDir; + GRAPHITE_CONF_DIR = configDir; + GRAPHITE_STORAGE_DIR = dataDir; + }; + +in { + + ###### interface + + options.services.graphite = { + dataDir = mkOption { + type = types.path; + default = "/var/db/graphite"; + description = '' + Data directory for graphite. + ''; + }; + + web = { + enable = mkOption { + description = "Whether to enable graphite web frontend."; + default = false; + type = types.bool; + }; + + listenAddress = mkOption { + description = "Graphite web frontend listen address."; + default = "127.0.0.1"; + type = types.str; + }; + + port = mkOption { + description = "Graphite web frontend port."; + default = 8080; + type = types.int; + }; + + extraConfig = mkOption { + type = types.str; + default = ""; + description = '' + Graphite webapp settings. See: + <link xlink:href="http://graphite.readthedocs.io/en/latest/config-local-settings.html"/> + ''; + }; + }; + + api = { + enable = mkOption { + description = '' + Whether to enable graphite api. Graphite api is lightweight alternative + to graphite web, with api and without dashboard. It's advised to use + grafana as alternative dashboard and influxdb as alternative to + graphite carbon. 
+ + For more information visit + <link xlink:href="http://graphite-api.readthedocs.org/en/latest/"/> + ''; + default = false; + type = types.bool; + }; + + finders = mkOption { + description = "List of finder plugins to load."; + default = []; + example = literalExample "[ pkgs.python27Packages.influxgraph ]"; + type = types.listOf types.package; + }; + + functions = mkOption { + description = "List of functions to load."; + default = [ + "graphite_api.functions.SeriesFunctions" + "graphite_api.functions.PieFunctions" + ]; + type = types.listOf types.str; + }; + + listenAddress = mkOption { + description = "Graphite web service listen address."; + default = "127.0.0.1"; + type = types.str; + }; + + port = mkOption { + description = "Graphite api service port."; + default = 8080; + type = types.int; + }; + + package = mkOption { + description = "Package to use for graphite api."; + default = pkgs.python27Packages.graphite_api; + defaultText = "pkgs.python27Packages.graphite_api"; + type = types.package; + }; + + extraConfig = mkOption { + description = "Extra configuration for graphite api."; + default = '' + whisper: + directories: + - ${dataDir}/whisper + ''; + example = '' + allowed_origins: + - dashboard.example.com + cheat_times: true + influxdb: + host: localhost + port: 8086 + user: influxdb + pass: influxdb + db: metrics + cache: + CACHE_TYPE: 'filesystem' + CACHE_DIR: '/tmp/graphite-api-cache' + ''; + type = types.lines; + }; + }; + + carbon = { + config = mkOption { + description = "Content of carbon configuration file."; + default = '' + [cache] + # Listen on localhost by default for security reasons + UDP_RECEIVER_INTERFACE = 127.0.0.1 + PICKLE_RECEIVER_INTERFACE = 127.0.0.1 + LINE_RECEIVER_INTERFACE = 127.0.0.1 + CACHE_QUERY_INTERFACE = 127.0.0.1 + # Do not log every update + LOG_UPDATES = False + LOG_CACHE_HITS = False + ''; + type = types.str; + }; + + enableCache = mkOption { + description = "Whether to enable carbon cache, the graphite storage 
daemon.";
      default = false;
      type = types.bool;
    };

    storageAggregation = mkOption {
      description = "Defines how to aggregate data to lower-precision retentions.";
      default = null;
      # types.string is deprecated in favour of types.str.
      type = types.uniq (types.nullOr types.str);
      example = ''
        [all_min]
        pattern = \.min$
        xFilesFactor = 0.1
        aggregationMethod = min
      '';
    };

    storageSchemas = mkOption {
      description = "Defines retention rates for storing metrics.";
      default = "";
      type = types.uniq (types.nullOr types.str);
      example = ''
        [apache_busyWorkers]
        pattern = ^servers\.www.*\.workers\.busyWorkers$
        retentions = 15s:7d,1m:21d,15m:5y
      '';
    };

    blacklist = mkOption {
      # Fixed typo: "experssions" -> "expressions".
      description = "Any metrics received which match one of the expressions will be dropped.";
      default = null;
      type = types.uniq (types.nullOr types.str);
      # Fixed: backslashes must be escaped inside a double-quoted Nix string,
      # otherwise "\." renders as a bare "." in the documented example.
      example = "^some\\.noisy\\.metric\\.prefix\\..*";
    };

    whitelist = mkOption {
      # Fixed typo: "experssions" -> "expressions".
      description = "Only metrics received which match one of the expressions will be persisted.";
      default = null;
      type = types.uniq (types.nullOr types.str);
      example = ".*";
    };

    rewriteRules = mkOption {
      description = ''
        Regular expression patterns that can be used to rewrite metric names
        in a search and replace fashion.
+ ''; + default = null; + type = types.uniq (types.nullOr types.string); + example = '' + [post] + _sum$ = + _avg$ = + ''; + }; + + enableRelay = mkOption { + description = "Whether to enable carbon relay, the carbon replication and sharding service."; + default = false; + type = types.bool; + }; + + relayRules = mkOption { + description = "Relay rules are used to send certain metrics to a certain backend."; + default = null; + type = types.uniq (types.nullOr types.string); + example = '' + [example] + pattern = ^mydata\.foo\..+ + servers = 10.1.2.3, 10.1.2.4:2004, myserver.mydomain.com + ''; + }; + + enableAggregator = mkOption { + description = "Whether to enable carbon aggregator, the carbon buffering service."; + default = false; + type = types.bool; + }; + + aggregationRules = mkOption { + description = "Defines if and how received metrics will be aggregated."; + default = null; + type = types.uniq (types.nullOr types.string); + example = '' + <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests + <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency + ''; + }; + }; + + seyren = { + enable = mkOption { + description = "Whether to enable seyren service."; + default = false; + type = types.bool; + }; + + port = mkOption { + description = "Seyren listening port."; + default = 8081; + type = types.int; + }; + + seyrenUrl = mkOption { + default = "http://localhost:${toString cfg.seyren.port}/"; + description = "Host where seyren is accessible."; + type = types.str; + }; + + graphiteUrl = mkOption { + default = "http://${cfg.web.listenAddress}:${toString cfg.web.port}"; + description = "Host where graphite service runs."; + type = types.str; + }; + + mongoUrl = mkOption { + default = "mongodb://${config.services.mongodb.bind_ip}:27017/seyren"; + description = "Mongodb connection string."; + type = types.str; + }; + + extraConfig = mkOption { + default = {}; + description = '' + Extra seyren configuration. 
See + <link xlink:href='https://github.com/scobal/seyren#config' /> + ''; + type = types.attrsOf types.str; + example = literalExample '' + { + GRAPHITE_USERNAME = "user"; + GRAPHITE_PASSWORD = "pass"; + } + ''; + }; + }; + + pager = { + enable = mkOption { + description = '' + Whether to enable graphite-pager service. For more information visit + <link xlink:href="https://github.com/seatgeek/graphite-pager"/> + ''; + default = false; + type = types.bool; + }; + + redisUrl = mkOption { + description = "Redis connection string."; + default = "redis://localhost:${toString config.services.redis.port}/"; + type = types.str; + }; + + graphiteUrl = mkOption { + description = "URL to your graphite service."; + default = "http://${cfg.web.listenAddress}:${toString cfg.web.port}"; + type = types.str; + }; + + alerts = mkOption { + description = "Alerts configuration for graphite-pager."; + default = '' + alerts: + - target: constantLine(100) + warning: 90 + critical: 200 + name: Test + ''; + example = '' + pushbullet_key: pushbullet_api_key + alerts: + - target: stats.seatgeek.app.deal_quality.venue_info_cache.hit + warning: .5 + critical: 1 + name: Deal quality venue cache hits + ''; + type = types.lines; + }; + }; + + beacon = { + enable = mkEnableOption "graphite beacon"; + + config = mkOption { + description = "Graphite beacon configuration."; + default = {}; + type = types.attrs; + }; + }; + }; + + ###### implementation + + config = mkMerge [ + (mkIf cfg.carbon.enableCache { + systemd.services.carbonCache = let name = "carbon-cache"; in { + description = "Graphite Data Storage Backend"; + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + environment = carbonEnv; + serviceConfig = { + RuntimeDirectory = name; + ExecStart = "${pkgs.pythonPackages.twisted}/bin/twistd ${carbonOpts name}"; + User = "graphite"; + Group = "graphite"; + PermissionsStartOnly = true; + PIDFile="/run/${name}/${name}.pid"; + }; + preStart = '' + install -dm0700 -o graphite -g 
graphite ${cfg.dataDir} + install -dm0700 -o graphite -g graphite ${cfg.dataDir}/whisper + ''; + }; + }) + + (mkIf cfg.carbon.enableAggregator { + systemd.services.carbonAggregator = let name = "carbon-aggregator"; in { + enable = cfg.carbon.enableAggregator; + description = "Carbon Data Aggregator"; + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + environment = carbonEnv; + serviceConfig = { + RuntimeDirectory = name; + ExecStart = "${pkgs.pythonPackages.twisted}/bin/twistd ${carbonOpts name}"; + User = "graphite"; + Group = "graphite"; + PIDFile="/run/${name}/${name}.pid"; + }; + }; + }) + + (mkIf cfg.carbon.enableRelay { + systemd.services.carbonRelay = let name = "carbon-relay"; in { + description = "Carbon Data Relay"; + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + environment = carbonEnv; + serviceConfig = { + RuntimeDirectory = name; + ExecStart = "${pkgs.pythonPackages.twisted}/bin/twistd ${carbonOpts name}"; + User = "graphite"; + Group = "graphite"; + PIDFile="/run/${name}/${name}.pid"; + }; + }; + }) + + (mkIf (cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay) { + environment.systemPackages = [ + pkgs.pythonPackages.carbon + ]; + }) + + (mkIf cfg.web.enable (let + python27' = pkgs.python27.override { + packageOverrides = self: super: { + django = self.django_1_8; + django_tagging = self.django_tagging_0_4_3; + }; + }; + pythonPackages = python27'.pkgs; + in { + systemd.services.graphiteWeb = { + description = "Graphite Web Interface"; + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + path = [ pkgs.perl ]; + environment = { + PYTHONPATH = let + penv = pkgs.python.buildEnv.override { + extraLibs = [ + pythonPackages.graphite-web + pythonPackages.pysqlite + ]; + }; + penvPack = "${penv}/${pkgs.python.sitePackages}"; + in concatStringsSep ":" [ + "${graphiteLocalSettingsDir}" + "${penvPack}/opt/graphite/webapp" + "${penvPack}" + # explicitly adding pycairo in 
path because it cannot be imported via buildEnv + "${pkgs.pythonPackages.pycairo}/${pkgs.python.sitePackages}" + ]; + DJANGO_SETTINGS_MODULE = "graphite.settings"; + GRAPHITE_CONF_DIR = configDir; + GRAPHITE_STORAGE_DIR = dataDir; + LD_LIBRARY_PATH = "${pkgs.cairo.out}/lib"; + }; + serviceConfig = { + ExecStart = '' + ${pkgs.python27Packages.waitress-django}/bin/waitress-serve-django \ + --host=${cfg.web.listenAddress} --port=${toString cfg.web.port} + ''; + User = "graphite"; + Group = "graphite"; + PermissionsStartOnly = true; + }; + preStart = '' + if ! test -e ${dataDir}/db-created; then + mkdir -p ${dataDir}/{whisper/,log/webapp/} + chmod 0700 ${dataDir}/{whisper/,log/webapp/} + + ${pkgs.pythonPackages.django_1_8}/bin/django-admin.py migrate --noinput + + chown -R graphite:graphite ${dataDir} + + touch ${dataDir}/db-created + fi + + # Only collect static files when graphite_web changes. + if ! [ "${dataDir}/current_graphite_web" -ef "${pythonPackages.graphite-web}" ]; then + mkdir -p ${staticDir} + ${pkgs.pythonPackages.django_1_8}/bin/django-admin.py collectstatic --noinput --clear + chown -R graphite:graphite ${staticDir} + ln -sfT "${pythonPackages.graphite-web}" "${dataDir}/current_graphite_web" + fi + ''; + }; + + environment.systemPackages = [ pythonPackages.graphite-web ]; + })) + + (mkIf cfg.api.enable { + systemd.services.graphiteApi = { + description = "Graphite Api Interface"; + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + environment = { + PYTHONPATH = let + aenv = pkgs.python.buildEnv.override { + extraLibs = [ cfg.api.package pkgs.cairo pkgs.pythonPackages.cffi ] ++ cfg.api.finders; + }; + in "${aenv}/${pkgs.python.sitePackages}"; + GRAPHITE_API_CONFIG = graphiteApiConfig; + LD_LIBRARY_PATH = "${pkgs.cairo.out}/lib"; + }; + serviceConfig = { + ExecStart = '' + ${pkgs.python27Packages.waitress}/bin/waitress-serve \ + --host=${cfg.api.listenAddress} --port=${toString cfg.api.port} \ + graphite_api.app:app + ''; + User = 
"graphite"; + Group = "graphite"; + PermissionsStartOnly = true; + }; + preStart = '' + if ! test -e ${dataDir}/db-created; then + mkdir -p ${dataDir}/cache/ + chmod 0700 ${dataDir}/cache/ + + chown graphite:graphite ${cfg.dataDir} + chown -R graphite:graphite ${cfg.dataDir}/cache + + touch ${dataDir}/db-created + fi + ''; + }; + }) + + (mkIf cfg.seyren.enable { + systemd.services.seyren = { + description = "Graphite Alerting Dashboard"; + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" "mongodb.service" ]; + environment = seyrenConfig; + serviceConfig = { + ExecStart = "${pkgs.seyren}/bin/seyren -httpPort ${toString cfg.seyren.port}"; + WorkingDirectory = dataDir; + User = "graphite"; + Group = "graphite"; + }; + preStart = '' + if ! test -e ${dataDir}/db-created; then + mkdir -p ${dataDir} + chown graphite:graphite ${dataDir} + fi + ''; + }; + + services.mongodb.enable = mkDefault true; + }) + + (mkIf cfg.pager.enable { + systemd.services.graphitePager = { + description = "Graphite Pager Alerting Daemon"; + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" "redis.service" ]; + environment = { + REDIS_URL = cfg.pager.redisUrl; + GRAPHITE_URL = cfg.pager.graphiteUrl; + }; + serviceConfig = { + ExecStart = "${pkgs.pythonPackages.graphitepager}/bin/graphite-pager --config ${pagerConfig}"; + User = "graphite"; + Group = "graphite"; + }; + }; + + services.redis.enable = mkDefault true; + + environment.systemPackages = [ pkgs.pythonPackages.graphitepager ]; + }) + + (mkIf cfg.beacon.enable { + systemd.services.graphite-beacon = { + description = "Grpahite Beacon Alerting Daemon"; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + ExecStart = '' + ${pkgs.pythonPackages.graphite_beacon}/bin/graphite-beacon \ + --config=${pkgs.writeText "graphite-beacon.json" (builtins.toJSON cfg.beacon.config)} + ''; + User = "graphite"; + Group = "graphite"; + }; + }; + }) + + (mkIf ( + cfg.carbon.enableCache || cfg.carbon.enableAggregator || 
cfg.carbon.enableRelay || + cfg.web.enable || cfg.api.enable || + cfg.seyren.enable || cfg.pager.enable || cfg.beacon.enable + ) { + users.users = singleton { + name = "graphite"; + uid = config.ids.uids.graphite; + description = "Graphite daemon user"; + home = dataDir; + }; + users.groups.graphite.gid = config.ids.gids.graphite; + }) + ]; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/hdaps.nix b/nixpkgs/nixos/modules/services/monitoring/hdaps.nix new file mode 100644 index 000000000000..be26c44e78d1 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/hdaps.nix @@ -0,0 +1,22 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.hdapsd; + hdapsd = [ pkgs.hdapsd ]; +in +{ + options = { + services.hdapsd.enable = mkEnableOption + '' + Hard Drive Active Protection System Daemon, + devices are detected and managed automatically by udev and systemd + ''; + }; + + config = mkIf cfg.enable { + services.udev.packages = hdapsd; + systemd.packages = hdapsd; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/heapster.nix b/nixpkgs/nixos/modules/services/monitoring/heapster.nix new file mode 100644 index 000000000000..fbdff2eb5dbe --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/heapster.nix @@ -0,0 +1,58 @@ +{ config, lib, pkgs, ... 
}: + +with lib; + +let + cfg = config.services.heapster; +in { + options.services.heapster = { + enable = mkOption { + description = "Whether to enable heapster monitoring"; + default = false; + type = types.bool; + }; + + source = mkOption { + description = "Heapster metric source"; + example = "kubernetes:https://kubernetes.default"; + type = types.string; + }; + + sink = mkOption { + description = "Heapster metic sink"; + example = "influxdb:http://localhost:8086"; + type = types.string; + }; + + extraOpts = mkOption { + description = "Heapster extra options"; + default = ""; + type = types.string; + }; + + package = mkOption { + description = "Package to use by heapster"; + default = pkgs.heapster; + defaultText = "pkgs.heapster"; + type = types.package; + }; + }; + + config = mkIf cfg.enable { + systemd.services.heapster = { + wantedBy = ["multi-user.target"]; + after = ["cadvisor.service" "kube-apiserver.service"]; + + serviceConfig = { + ExecStart = "${cfg.package}/bin/heapster --source=${cfg.source} --sink=${cfg.sink} ${cfg.extraOpts}"; + User = "heapster"; + }; + }; + + users.users = singleton { + name = "heapster"; + uid = config.ids.uids.heapster; + description = "Heapster user"; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/incron.nix b/nixpkgs/nixos/modules/services/monitoring/incron.nix new file mode 100644 index 000000000000..1789fd9f2051 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/incron.nix @@ -0,0 +1,98 @@ + +{ config, lib, pkgs, ... }: + +with lib; + +let + + cfg = config.services.incron; + +in + +{ + options = { + + services.incron = { + + enable = mkOption { + type = types.bool; + default = false; + description = '' + Whether to enable the incron daemon. + + Note that commands run under incrontab only support common Nix profiles for the <envar>PATH</envar> provided variable. 
+ ''; + }; + + allow = mkOption { + type = types.nullOr (types.listOf types.str); + default = null; + description = '' + Users allowed to use incrontab. + + If empty then no user will be allowed to have their own incrontab. + If <literal>null</literal> then will defer to <option>deny</option>. + If both <option>allow</option> and <option>deny</option> are null + then all users will be allowed to have their own incrontab. + ''; + }; + + deny = mkOption { + type = types.nullOr (types.listOf types.str); + default = null; + description = "Users forbidden from using incrontab."; + }; + + systab = mkOption { + type = types.lines; + default = ""; + description = "The system incrontab contents."; + example = '' + /var/mail IN_CLOSE_WRITE abc $@/$# + /tmp IN_ALL_EVENTS efg $@/$# $& + ''; + }; + + extraPackages = mkOption { + type = types.listOf types.package; + default = []; + example = literalExample "[ pkgs.rsync ]"; + description = "Extra packages available to the system incrontab."; + }; + + }; + + }; + + config = mkIf cfg.enable { + + warnings = optional (cfg.allow != null && cfg.deny != null) + ''If `services.incron.allow` is set then `services.incron.deny` will be ignored.''; + + environment.systemPackages = [ pkgs.incron ]; + + security.wrappers.incrontab.source = "${pkgs.incron}/bin/incrontab"; + + # incron won't read symlinks + environment.etc."incron.d/system" = { + mode = "0444"; + text = cfg.systab; + }; + environment.etc."incron.allow" = mkIf (cfg.allow != null) { + text = concatStringsSep "\n" cfg.allow; + }; + environment.etc."incron.deny" = mkIf (cfg.deny != null) { + text = concatStringsSep "\n" cfg.deny; + }; + + systemd.services.incron = { + description = "File System Events Scheduler"; + wantedBy = [ "multi-user.target" ]; + path = cfg.extraPackages; + serviceConfig.PIDFile = "/run/incrond.pid"; + serviceConfig.ExecStartPre = "${pkgs.coreutils}/bin/mkdir -m 710 -p /var/spool/incron"; + serviceConfig.ExecStart = "${pkgs.incron}/bin/incrond --foreground"; 
+ }; + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/kapacitor.nix b/nixpkgs/nixos/modules/services/monitoring/kapacitor.nix new file mode 100644 index 000000000000..a4bdfa8f8053 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/kapacitor.nix @@ -0,0 +1,192 @@ +{ options, config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.kapacitor; + + kapacitorConf = pkgs.writeTextFile { + name = "kapacitord.conf"; + text = '' + hostname="${config.networking.hostName}" + data_dir="${cfg.dataDir}" + + [http] + bind-address = "${cfg.bind}:${toString cfg.port}" + log-enabled = false + auth-enabled = false + + [task] + dir = "${cfg.dataDir}/tasks" + snapshot-interval = "${cfg.taskSnapshotInterval}" + + [replay] + dir = "${cfg.dataDir}/replay" + + [storage] + boltdb = "${cfg.dataDir}/kapacitor.db" + + ${optionalString (cfg.loadDirectory != null) '' + [load] + enabled = true + dir = "${cfg.loadDirectory}" + ''} + + ${optionalString (cfg.defaultDatabase.enable) '' + [[influxdb]] + name = "default" + enabled = true + default = true + urls = [ "${cfg.defaultDatabase.url}" ] + username = "${cfg.defaultDatabase.username}" + password = "${cfg.defaultDatabase.password}" + ''} + + ${optionalString (cfg.alerta.enable) '' + [alerta] + enabled = true + url = "${cfg.alerta.url}" + token = "${cfg.alerta.token}" + environment = "${cfg.alerta.environment}" + origin = "${cfg.alerta.origin}" + ''} + + ${cfg.extraConfig} + ''; + }; +in +{ + options.services.kapacitor = { + enable = mkEnableOption "kapacitor"; + + dataDir = mkOption { + type = types.path; + example = "/var/lib/kapacitor"; + default = "/var/lib/kapacitor"; + description = "Location where Kapacitor stores its state"; + }; + + port = mkOption { + type = types.int; + default = 9092; + description = "Port of Kapacitor"; + }; + + bind = mkOption { + type = types.str; + default = ""; + example = literalExample "0.0.0.0"; + description = "Address to bind to. 
The default is to bind to all addresses"; + }; + + extraConfig = mkOption { + description = "These lines go into kapacitord.conf verbatim."; + default = ""; + type = types.lines; + }; + + user = mkOption { + type = types.str; + default = "kapacitor"; + description = "User account under which Kapacitor runs"; + }; + + group = mkOption { + type = types.str; + default = "kapacitor"; + description = "Group under which Kapacitor runs"; + }; + + taskSnapshotInterval = mkOption { + type = types.str; + description = "Specifies how often to snapshot the task state (in InfluxDB time units)"; + default = "1m0s"; + example = "1m0s"; + }; + + loadDirectory = mkOption { + type = types.nullOr types.path; + description = "Directory where to load services from, such as tasks, templates and handlers (or null to disable service loading on startup)"; + default = null; + }; + + defaultDatabase = { + enable = mkEnableOption "kapacitor.defaultDatabase"; + + url = mkOption { + description = "The URL to an InfluxDB server that serves as the default database"; + example = "http://localhost:8086"; + type = types.string; + }; + + username = mkOption { + description = "The username to connect to the remote InfluxDB server"; + type = types.string; + }; + + password = mkOption { + description = "The password to connect to the remote InfluxDB server"; + type = types.string; + }; + }; + + alerta = { + enable = mkEnableOption "kapacitor alerta integration"; + + url = mkOption { + description = "The URL to the Alerta REST API"; + default = "http://localhost:5000"; + example = "http://localhost:5000"; + type = types.string; + }; + + token = mkOption { + description = "Default Alerta authentication token"; + type = types.str; + default = ""; + }; + + environment = mkOption { + description = "Default Alerta environment"; + type = types.str; + default = "Production"; + }; + + origin = mkOption { + description = "Default origin of alert"; + type = types.str; + default = "kapacitor"; + }; + }; + }; + + 
config = mkIf cfg.enable { + environment.systemPackages = [ pkgs.kapacitor ]; + + systemd.services.kapacitor = { + description = "Kapacitor Real-Time Stream Processing Engine"; + wantedBy = [ "multi-user.target" ]; + after = [ "networking.target" ]; + serviceConfig = { + ExecStart = "${pkgs.kapacitor}/bin/kapacitord -config ${kapacitorConf}"; + User = "kapacitor"; + Group = "kapacitor"; + PermissionsStartOnly = true; + }; + preStart = '' + mkdir -p ${cfg.dataDir} + chown ${cfg.user}:${cfg.group} ${cfg.dataDir} + ''; + }; + + users.users.kapacitor = { + uid = config.ids.uids.kapacitor; + description = "Kapacitor user"; + home = cfg.dataDir; + }; + + users.groups.kapacitor = { + gid = config.ids.gids.kapacitor; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/longview.nix b/nixpkgs/nixos/modules/services/monitoring/longview.nix new file mode 100644 index 000000000000..9c38956f9ba8 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/longview.nix @@ -0,0 +1,160 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.longview; + + runDir = "/run/longview"; + configsDir = "${runDir}/longview.d"; + +in { + options = { + + services.longview = { + + enable = mkOption { + type = types.bool; + default = false; + description = '' + If enabled, system metrics will be sent to Linode LongView. + ''; + }; + + apiKey = mkOption { + type = types.str; + default = ""; + example = "01234567-89AB-CDEF-0123456789ABCDEF"; + description = '' + Longview API key. To get this, look in Longview settings which + are found at https://manager.linode.com/longview/. + + Warning: this secret is stored in the world-readable Nix store! + Use <option>apiKeyFile</option> instead. + ''; + }; + + apiKeyFile = mkOption { + type = types.nullOr types.path; + default = null; + example = "/run/keys/longview-api-key"; + description = '' + A file containing the Longview API key. 
+ To get this, look in Longview settings which + are found at https://manager.linode.com/longview/. + + <option>apiKeyFile</option> takes precedence over <option>apiKey</option>. + ''; + }; + + apacheStatusUrl = mkOption { + type = types.str; + default = ""; + example = "http://127.0.0.1/server-status"; + description = '' + The Apache status page URL. If provided, Longview will + gather statistics from this location. This requires Apache + mod_status to be loaded and enabled. + ''; + }; + + nginxStatusUrl = mkOption { + type = types.str; + default = ""; + example = "http://127.0.0.1/nginx_status"; + description = '' + The Nginx status page URL. Longview will gather statistics + from this URL. This requires the Nginx stub_status module to + be enabled and configured at the given location. + ''; + }; + + mysqlUser = mkOption { + type = types.str; + default = ""; + description = '' + The user for connecting to the MySQL database. If provided, + Longview will connect to MySQL and collect statistics about + queries, etc. This user does not need to have been granted + any extra privileges. + ''; + }; + + mysqlPassword = mkOption { + type = types.str; + default = ""; + description = '' + The password corresponding to <option>mysqlUser</option>. + Warning: this is stored in cleartext in the Nix store! + Use <option>mysqlPasswordFile</option> instead. + ''; + }; + + mysqlPasswordFile = mkOption { + type = types.nullOr types.path; + default = null; + example = "/run/keys/dbpassword"; + description = '' + A file containing the password corresponding to <option>mysqlUser</option>. 
+ ''; + }; + + }; + + }; + + config = mkIf cfg.enable { + systemd.services.longview = + { description = "Longview Metrics Collection"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig.Type = "forking"; + serviceConfig.ExecStop = "-${pkgs.coreutils}/bin/kill -TERM $MAINPID"; + serviceConfig.ExecReload = "-${pkgs.coreutils}/bin/kill -HUP $MAINPID"; + serviceConfig.PIDFile = "${runDir}/longview.pid"; + serviceConfig.ExecStart = "${pkgs.longview}/bin/longview"; + preStart = '' + umask 077 + mkdir -p ${configsDir} + '' + (optionalString (cfg.apiKeyFile != null) '' + cp --no-preserve=all "${cfg.apiKeyFile}" ${runDir}/longview.key + '') + (optionalString (cfg.apacheStatusUrl != "") '' + cat > ${configsDir}/Apache.conf <<EOF + location ${cfg.apacheStatusUrl}?auto + EOF + '') + (optionalString (cfg.mysqlUser != "" && cfg.mysqlPasswordFile != null) '' + cat > ${configsDir}/MySQL.conf <<EOF + username ${cfg.mysqlUser} + password `head -n1 "${cfg.mysqlPasswordFile}"` + EOF + '') + (optionalString (cfg.nginxStatusUrl != "") '' + cat > ${configsDir}/Nginx.conf <<EOF + location ${cfg.nginxStatusUrl} + EOF + ''); + }; + + warnings = let warn = k: optional (cfg.${k} != "") + "config.services.longview.${k} is insecure. Use ${k}File instead."; + in concatMap warn [ "apiKey" "mysqlPassword" ]; + + assertions = [ + { assertion = cfg.apiKeyFile != null; + message = "Longview needs an API key configured"; + } + ]; + + # Create API key file if not configured. + services.longview.apiKeyFile = mkIf (cfg.apiKey != "") + (mkDefault (toString (pkgs.writeTextFile { + name = "longview.key"; + text = cfg.apiKey; + }))); + + # Create MySQL password file if not configured. 
+ services.longview.mysqlPasswordFile = mkDefault (toString (pkgs.writeTextFile { + name = "mysql-password-file"; + text = cfg.mysqlPassword; + })); + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/monit.nix b/nixpkgs/nixos/modules/services/monitoring/monit.nix new file mode 100644 index 000000000000..32e14ab21ffc --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/monit.nix @@ -0,0 +1,46 @@ +{config, pkgs, lib, ...}: + +with lib; + +let + cfg = config.services.monit; +in + +{ + options.services.monit = { + + enable = mkEnableOption "Monit"; + + config = mkOption { + type = types.lines; + default = ""; + description = "monitrc content"; + }; + + }; + + config = mkIf cfg.enable { + + environment.systemPackages = [ pkgs.monit ]; + + environment.etc."monitrc" = { + text = cfg.config; + mode = "0400"; + }; + + systemd.services.monit = { + description = "Pro-active monitoring utility for unix systems"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + ExecStart = "${pkgs.monit}/bin/monit -I -c /etc/monitrc"; + ExecStop = "${pkgs.monit}/bin/monit -c /etc/monitrc quit"; + ExecReload = "${pkgs.monit}/bin/monit -c /etc/monitrc reload"; + KillMode = "process"; + Restart = "always"; + }; + restartTriggers = [ config.environment.etc."monitrc".source ]; + }; + + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/munin.nix b/nixpkgs/nixos/modules/services/monitoring/munin.nix new file mode 100644 index 000000000000..2b265d5b5a90 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/munin.nix @@ -0,0 +1,219 @@ +{ config, lib, pkgs, ... 
}: + +# TODO: support munin-async +# TODO: LWP/Pg perl libs aren't recognized + +# TODO: support fastcgi +# http://munin-monitoring.org/wiki/CgiHowto2 +# spawn-fcgi -s /run/munin/fastcgi-graph.sock -U www-data -u munin -g munin /usr/lib/munin/cgi/munin-cgi-graph +# spawn-fcgi -s /run/munin/fastcgi-html.sock -U www-data -u munin -g munin /usr/lib/munin/cgi/munin-cgi-html +# https://paste.sh/vofcctHP#-KbDSXVeWoifYncZmLfZzgum +# nginx http://munin.readthedocs.org/en/latest/example/webserver/nginx.html + + +with lib; + +let + nodeCfg = config.services.munin-node; + cronCfg = config.services.munin-cron; + + muninConf = pkgs.writeText "munin.conf" + '' + dbdir /var/lib/munin + htmldir /var/www/munin + logdir /var/log/munin + rundir /run/munin + + ${cronCfg.extraGlobalConfig} + + ${cronCfg.hosts} + ''; + + nodeConf = pkgs.writeText "munin-node.conf" + '' + log_level 3 + log_file Sys::Syslog + port 4949 + host * + background 0 + user root + group root + host_name ${config.networking.hostName} + setsid 0 + + # wrapped plugins by makeWrapper being with dots + ignore_file ^\. + + allow ^::1$ + allow ^127\.0\.0\.1$ + + ${nodeCfg.extraConfig} + ''; + + pluginConf = pkgs.writeText "munin-plugin-conf" + '' + [hddtemp_smartctl] + user root + group root + + [meminfo] + user root + group root + + [ipmi*] + user root + group root + ''; + + pluginConfDir = pkgs.stdenv.mkDerivation { + name = "munin-plugin-conf.d"; + buildCommand = '' + mkdir $out + ln -s ${pluginConf} $out/nixos-config + ''; + }; +in + +{ + + options = { + + services.munin-node = { + + enable = mkOption { + default = false; + description = '' + Enable Munin Node agent. Munin node listens on 0.0.0.0 and + by default accepts connections only from 127.0.0.1 for security reasons. + + See <link xlink:href='http://munin-monitoring.org/wiki/munin-node.conf' />. + ''; + }; + + extraConfig = mkOption { + default = ""; + type = types.lines; + description = '' + <filename>munin-node.conf</filename> extra configuration. 
See + <link xlink:href='http://munin-monitoring.org/wiki/munin-node.conf' /> + ''; + }; + + # TODO: add option to add additional plugins + + }; + + services.munin-cron = { + + enable = mkOption { + default = false; + description = '' + Enable munin-cron. Takes care of all heavy lifting to collect data from + nodes and draws graphs to html. Runs munin-update, munin-limits, + munin-graphs and munin-html in that order. + + HTML output is in <filename>/var/www/munin/</filename>, configure your + favourite webserver to serve static files. + ''; + }; + + extraGlobalConfig = mkOption { + default = ""; + description = '' + <filename>munin.conf</filename> extra global configuration. + See <link xlink:href='http://munin-monitoring.org/wiki/munin.conf' />. + Useful to setup notifications, see + <link xlink:href='http://munin-monitoring.org/wiki/HowToContact' /> + ''; + example = '' + contact.email.command mail -s "Munin notification for ''${var:host}" someone@example.com + ''; + }; + + hosts = mkOption { + example = '' + [''${config.networking.hostName}] + address localhost + ''; + description = '' + Definitions of hosts of nodes to collect data from. Needs at least one + hosts for cron to succeed. 
See + <link xlink:href='http://munin-monitoring.org/wiki/munin.conf' /> + ''; + }; + + }; + + }; + + config = mkMerge [ (mkIf (nodeCfg.enable || cronCfg.enable) { + + environment.systemPackages = [ pkgs.munin ]; + + users.users = [{ + name = "munin"; + description = "Munin monitoring user"; + group = "munin"; + uid = config.ids.uids.munin; + }]; + + users.groups = [{ + name = "munin"; + gid = config.ids.gids.munin; + }]; + + }) (mkIf nodeCfg.enable { + + systemd.services.munin-node = { + description = "Munin Node"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + path = with pkgs; [ munin smartmontools "/run/current-system/sw" "/run/wrappers" ]; + environment.MUNIN_LIBDIR = "${pkgs.munin}/lib"; + environment.MUNIN_PLUGSTATE = "/run/munin"; + environment.MUNIN_LOGDIR = "/var/log/munin"; + preStart = '' + echo "updating munin plugins..." + + mkdir -p /etc/munin/plugins + rm -rf /etc/munin/plugins/* + ${pkgs.munin}/bin/munin-node-configure --suggest --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${pkgs.munin}/lib/plugins --servicedir=/etc/munin/plugins --sconfdir=${pluginConfDir} 2>/dev/null | ${pkgs.bash}/bin/bash + + # NOTE: we disable disktstats because plugin seems to fail and it hangs html generation (100% CPU + memory leak) + rm /etc/munin/plugins/diskstats || true + ''; + serviceConfig = { + ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/ --sconfdir=${pluginConfDir}"; + }; + }; + + # munin_stats plugin breaks as of 2.0.33 when this doesn't exist + systemd.tmpfiles.rules = [ "d /run/munin 0755 munin munin -" ]; + + }) (mkIf cronCfg.enable { + + systemd.timers.munin-cron = { + description = "batch Munin master programs"; + wantedBy = [ "timers.target" ]; + timerConfig.OnCalendar = "*:0/5"; + }; + + systemd.services.munin-cron = { + description = "batch Munin master programs"; + unitConfig.Documentation = "man:munin-cron(8)"; + + serviceConfig = { + Type = "oneshot"; + 
User = "munin"; + ExecStart = "${pkgs.munin}/bin/munin-cron --config ${muninConf}"; + }; + }; + + systemd.tmpfiles.rules = [ + "d /run/munin 0755 munin munin -" + "d /var/log/munin 0755 munin munin -" + "d /var/www/munin 0755 munin munin -" + "d /var/lib/munin 0755 munin munin -" + ]; + })]; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/nagios.nix b/nixpkgs/nixos/modules/services/monitoring/nagios.nix new file mode 100644 index 000000000000..3e1d727b416e --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/nagios.nix @@ -0,0 +1,190 @@ +# Nagios system/network monitoring daemon. +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.nagios; + + nagiosState = "/var/lib/nagios"; + nagiosLogDir = "/var/log/nagios"; + + nagiosObjectDefs = cfg.objectDefs; + + nagiosObjectDefsDir = pkgs.runCommand "nagios-objects" {inherit nagiosObjectDefs;} + "mkdir -p $out; ln -s $nagiosObjectDefs $out/"; + + nagiosCfgFile = pkgs.writeText "nagios.cfg" + '' + # Paths for state and logs. + log_file=${nagiosLogDir}/current + log_archive_path=${nagiosLogDir}/archive + status_file=${nagiosState}/status.dat + object_cache_file=${nagiosState}/objects.cache + temp_file=${nagiosState}/nagios.tmp + lock_file=/var/run/nagios.lock # Not used I think. + state_retention_file=${nagiosState}/retention.dat + query_socket=${nagiosState}/nagios.qh + check_result_path=${nagiosState} + command_file=${nagiosState}/nagios.cmd + + # Configuration files. + #resource_file=resource.cfg + cfg_dir=${nagiosObjectDefsDir} + + # Uid/gid that the daemon runs under. + nagios_user=nagios + nagios_group=nogroup + + # Misc. options. + illegal_macro_output_chars=`~$&|'"<> + retain_state_information=1 + ''; # " + + # Plain configuration for the Nagios web-interface with no + # authentication. 
+ nagiosCGICfgFile = pkgs.writeText "nagios.cgi.conf" + '' + main_config_file=${cfg.mainConfigFile} + use_authentication=0 + url_html_path=${cfg.urlPath} + ''; + + extraHttpdConfig = + '' + ScriptAlias ${cfg.urlPath}/cgi-bin ${pkgs.nagios}/sbin + + <Directory "${pkgs.nagios}/sbin"> + Options ExecCGI + AllowOverride None + Order allow,deny + Allow from all + SetEnv NAGIOS_CGI_CONFIG ${cfg.cgiConfigFile} + </Directory> + + Alias ${cfg.urlPath} ${pkgs.nagios}/share + + <Directory "${pkgs.nagios}/share"> + Options None + AllowOverride None + Order allow,deny + Allow from all + </Directory> + ''; + +in +{ + options = { + services.nagios = { + enable = mkOption { + default = false; + description = " + Whether to use <link + xlink:href='http://www.nagios.org/'>Nagios</link> to monitor + your system or network. + "; + }; + + objectDefs = mkOption { + description = " + A list of Nagios object configuration files that must define + the hosts, host groups, services and contacts for the + network that you want Nagios to monitor. + "; + }; + + plugins = mkOption { + type = types.listOf types.package; + default = [pkgs.nagiosPluginsOfficial pkgs.ssmtp]; + defaultText = "[pkgs.nagiosPluginsOfficial pkgs.ssmtp]"; + description = " + Packages to be added to the Nagios <envar>PATH</envar>. + Typically used to add plugins, but can be anything. + "; + }; + + mainConfigFile = mkOption { + type = types.package; + default = nagiosCfgFile; + defaultText = "nagiosCfgFile"; + description = " + Derivation for the main configuration file of Nagios. + "; + }; + + cgiConfigFile = mkOption { + type = types.package; + default = nagiosCGICfgFile; + defaultText = "nagiosCGICfgFile"; + description = " + Derivation for the configuration file of Nagios CGI scripts + that can be used in web servers for running the Nagios web interface. + "; + }; + + enableWebInterface = mkOption { + default = false; + description = " + Whether to enable the Nagios web interface. 
You should also + enable Apache (<option>services.httpd.enable</option>). + "; + }; + + urlPath = mkOption { + default = "/nagios"; + description = " + The URL path under which the Nagios web interface appears. + That is, you can access the Nagios web interface through + <literal>http://<replaceable>server</replaceable>/<replaceable>urlPath</replaceable></literal>. + "; + }; + }; + }; + + + config = mkIf cfg.enable { + users.users.nagios = { + description = "Nagios user "; + uid = config.ids.uids.nagios; + home = nagiosState; + createHome = true; + }; + + # This isn't needed, it's just so that the user can type "nagiostats + # -c /etc/nagios.cfg". + environment.etc = [ + { source = cfg.mainConfigFile; + target = "nagios.cfg"; + } + ]; + + environment.systemPackages = [ pkgs.nagios ]; + systemd.services.nagios = { + description = "Nagios monitoring daemon"; + path = [ pkgs.nagios ]; + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + + serviceConfig = { + User = "nagios"; + Restart = "always"; + RestartSec = 2; + PermissionsStartOnly = true; + }; + + preStart = '' + mkdir -m 0755 -p ${nagiosState} ${nagiosLogDir} + chown nagios ${nagiosState} ${nagiosLogDir} + ''; + + script = '' + for i in ${toString cfg.plugins}; do + export PATH=$i/bin:$i/sbin:$i/libexec:$PATH + done + exec ${pkgs.nagios}/bin/nagios ${cfg.mainConfigFile} + ''; + }; + + services.httpd.extraConfig = optionalString cfg.enableWebInterface extraHttpdConfig; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/netdata.nix b/nixpkgs/nixos/modules/services/monitoring/netdata.nix new file mode 100644 index 000000000000..4873ab1fc608 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/netdata.nix @@ -0,0 +1,147 @@ +{ config, pkgs, lib, ... 
}: + +with lib; + +let + cfg = config.services.netdata; + + wrappedPlugins = pkgs.runCommand "wrapped-plugins" {} '' + mkdir -p $out/libexec/netdata/plugins.d + ln -s /run/wrappers/bin/apps.plugin $out/libexec/netdata/plugins.d/apps.plugin + ''; + + localConfig = { + global = { + "plugins directory" = "${pkgs.netdata}/libexec/netdata/plugins.d ${wrappedPlugins}/libexec/netdata/plugins.d"; + }; + web = { + "web files owner" = "root"; + "web files group" = "root"; + }; + }; + mkConfig = generators.toINI {} (recursiveUpdate localConfig cfg.config); + configFile = pkgs.writeText "netdata.conf" (if cfg.configText != null then cfg.configText else mkConfig); + + defaultUser = "netdata"; + +in { + options = { + services.netdata = { + enable = mkEnableOption "netdata"; + + user = mkOption { + type = types.str; + default = "netdata"; + description = "User account under which netdata runs."; + }; + + group = mkOption { + type = types.str; + default = "netdata"; + description = "Group under which netdata runs."; + }; + + configText = mkOption { + type = types.nullOr types.lines; + description = "Verbatim netdata.conf, cannot be combined with config."; + default = null; + example = '' + [global] + debug log = syslog + access log = syslog + error log = syslog + ''; + }; + + python = { + enable = mkOption { + type = types.bool; + default = true; + description = '' + Whether to enable python-based plugins + ''; + }; + extraPackages = mkOption { + default = ps: []; + defaultText = "ps: []"; + example = literalExample '' + ps: [ + ps.psycopg2 + ps.docker + ps.dnspython + ] + ''; + description = '' + Extra python packages available at runtime + to enable additional python plugins. + ''; + }; + }; + + config = mkOption { + type = types.attrsOf types.attrs; + default = {}; + description = "netdata.conf configuration as nix attributes. 
cannot be combined with configText."; + example = literalExample '' + global = { + "debug log" = "syslog"; + "access log" = "syslog"; + "error log" = "syslog"; + }; + ''; + }; + }; + }; + + config = mkIf cfg.enable { + assertions = + [ { assertion = cfg.config != {} -> cfg.configText == null ; + message = "Cannot specify both config and configText"; + } + ]; + + systemd.tmpfiles.rules = [ + "d /var/cache/netdata 0755 ${cfg.user} ${cfg.group} -" + "Z /var/cache/netdata - ${cfg.user} ${cfg.group} -" + "d /var/log/netdata 0755 ${cfg.user} ${cfg.group} -" + "Z /var/log/netdata - ${cfg.user} ${cfg.group} -" + "d /var/lib/netdata 0755 ${cfg.user} ${cfg.group} -" + "Z /var/lib/netdata - ${cfg.user} ${cfg.group} -" + "d /etc/netdata 0755 ${cfg.user} ${cfg.group} -" + "Z /etc/netdata - ${cfg.user} ${cfg.group} -" + ]; + systemd.services.netdata = { + description = "Real time performance monitoring"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + path = (with pkgs; [ gawk curl ]) ++ lib.optional cfg.python.enable + (pkgs.python3.withPackages cfg.python.extraPackages); + serviceConfig = { + User = cfg.user; + Group = cfg.group; + Environment="PYTHONPATH=${pkgs.netdata}/libexec/netdata/python.d/python_modules"; + PermissionsStartOnly = true; + ExecStart = "${pkgs.netdata}/bin/netdata -D -c ${configFile}"; + TimeoutStopSec = 60; + }; + }; + + security.wrappers."apps.plugin" = { + source = "${pkgs.netdata}/libexec/netdata/plugins.d/apps.plugin.org"; + capabilities = "cap_dac_read_search,cap_sys_ptrace+ep"; + owner = cfg.user; + group = cfg.group; + permissions = "u+rx,g+rx,o-rwx"; + }; + + + users.users = optional (cfg.user == defaultUser) { + name = defaultUser; + }; + + users.groups = optional (cfg.group == defaultUser) { + name = defaultUser; + }; + + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/osquery.nix b/nixpkgs/nixos/modules/services/monitoring/osquery.nix new file mode 100644 index 000000000000..c8c625577d39 --- /dev/null +++ 
b/nixpkgs/nixos/modules/services/monitoring/osquery.nix @@ -0,0 +1,91 @@ +{ config, lib, pkgs, ... }: + +with builtins; +with lib; + +let + cfg = config.services.osquery; + +in + +{ + + options = { + + services.osquery = { + + enable = mkEnableOption "osquery"; + + loggerPath = mkOption { + type = types.path; + description = "Base directory used for logging."; + default = "/var/log/osquery"; + }; + + pidfile = mkOption { + type = types.path; + description = "Path used for pid file."; + default = "/var/osquery/osqueryd.pidfile"; + }; + + utc = mkOption { + type = types.bool; + description = "Attempt to convert all UNIX calendar times to UTC."; + default = true; + }; + + databasePath = mkOption { + type = types.path; + description = "Path used for database file."; + default = "/var/osquery/osquery.db"; + }; + + extraConfig = mkOption { + type = types.attrs // { + merge = loc: foldl' (res: def: recursiveUpdate res def.value) {}; + }; + description = "Extra config to be recursively merged into the JSON config file."; + default = { }; + }; + }; + + }; + + config = mkIf cfg.enable { + + environment.systemPackages = [ pkgs.osquery ]; + + environment.etc."osquery/osquery.conf".text = toJSON ( + recursiveUpdate { + options = { + config_plugin = "filesystem"; + logger_plugin = "filesystem"; + logger_path = cfg.loggerPath; + database_path = cfg.databasePath; + utc = cfg.utc; + }; + } cfg.extraConfig + ); + + systemd.services.osqueryd = { + description = "The osquery Daemon"; + after = [ "network.target" "syslog.service" ]; + wantedBy = [ "multi-user.target" ]; + path = [ pkgs.osquery ]; + preStart = '' + mkdir -p ${escapeShellArg cfg.loggerPath} + mkdir -p "$(dirname ${escapeShellArg cfg.pidfile})" + mkdir -p "$(dirname ${escapeShellArg cfg.databasePath})" + ''; + serviceConfig = { + TimeoutStartSec = "infinity"; + ExecStart = "${pkgs.osquery}/bin/osqueryd --logger_path ${escapeShellArg cfg.loggerPath} --pidfile ${escapeShellArg cfg.pidfile} --database_path ${escapeShellArg 
cfg.databasePath}"; + KillMode = "process"; + KillSignal = "SIGTERM"; + Restart = "on-failure"; + }; + }; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix new file mode 100644 index 000000000000..43b4a41eaf33 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix @@ -0,0 +1,169 @@ +{ config, pkgs, lib, ... }: + +with lib; + +let + cfg = config.services.prometheus.alertmanager; + mkConfigFile = pkgs.writeText "alertmanager.yml" (builtins.toJSON cfg.configuration); + + checkedConfig = file: pkgs.runCommand "checked-config" { buildInputs = [ cfg.package ]; } '' + ln -s ${file} $out + amtool check-config $out + ''; + + alertmanagerYml = let + yml = if cfg.configText != null then + pkgs.writeText "alertmanager.yml" cfg.configText + else mkConfigFile; + in checkedConfig yml; + + cmdlineArgs = cfg.extraFlags ++ [ + "--config.file ${alertmanagerYml}" + "--web.listen-address ${cfg.listenAddress}:${toString cfg.port}" + "--log.level ${cfg.logLevel}" + ] ++ (optional (cfg.webExternalUrl != null) + "--web.external-url ${cfg.webExternalUrl}" + ) ++ (optional (cfg.logFormat != null) + "--log.format ${cfg.logFormat}" + ); +in { + options = { + services.prometheus.alertmanager = { + enable = mkEnableOption "Prometheus Alertmanager"; + + package = mkOption { + type = types.package; + default = pkgs.prometheus-alertmanager; + defaultText = "pkgs.alertmanager"; + description = '' + Package that should be used for alertmanager. + ''; + }; + + user = mkOption { + type = types.str; + default = "nobody"; + description = '' + User name under which Alertmanager shall be run. + ''; + }; + + group = mkOption { + type = types.str; + default = "nogroup"; + description = '' + Group under which Alertmanager shall be run. 
+ ''; + }; + + configuration = mkOption { + type = types.nullOr types.attrs; + default = null; + description = '' + Alertmanager configuration as nix attribute set. + ''; + }; + + configText = mkOption { + type = types.nullOr types.lines; + default = null; + description = '' + Alertmanager configuration as YAML text. If non-null, this option + defines the text that is written to alertmanager.yml. If null, the + contents of alertmanager.yml is generated from the structured config + options. + ''; + }; + + logFormat = mkOption { + type = types.nullOr types.str; + default = null; + description = '' + If set use a syslog logger or JSON logging. + ''; + }; + + logLevel = mkOption { + type = types.enum ["debug" "info" "warn" "error" "fatal"]; + default = "warn"; + description = '' + Only log messages with the given severity or above. + ''; + }; + + webExternalUrl = mkOption { + type = types.nullOr types.str; + default = null; + description = '' + The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). + Used for generating relative and absolute links back to Alertmanager itself. + If the URL has a path portion, it will be used to prefix all HTTP endoints served by Alertmanager. + If omitted, relevant URL components will be derived automatically. + ''; + }; + + listenAddress = mkOption { + type = types.str; + default = ""; + description = '' + Address to listen on for the web interface and API. + ''; + }; + + port = mkOption { + type = types.int; + default = 9093; + description = '' + Port to listen on for the web interface and API. + ''; + }; + + openFirewall = mkOption { + type = types.bool; + default = false; + description = '' + Open port in firewall for incoming connections. + ''; + }; + + extraFlags = mkOption { + type = types.listOf types.str; + default = []; + description = '' + Extra commandline options when launching the Alertmanager. 
+ ''; + }; + }; + }; + + config = mkMerge [ + (mkIf cfg.enable { + assertions = singleton { + assertion = cfg.configuration != null || cfg.configText != null; + message = "Can not enable alertmanager without a configuration. " + + "Set either the `configuration` or `configText` attribute."; + }; + }) + (mkIf cfg.enable { + networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port; + + systemd.services.alertmanager = { + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + script = '' + ${cfg.package}/bin/alertmanager \ + ${concatStringsSep " \\\n " cmdlineArgs} + ''; + + serviceConfig = { + User = cfg.user; + Group = cfg.group; + Restart = "always"; + PrivateTmp = true; + WorkingDirectory = "/tmp"; + ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; + }; + }; + }) + ]; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix new file mode 100644 index 000000000000..bf4dfc666bb6 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix @@ -0,0 +1,525 @@ +{ config, pkgs, lib, ... }: + +with lib; + +let + cfg = config.services.prometheus; + promUser = "prometheus"; + promGroup = "prometheus"; + + # Get a submodule without any embedded metadata: + _filter = x: filterAttrs (k: v: k != "_module") x; + + # a wrapper that verifies that the configuration is valid + promtoolCheck = what: name: file: pkgs.runCommand "${name}-${what}-checked" + { buildInputs = [ cfg.package ]; } '' + ln -s ${file} $out + promtool ${what} $out + ''; + + # Pretty-print JSON to a file + writePrettyJSON = name: x: + pkgs.runCommand name { } '' + echo '${builtins.toJSON x}' | ${pkgs.jq}/bin/jq . 
> $out + ''; + + # This becomes the main config file + promConfig = { + global = cfg.globalConfig; + rule_files = map (promtoolCheck "check-rules" "rules") (cfg.ruleFiles ++ [ + (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules)) + ]); + scrape_configs = cfg.scrapeConfigs; + }; + + generatedPrometheusYml = writePrettyJSON "prometheus.yml" promConfig; + + prometheusYml = let + yml = if cfg.configText != null then + pkgs.writeText "prometheus.yml" cfg.configText + else generatedPrometheusYml; + in promtoolCheck "check-config" "prometheus.yml" yml; + + cmdlineArgs = cfg.extraFlags ++ [ + "-storage.local.path=${cfg.dataDir}/metrics" + "-config.file=${prometheusYml}" + "-web.listen-address=${cfg.listenAddress}" + "-alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}" + "-alertmanager.timeout=${toString cfg.alertmanagerTimeout}s" + (optionalString (cfg.alertmanagerURL != []) "-alertmanager.url=${concatStringsSep "," cfg.alertmanagerURL}") + (optionalString (cfg.webExternalUrl != null) "-web.external-url=${cfg.webExternalUrl}") + ]; + + promTypes.globalConfig = types.submodule { + options = { + scrape_interval = mkOption { + type = types.str; + default = "1m"; + description = '' + How frequently to scrape targets by default. + ''; + }; + + scrape_timeout = mkOption { + type = types.str; + default = "10s"; + description = '' + How long until a scrape request times out. + ''; + }; + + evaluation_interval = mkOption { + type = types.str; + default = "1m"; + description = '' + How frequently to evaluate rules by default. + ''; + }; + + external_labels = mkOption { + type = types.attrsOf types.str; + description = '' + The labels to add to any time series or alerts when + communicating with external systems (federation, remote + storage, Alertmanager). 
+ ''; + default = {}; + }; + }; + }; + + promTypes.scrape_config = types.submodule { + options = { + job_name = mkOption { + type = types.str; + description = '' + The job name assigned to scraped metrics by default. + ''; + }; + scrape_interval = mkOption { + type = types.nullOr types.str; + default = null; + description = '' + How frequently to scrape targets from this job. Defaults to the + globally configured default. + ''; + }; + scrape_timeout = mkOption { + type = types.nullOr types.str; + default = null; + description = '' + Per-target timeout when scraping this job. Defaults to the + globally configured default. + ''; + }; + metrics_path = mkOption { + type = types.str; + default = "/metrics"; + description = '' + The HTTP resource path on which to fetch metrics from targets. + ''; + }; + honor_labels = mkOption { + type = types.bool; + default = false; + description = '' + Controls how Prometheus handles conflicts between labels + that are already present in scraped data and labels that + Prometheus would attach server-side ("job" and "instance" + labels, manually configured target labels, and labels + generated by service discovery implementations). + + If honor_labels is set to "true", label conflicts are + resolved by keeping label values from the scraped data and + ignoring the conflicting server-side labels. + + If honor_labels is set to "false", label conflicts are + resolved by renaming conflicting labels in the scraped data + to "exported_<original-label>" (for example + "exported_instance", "exported_job") and then attaching + server-side labels. This is useful for use cases such as + federation, where all labels specified in the target should + be preserved. + ''; + }; + scheme = mkOption { + type = types.enum ["http" "https"]; + default = "http"; + description = '' + The URL scheme with which to fetch metrics from targets. 
+ ''; + }; + params = mkOption { + type = types.attrsOf (types.listOf types.str); + default = {}; + description = '' + Optional HTTP URL parameters. + ''; + }; + basic_auth = mkOption { + type = types.nullOr (types.submodule { + options = { + username = mkOption { + type = types.str; + description = '' + HTTP username + ''; + }; + password = mkOption { + type = types.str; + description = '' + HTTP password + ''; + }; + }; + }); + default = null; + apply = x: mapNullable _filter x; + description = '' + Optional http login credentials for metrics scraping. + ''; + }; + dns_sd_configs = mkOption { + type = types.listOf promTypes.dns_sd_config; + default = []; + apply = x: map _filter x; + description = '' + List of DNS service discovery configurations. + ''; + }; + consul_sd_configs = mkOption { + type = types.listOf promTypes.consul_sd_config; + default = []; + apply = x: map _filter x; + description = '' + List of Consul service discovery configurations. + ''; + }; + file_sd_configs = mkOption { + type = types.listOf promTypes.file_sd_config; + default = []; + apply = x: map _filter x; + description = '' + List of file service discovery configurations. + ''; + }; + static_configs = mkOption { + type = types.listOf promTypes.static_config; + default = []; + apply = x: map _filter x; + description = '' + List of labeled target groups for this job. + ''; + }; + relabel_configs = mkOption { + type = types.listOf promTypes.relabel_config; + default = []; + apply = x: map _filter x; + description = '' + List of relabel configurations. + ''; + }; + }; + }; + + promTypes.static_config = types.submodule { + options = { + targets = mkOption { + type = types.listOf types.str; + description = '' + The targets specified by the target group. + ''; + }; + labels = mkOption { + type = types.attrsOf types.str; + default = {}; + description = '' + Labels assigned to all metrics scraped from the targets. 
+ ''; + }; + }; + }; + + promTypes.dns_sd_config = types.submodule { + options = { + names = mkOption { + type = types.listOf types.str; + description = '' + A list of DNS SRV record names to be queried. + ''; + }; + refresh_interval = mkOption { + type = types.str; + default = "30s"; + description = '' + The time after which the provided names are refreshed. + ''; + }; + }; + }; + + promTypes.consul_sd_config = types.submodule { + options = { + server = mkOption { + type = types.str; + description = "Consul server to query."; + }; + token = mkOption { + type = types.nullOr types.str; + description = "Consul token"; + }; + datacenter = mkOption { + type = types.nullOr types.str; + description = "Consul datacenter"; + }; + scheme = mkOption { + type = types.nullOr types.str; + description = "Consul scheme"; + }; + username = mkOption { + type = types.nullOr types.str; + description = "Consul username"; + }; + password = mkOption { + type = types.nullOr types.str; + description = "Consul password"; + }; + + services = mkOption { + type = types.listOf types.str; + description = '' + A list of services for which targets are retrieved. + ''; + }; + tag_separator = mkOption { + type = types.str; + default = ","; + description = '' + The string by which Consul tags are joined into the tag label. + ''; + }; + }; + }; + + promTypes.file_sd_config = types.submodule { + options = { + files = mkOption { + type = types.listOf types.str; + description = '' + Patterns for files from which target groups are extracted. Refer + to the Prometheus documentation for permitted filename patterns + and formats. + + ''; + }; + refresh_interval = mkOption { + type = types.str; + default = "30s"; + description = '' + Refresh interval to re-read the files. + ''; + }; + }; + }; + + promTypes.relabel_config = types.submodule { + options = { + source_labels = mkOption { + type = types.listOf types.str; + description = '' + The source labels select values from existing labels. 
Their content + is concatenated using the configured separator and matched against + the configured regular expression. + ''; + }; + separator = mkOption { + type = types.str; + default = ";"; + description = '' + Separator placed between concatenated source label values. + ''; + }; + target_label = mkOption { + type = types.nullOr types.str; + default = null; + description = '' + Label to which the resulting value is written in a replace action. + It is mandatory for replace actions. + ''; + }; + regex = mkOption { + type = types.str; + default = "(.*)"; + description = '' + Regular expression against which the extracted value is matched. + ''; + }; + replacement = mkOption { + type = types.str; + default = "$1"; + description = '' + Replacement value against which a regex replace is performed if the + regular expression matches. + ''; + }; + action = mkOption { + type = types.enum ["replace" "keep" "drop"]; + default = "replace"; + description = '' + Action to perform based on regex matching. + ''; + }; + }; + }; + +in { + options = { + services.prometheus = { + + enable = mkOption { + type = types.bool; + default = false; + description = '' + Enable the Prometheus monitoring daemon. + ''; + }; + + package = mkOption { + type = types.package; + default = pkgs.prometheus; + defaultText = "pkgs.prometheus"; + description = '' + The prometheus package that should be used. + ''; + }; + + listenAddress = mkOption { + type = types.str; + default = "0.0.0.0:9090"; + description = '' + Address to listen on for the web interface, API, and telemetry. + ''; + }; + + dataDir = mkOption { + type = types.path; + default = "/var/lib/prometheus"; + description = '' + Directory to store Prometheus metrics data. + ''; + }; + + extraFlags = mkOption { + type = types.listOf types.str; + default = []; + description = '' + Extra commandline options when launching Prometheus. 
+ ''; + }; + + configText = mkOption { + type = types.nullOr types.lines; + default = null; + description = '' + If non-null, this option defines the text that is written to + prometheus.yml. If null, the contents of prometheus.yml is generated + from the structured config options. + ''; + }; + + globalConfig = mkOption { + type = promTypes.globalConfig; + default = {}; + apply = _filter; + description = '' + Parameters that are valid in all configuration contexts. They + also serve as defaults for other configuration sections + ''; + }; + + rules = mkOption { + type = types.listOf types.str; + default = []; + description = '' + Alerting and/or Recording rules to evaluate at runtime. + ''; + }; + + ruleFiles = mkOption { + type = types.listOf types.path; + default = []; + description = '' + Any additional rules files to include in this configuration. + ''; + }; + + scrapeConfigs = mkOption { + type = types.listOf promTypes.scrape_config; + default = []; + apply = x: map _filter x; + description = '' + A list of scrape configurations. + ''; + }; + + alertmanagerURL = mkOption { + type = types.listOf types.str; + default = []; + description = '' + List of Alertmanager URLs to send notifications to. + ''; + }; + + alertmanagerNotificationQueueCapacity = mkOption { + type = types.int; + default = 10000; + description = '' + The capacity of the queue for pending alert manager notifications. + ''; + }; + + alertmanagerTimeout = mkOption { + type = types.int; + default = 10; + description = '' + Alert manager HTTP API timeout (in seconds). + ''; + }; + + webExternalUrl = mkOption { + type = types.nullOr types.str; + default = null; + example = "https://example.com/"; + description = '' + The URL under which Prometheus is externally reachable (for example, + if Prometheus is served via a reverse proxy). 
+ ''; + }; + }; + }; + + config = mkIf cfg.enable { + users.groups.${promGroup}.gid = config.ids.gids.prometheus; + users.users.${promUser} = { + description = "Prometheus daemon user"; + uid = config.ids.uids.prometheus; + group = promGroup; + home = cfg.dataDir; + createHome = true; + }; + systemd.services.prometheus = { + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + script = '' + #!/bin/sh + exec ${cfg.package}/bin/prometheus \ + ${concatStringsSep " \\\n " cmdlineArgs} + ''; + serviceConfig = { + User = promUser; + Restart = "always"; + WorkingDirectory = cfg.dataDir; + }; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix new file mode 100644 index 000000000000..5308c9c4ee08 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix @@ -0,0 +1,178 @@ +{ config, pkgs, lib, ... }: + +with lib; + +let + cfg = config.services.prometheus.exporters; + + # each attribute in `exporterOpts` is expected to have specified: + # - port (types.int): port on which the exporter listens + # - serviceOpts (types.attrs): config that is merged with the + # default definition of the exporter's + # systemd service + # - extraOpts (types.attrs): extra configuration options to + # configure the exporter with, which + # are appended to the default options + # + # Note that `extraOpts` is optional, but a script for the exporter's + # systemd service must be provided by specifying either + # `serviceOpts.script` or `serviceOpts.serviceConfig.ExecStart` + exporterOpts = { + blackbox = import ./exporters/blackbox.nix { inherit config lib pkgs; }; + collectd = import ./exporters/collectd.nix { inherit config lib pkgs; }; + dnsmasq = import ./exporters/dnsmasq.nix { inherit config lib pkgs; }; + dovecot = import ./exporters/dovecot.nix { inherit config lib pkgs; }; + fritzbox = import ./exporters/fritzbox.nix { inherit config lib 
pkgs; }; + json = import ./exporters/json.nix { inherit config lib pkgs; }; + minio = import ./exporters/minio.nix { inherit config lib pkgs; }; + nginx = import ./exporters/nginx.nix { inherit config lib pkgs; }; + node = import ./exporters/node.nix { inherit config lib pkgs; }; + postfix = import ./exporters/postfix.nix { inherit config lib pkgs; }; + snmp = import ./exporters/snmp.nix { inherit config lib pkgs; }; + surfboard = import ./exporters/surfboard.nix { inherit config lib pkgs; }; + tor = import ./exporters/tor.nix { inherit config lib pkgs; }; + unifi = import ./exporters/unifi.nix { inherit config lib pkgs; }; + varnish = import ./exporters/varnish.nix { inherit config lib pkgs; }; + }; + + mkExporterOpts = ({ name, port }: { + enable = mkEnableOption "the prometheus ${name} exporter"; + port = mkOption { + type = types.int; + default = port; + description = '' + Port to listen on. + ''; + }; + listenAddress = mkOption { + type = types.str; + default = "0.0.0.0"; + description = '' + Address to listen on. + ''; + }; + extraFlags = mkOption { + type = types.listOf types.str; + default = []; + description = '' + Extra commandline options to pass to the ${name} exporter. + ''; + }; + openFirewall = mkOption { + type = types.bool; + default = false; + description = '' + Open port in firewall for incoming connections. + ''; + }; + firewallFilter = mkOption { + type = types.str; + default = "-p tcp -m tcp --dport ${toString port}"; + example = literalExample '' + "-i eth0 -p tcp -m tcp --dport ${toString port}" + ''; + description = '' + Specify a filter for iptables to use when + <option>services.prometheus.exporters.${name}.openFirewall</option> + is true. It is used as `ip46tables -I nixos-fw <option>firewallFilter</option> -j nixos-fw-accept`. + ''; + }; + user = mkOption { + type = types.str; + default = "nobody"; + description = '' + User name under which the ${name} exporter shall be run. 
+ Has no effect when <option>systemd.services.prometheus-${name}-exporter.serviceConfig.DynamicUser</option> is true. + ''; + }; + group = mkOption { + type = types.str; + default = "nobody"; + description = '' + Group under which the ${name} exporter shall be run. + Has no effect when <option>systemd.services.prometheus-${name}-exporter.serviceConfig.DynamicUser</option> is true. + ''; + }; + }); + + mkSubModule = { name, port, extraOpts, ... }: { + ${name} = mkOption { + type = types.submodule { + options = (mkExporterOpts { + inherit name port; + } // extraOpts); + }; + internal = true; + default = {}; + }; + }; + + mkSubModules = (foldl' (a: b: a//b) {} + (mapAttrsToList (name: opts: mkSubModule { + inherit name; + inherit (opts) port serviceOpts; + extraOpts = opts.extraOpts or {}; + }) exporterOpts) + ); + + mkExporterConf = { name, conf, serviceOpts }: + mkIf conf.enable { + networking.firewall.extraCommands = mkIf conf.openFirewall (concatStrings [ + "ip46tables -I nixos-fw ${conf.firewallFilter} " + "-m comment --comment ${name}-exporter -j nixos-fw-accept" + ]); + systemd.services."prometheus-${name}-exporter" = mkMerge ([{ + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + serviceConfig.Restart = mkDefault "always"; + serviceConfig.PrivateTmp = mkDefault true; + serviceConfig.WorkingDirectory = mkDefault /tmp; + } serviceOpts ] ++ optional (!(serviceOpts.serviceConfig.DynamicUser or false)) { + serviceConfig.User = conf.user; + serviceConfig.Group = conf.group; + }); + }; +in +{ + options.services.prometheus.exporters = mkOption { + type = types.submodule { + options = (mkSubModules); + }; + description = "Prometheus exporter configuration"; + default = {}; + example = literalExample '' + { + node = { + enable = true; + enabledCollectors = [ "systemd" ]; + }; + varnish.enable = true; + } + ''; + }; + + config = mkMerge ([{ + assertions = [{ + assertion = (cfg.snmp.configurationPath == null) != (cfg.snmp.configuration == null); + 
message = '' + Please ensure you have either `services.prometheus.exporters.snmp.configuration' + or `services.prometheus.exporters.snmp.configurationPath' set! + ''; + }]; + }] ++ [(mkIf config.services.minio.enable { + services.prometheus.exporters.minio.minioAddress = mkDefault "http://localhost:9000"; + services.prometheus.exporters.minio.minioAccessKey = mkDefault config.services.minio.accessKey; + services.prometheus.exporters.minio.minioAccessSecret = mkDefault config.services.minio.secretKey; + })] ++ (mapAttrsToList (name: conf: + mkExporterConf { + inherit name; + inherit (conf) serviceOpts; + conf = cfg.${name}; + }) exporterOpts) + ); + + meta = { + doc = ./exporters.xml; + maintainers = [ maintainers.willibutz ]; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.xml b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.xml new file mode 100644 index 000000000000..7a0a1bdf2c14 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.xml @@ -0,0 +1,187 @@ +<chapter xmlns="http://docbook.org/ns/docbook" + xmlns:xlink="http://www.w3.org/1999/xlink" + xmlns:xi="http://www.w3.org/2001/XInclude" + version="5.0" + xml:id="module-services-prometheus-exporters"> + <title>Prometheus exporters</title> + <para> + Prometheus exporters provide metrics for the + <link xlink:href="https://prometheus.io">prometheus monitoring system</link>. + </para> + <section xml:id="module-services-prometheus-exporters-configuration"> + <title>Configuration</title> + + <para> + One of the most common exporters is the + <link xlink:href="https://github.com/prometheus/node_exporter">node + exporter</link>, it provides hardware and OS metrics from the host it's + running on. 
The exporter could be configured as follows: +<programlisting> + services.prometheus.exporters.node = { + enable = true; + enabledCollectors = [ + "logind" + "systemd" + ]; + disabledCollectors = [ + "textfile" + ]; + openFirewall = true; + firewallFilter = "-i br0 -p tcp -m tcp --dport 9100"; + }; +</programlisting> + It should now serve all metrics from the collectors that are explicitly + enabled and the ones that are + <link xlink:href="https://github.com/prometheus/node_exporter#enabled-by-default">enabled + by default</link>, via http under <literal>/metrics</literal>. In this + example the firewall should just allow incoming connections to the + exporter's port on the bridge interface <literal>br0</literal> (this would + have to be configured separately of course). For more information about + configuration see <literal>man configuration.nix</literal> or search through + the + <link xlink:href="https://nixos.org/nixos/options.html#prometheus.exporters">available + options</link>. + </para> + </section> + <section xml:id="module-services-prometheus-exporters-new-exporter"> + <title>Adding a new exporter</title> + + <para> + To add a new exporter, it has to be packaged first (see + <literal>nixpkgs/pkgs/servers/monitoring/prometheus/</literal> for + examples), then a module can be added.
The postfix exporter is used in this + example: + </para> + + <itemizedlist> + <listitem> + <para> + Some default options for all exporters are provided by + <literal>nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix</literal>: + </para> + </listitem> + <listitem override='none'> + <itemizedlist> + <listitem> + <para> + <literal>enable</literal> + </para> + </listitem> + <listitem> + <para> + <literal>port</literal> + </para> + </listitem> + <listitem> + <para> + <literal>listenAddress</literal> + </para> + </listitem> + <listitem> + <para> + <literal>extraFlags</literal> + </para> + </listitem> + <listitem> + <para> + <literal>openFirewall</literal> + </para> + </listitem> + <listitem> + <para> + <literal>firewallFilter</literal> + </para> + </listitem> + <listitem> + <para> + <literal>user</literal> + </para> + </listitem> + <listitem> + <para> + <literal>group</literal> + </para> + </listitem> + </itemizedlist> + </listitem> + <listitem> + <para> + As there is already a package available, the module can now be added. This + is accomplished by adding a new file to the + <literal>nixos/modules/services/monitoring/prometheus/exporters/</literal> + directory, which will be called postfix.nix and contains all exporter + specific options and configuration: +<programlisting> + # nixpgs/nixos/modules/services/prometheus/exporters/postfix.nix + { config, lib, pkgs }: + + with lib; + + let + # for convenience we define cfg here + cfg = config.services.prometheus.exporters.postfix; + in + { + port = 9154; # The postfix exporter listens on this port by default + + # `extraOpts` is an attribute set which contains additional options + # (and optional overrides for default options). + # Note that this attribute is optional. + extraOpts = { + telemetryPath = mkOption { + type = types.str; + default = "/metrics"; + description = '' + Path under which to expose metrics. 
+ ''; + }; + logfilePath = mkOption { + type = types.path; + default = /var/log/postfix_exporter_input.log; + example = /var/log/mail.log; + description = '' + Path where Postfix writes log entries. + This file will be truncated by this exporter! + ''; + }; + showqPath = mkOption { + type = types.path; + default = /var/spool/postfix/public/showq; + example = /var/lib/postfix/queue/public/showq; + description = '' + Path at which Postfix places its showq socket. + ''; + }; + }; + + # `serviceOpts` is an attribute set which contains configuration + # for the exporter's systemd service. One of + # `serviceOpts.script` and `serviceOpts.serviceConfig.ExecStart` + # has to be specified here. This will be merged with the default + # service confiuration. + serviceOpts = { + serviceConfig = { + ExecStart = '' + ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \ + --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ + --web.telemetry-path ${cfg.telemetryPath} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; + } + </programlisting> + </para> + </listitem> + <listitem> + <para> + This should already be enough for the postfix exporter. Additionally one + could now add assertions and conditional default values. 
This can be done + in the 'meta-module' that combines all exporter definitions and generates + the submodules: + <literal>nixpkgs/nixos/modules/services/prometheus/exporters.nix</literal> + </para> + </listitem> + </itemizedlist> + </section> +</chapter> diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix new file mode 100644 index 000000000000..d09d1c4f3663 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix @@ -0,0 +1,31 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.blackbox; +in +{ + port = 9115; + extraOpts = { + configFile = mkOption { + type = types.path; + description = '' + Path to configuration file. + ''; + }; + }; + serviceOpts = { + serviceConfig = { + AmbientCapabilities = [ "CAP_NET_RAW" ]; # for ping probes + DynamicUser = true; + ExecStart = '' + ${pkgs.prometheus-blackbox-exporter}/bin/blackbox_exporter \ + --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ + --config.file ${cfg.configFile} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix new file mode 100644 index 000000000000..0eba3527162d --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix @@ -0,0 +1,78 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.collectd; +in +{ + port = 9103; + extraOpts = { + collectdBinary = { + enable = mkEnableOption "collectd binary protocol receiver"; + + authFile = mkOption { + default = null; + type = types.nullOr types.path; + description = "File mapping user names to pre-shared keys (passwords)."; + }; + + port = mkOption 
{ + type = types.int; + default = 25826; + description = ''Network address on which to accept collectd binary network packets.''; + }; + + listenAddress = mkOption { + type = types.str; + default = "0.0.0.0"; + description = '' + Address to listen on for binary network packets. + ''; + }; + + securityLevel = mkOption { + type = types.enum ["None" "Sign" "Encrypt"]; + default = "None"; + description = '' + Minimum required security level for accepted packets. + ''; + }; + }; + + logFormat = mkOption { + type = types.str; + default = "logger:stderr"; + example = "logger:syslog?appname=bob&local=7 or logger:stdout?json=true"; + description = '' + Set the log target and format. + ''; + }; + + logLevel = mkOption { + type = types.enum ["debug" "info" "warn" "error" "fatal"]; + default = "info"; + description = '' + Only log messages with the given severity or above. + ''; + }; + }; + serviceOpts = let + collectSettingsArgs = if (cfg.collectdBinary.enable) then '' + -collectd.listen-address ${cfg.collectdBinary.listenAddress}:${toString cfg.collectdBinary.port} \ + -collectd.security-level ${cfg.collectdBinary.securityLevel} \ + '' else ""; + in { + serviceConfig = { + DynamicUser = true; + ExecStart = '' + ${pkgs.prometheus-collectd-exporter}/bin/collectd_exporter \ + -log.format ${cfg.logFormat} \ + -log.level ${cfg.logLevel} \ + -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ + ${collectSettingsArgs} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix new file mode 100644 index 000000000000..b1fab85109af --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix @@ -0,0 +1,39 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.dnsmasq; +in +{ + port = 9153; + extraOpts = { + dnsmasqListenAddress = 
mkOption { + type = types.str; + default = "localhost:53"; + description = '' + Address on which dnsmasq listens. + ''; + }; + leasesPath = mkOption { + type = types.path; + default = "/var/lib/misc/dnsmasq.leases"; + example = "/var/lib/dnsmasq/dnsmasq.leases"; + description = '' + Path to the <literal>dnsmasq.leases</literal> file. + ''; + }; + }; + serviceOpts = { + serviceConfig = { + DynamicUser = true; + ExecStart = '' + ${pkgs.prometheus-dnsmasq-exporter}/bin/dnsmasq_exporter \ + --listen ${cfg.listenAddress}:${toString cfg.port} \ + --dnsmasq ${cfg.dnsmasqListenAddress} \ + --leases_path ${cfg.leasesPath} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix new file mode 100644 index 000000000000..4ca6d4e5f8b6 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix @@ -0,0 +1,50 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.dovecot; +in +{ + port = 9166; + extraOpts = { + telemetryPath = mkOption { + type = types.str; + default = "/metrics"; + description = '' + Path under which to expose metrics. + ''; + }; + socketPath = mkOption { + type = types.path; + default = "/var/run/dovecot/stats"; + example = "/var/run/dovecot2/stats"; + description = '' + Path under which the stats socket is placed. + The user/group under which the exporter runs, + should be able to access the socket in order + to scrape the metrics successfully. + ''; + }; + scopes = mkOption { + type = types.listOf types.str; + default = [ "user" ]; + example = [ "user" "global" ]; + description = '' + Stats scopes to query. 
+ ''; + }; + }; + serviceOpts = { + serviceConfig = { + ExecStart = '' + ${pkgs.prometheus-dovecot-exporter}/bin/dovecot_exporter \ + --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ + --web.telemetry-path ${cfg.telemetryPath} \ + --dovecot.socket-path ${cfg.socketPath} \ + --dovecot.scopes ${concatStringsSep "," cfg.scopes} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix new file mode 100644 index 000000000000..a3f1d9d31323 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix @@ -0,0 +1,39 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.fritzbox; +in +{ + port = 9133; + extraOpts = { + gatewayAddress = mkOption { + type = types.str; + default = "fritz.box"; + description = '' + The hostname or IP of the FRITZ!Box. + ''; + }; + + gatewayPort = mkOption { + type = types.int; + default = 49000; + description = '' + The port of the FRITZ!Box UPnP service. 
+ ''; + }; + }; + serviceOpts = { + serviceConfig = { + DynamicUser = true; + ExecStart = '' + ${pkgs.prometheus-fritzbox-exporter}/bin/fritzbox_exporter \ + -listen-address ${cfg.listenAddress}:${toString cfg.port} \ + -gateway-address ${cfg.gatewayAddress} \ + -gateway-port ${toString cfg.gatewayPort} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix new file mode 100644 index 000000000000..a5494e85e016 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix @@ -0,0 +1,36 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.json; +in +{ + port = 7979; + extraOpts = { + url = mkOption { + type = types.str; + description = '' + URL to scrape JSON from. + ''; + }; + configFile = mkOption { + type = types.path; + description = '' + Path to configuration file. + ''; + }; + listenAddress = {}; # not used + }; + serviceOpts = { + serviceConfig = { + DynamicUser = true; + ExecStart = '' + ${pkgs.prometheus-json-exporter}/bin/prometheus-json-exporter \ + --port ${toString cfg.port} \ + ${cfg.url} ${cfg.configFile} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix new file mode 100644 index 000000000000..3cc4ffdbc8fd --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix @@ -0,0 +1,65 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.minio; +in +{ + port = 9290; + extraOpts = { + minioAddress = mkOption { + type = types.str; + example = "https://10.0.0.1:9000"; + description = '' + The URL of the minio server. + Use HTTPS if Minio accepts secure connections only. 
+ By default this connects to the local minio server if enabled. + ''; + }; + + minioAccessKey = mkOption { + type = types.str; + example = "yourMinioAccessKey"; + description = '' + The value of the Minio access key. + It is required in order to connect to the server. + By default this uses the one from the local minio server if enabled + and <literal>config.services.minio.accessKey</literal>. + ''; + }; + + minioAccessSecret = mkOption { + type = types.str; + description = '' + The value of the Minio access secret. + It is required in order to connect to the server. + By default this uses the one from the local minio server if enabled + and <literal>config.services.minio.secretKey</literal>. + ''; + }; + + minioBucketStats = mkOption { + type = types.bool; + default = false; + description = '' + Collect statistics about the buckets and files in buckets. + It requires more computation, use it carefully in case of large buckets.. + ''; + }; + }; + serviceOpts = { + serviceConfig = { + DynamicUser = true; + ExecStart = '' + ${pkgs.prometheus-minio-exporter}/bin/minio-exporter \ + -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ + -minio.server ${cfg.minioAddress} \ + -minio.access-key ${cfg.minioAccessKey} \ + -minio.access-secret ${cfg.minioAccessSecret} \ + ${optionalString cfg.minioBucketStats "-minio.bucket-stats"} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix new file mode 100644 index 000000000000..431dd8b4ead7 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix @@ -0,0 +1,47 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.nginx; +in +{ + port = 9113; + extraOpts = { + scrapeUri = mkOption { + type = types.str; + default = "http://localhost/nginx_status"; + description = '' + Address 
to access the nginx status page.
+          Can be enabled with services.nginx.statusPage = true.
+        '';
+      };
+    telemetryEndpoint = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+    insecure = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Ignore server certificate if using https.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-nginx-exporter}/bin/nginx_exporter \
+          --nginx.scrape_uri '${cfg.scrapeUri}' \
+          --telemetry.address ${cfg.listenAddress}:${toString cfg.port} \
+          --telemetry.endpoint ${cfg.telemetryEndpoint} \
+          --insecure ${toString cfg.insecure} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix
new file mode 100644
index 000000000000..ee7bf39f199a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix
@@ -0,0 +1,40 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.node;
+in
+{
+  port = 9100;
+  extraOpts = {
+    enabledCollectors = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = ''[ "systemd" ]'';
+      description = ''
+        Collectors to enable. The collectors listed here are enabled in addition to the default ones.
+      '';
+    };
+    disabledCollectors = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = ''[ "timex" ]'';
+      description = ''
+        Collectors to disable which are enabled by default.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      RuntimeDirectory = "prometheus-node-exporter";
+      ExecStart = ''
+        ${pkgs.prometheus-node-exporter}/bin/node_exporter \
+          ${concatMapStringsSep " " (x: "--collector." + x) cfg.enabledCollectors} \
+          ${concatMapStringsSep " " (x: "--no-collector."
+ x) cfg.disabledCollectors} \ + --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix new file mode 100644 index 000000000000..efe78ebcba86 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix @@ -0,0 +1,81 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.postfix; +in +{ + port = 9154; + extraOpts = { + telemetryPath = mkOption { + type = types.str; + default = "/metrics"; + description = '' + Path under which to expose metrics. + ''; + }; + logfilePath = mkOption { + type = types.path; + default = "/var/log/postfix_exporter_input.log"; + example = "/var/log/mail.log"; + description = '' + Path where Postfix writes log entries. + This file will be truncated by this exporter! + ''; + }; + showqPath = mkOption { + type = types.path; + default = "/var/spool/postfix/public/showq"; + example = "/var/lib/postfix/queue/public/showq"; + description = '' + Path where Postfix places it's showq socket. + ''; + }; + systemd = { + enable = mkEnableOption '' + reading metrics from the systemd-journal instead of from a logfile + ''; + unit = mkOption { + type = types.str; + default = "postfix.service"; + description = '' + Name of the postfix systemd unit. + ''; + }; + slice = mkOption { + type = types.nullOr types.str; + default = null; + description = '' + Name of the postfix systemd slice. + This overrides the <option>systemd.unit</option>. + ''; + }; + journalPath = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + Path to the systemd journal. 
+        '';
+      };
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --postfix.showq_path ${cfg.showqPath} \
+          ${concatStringsSep " \\\n  " (cfg.extraFlags
+            ++ optional cfg.systemd.enable "--systemd.enable"
+            ++ optional cfg.systemd.enable (if cfg.systemd.slice != null
+              then "--systemd.slice ${cfg.systemd.slice}"
+              else "--systemd.unit ${cfg.systemd.unit}")
+            ++ optional (cfg.systemd.enable && (cfg.systemd.journalPath != null))
+              "--systemd.journal_path ${cfg.systemd.journalPath}"
+            ++ optional (!cfg.systemd.enable) "--postfix.logfile_path ${cfg.logfilePath}")}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
new file mode 100644
index 000000000000..0d9194124325
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.snmp;
+in
+{
+  port = 9116;
+  extraOpts = {
+    configurationPath = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = ''
+        Path to a snmp exporter configuration file. Mutually exclusive with 'configuration' option.
+      '';
+      example = "./snmp.yml";
+    };
+
+    configuration = mkOption {
+      type = types.nullOr types.attrs;
+      default = {};
+      description = ''
+        Snmp exporter configuration as nix attribute set. Mutually exclusive with 'configurationPath' option.
+      '';
+      example = ''
+        {
+          "default" = {
+            "version" = 2;
+            "auth" = {
+              "community" = "public";
+            };
+          };
+        };
+      '';
+    };
+
+    logFormat = mkOption {
+      type = types.str;
+      default = "logger:stderr";
+      description = ''
+        Set the log target and format.
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum ["debug" "info" "warn" "error" "fatal"];
+      default = "info";
+      description = ''
+        Only log messages with the given severity or above.
+      '';
+    };
+  };
+  serviceOpts = let
+    configFile = if cfg.configurationPath != null
+      then cfg.configurationPath
+      else "${pkgs.writeText "snmp-exporter-conf.yml" (builtins.toJSON cfg.configuration)}";
+    in {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-snmp-exporter.bin}/bin/snmp_exporter \
+          --config.file=${configFile} \
+          --log.format=${cfg.logFormat} \
+          --log.level=${cfg.logLevel} \
+          --web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix
new file mode 100644
index 000000000000..715dba06a3dc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix
@@ -0,0 +1,32 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.surfboard;
+in
+{
+  port = 9239;
+  extraOpts = {
+    modemAddress = mkOption {
+      type = types.str;
+      default = "192.168.100.1";
+      description = ''
+        The hostname or IP of the cable modem.
+ ''; + }; + }; + serviceOpts = { + description = "Prometheus exporter for surfboard cable modem"; + unitConfig.Documentation = "https://github.com/ipstatic/surfboard_exporter"; + serviceConfig = { + DynamicUser = true; + ExecStart = '' + ${pkgs.prometheus-surfboard-exporter}/bin/surfboard_exporter \ + --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ + --modem-address ${cfg.modemAddress} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix new file mode 100644 index 000000000000..e0ae83802425 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix @@ -0,0 +1,45 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.tor; +in +{ + port = 9130; + extraOpts = { + torControlAddress = mkOption { + type = types.str; + default = "127.0.0.1"; + description = '' + Tor control IP address or hostname. + ''; + }; + + torControlPort = mkOption { + type = types.int; + default = 9051; + description = '' + Tor control port. + ''; + }; + }; + serviceOpts = { + serviceConfig = { + DynamicUser = true; + ExecStart = '' + ${pkgs.prometheus-tor-exporter}/bin/prometheus-tor-exporter \ + -b ${cfg.listenAddress} \ + -p ${toString cfg.port} \ + -a ${cfg.torControlAddress} \ + -c ${toString cfg.torControlPort} \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + + # CPython requires a process to either have $HOME defined or run as a UID + # defined in /etc/passwd. The latter is false with DynamicUser, so define a + # dummy $HOME. 
https://bugs.python.org/issue10496 + environment = { HOME = "/var/empty"; }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix new file mode 100644 index 000000000000..011dcbe208e4 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix @@ -0,0 +1,67 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.unifi; +in +{ + port = 9130; + extraOpts = { + unifiAddress = mkOption { + type = types.str; + example = "https://10.0.0.1:8443"; + description = '' + URL of the UniFi Controller API. + ''; + }; + + unifiInsecure = mkOption { + type = types.bool; + default = false; + description = '' + If enabled skip the verification of the TLS certificate of the UniFi Controller API. + Use with caution. + ''; + }; + + unifiUsername = mkOption { + type = types.str; + example = "ReadOnlyUser"; + description = '' + username for authentication against UniFi Controller API. + ''; + }; + + unifiPassword = mkOption { + type = types.str; + description = '' + Password for authentication against UniFi Controller API. + ''; + }; + + unifiTimeout = mkOption { + type = types.str; + default = "5s"; + example = "2m"; + description = '' + Timeout including unit for UniFi Controller API requests. 
+ ''; + }; + }; + serviceOpts = { + serviceConfig = { + DynamicUser = true; + ExecStart = '' + ${pkgs.prometheus-unifi-exporter}/bin/unifi_exporter \ + -telemetry.addr ${cfg.listenAddress}:${toString cfg.port} \ + -unifi.addr ${cfg.unifiAddress} \ + -unifi.username ${cfg.unifiUsername} \ + -unifi.password ${cfg.unifiPassword} \ + -unifi.timeout ${cfg.unifiTimeout} \ + ${optionalString cfg.unifiInsecure "-unifi.insecure" } \ + ${concatStringsSep " \\\n " cfg.extraFlags} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix new file mode 100644 index 000000000000..aaed76175b84 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix @@ -0,0 +1,88 @@ +{ config, lib, pkgs }: + +with lib; + +let + cfg = config.services.prometheus.exporters.varnish; +in +{ + port = 9131; + extraOpts = { + noExit = mkOption { + type = types.bool; + default = false; + description = '' + Do not exit server on Varnish scrape errors. + ''; + }; + withGoMetrics = mkOption { + type = types.bool; + default = false; + description = '' + Export go runtime and http handler metrics. + ''; + }; + verbose = mkOption { + type = types.bool; + default = false; + description = '' + Enable verbose logging. + ''; + }; + raw = mkOption { + type = types.bool; + default = false; + description = '' + Enable raw stdout logging without timestamps. + ''; + }; + varnishStatPath = mkOption { + type = types.str; + default = "varnishstat"; + description = '' + Path to varnishstat. + ''; + }; + instance = mkOption { + type = types.nullOr types.str; + default = null; + description = '' + varnishstat -n value. + ''; + }; + healthPath = mkOption { + type = types.nullOr types.str; + default = null; + description = '' + Path under which to expose healthcheck. Disabled unless configured. 
+ ''; + }; + telemetryPath = mkOption { + type = types.str; + default = "/metrics"; + description = '' + Path under which to expose metrics. + ''; + }; + }; + serviceOpts = { + path = [ pkgs.varnish ]; + serviceConfig = { + DynamicUser = true; + RestartSec = mkDefault 1; + ExecStart = '' + ${pkgs.prometheus-varnish-exporter}/bin/prometheus_varnish_exporter \ + --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ + --web.telemetry-path ${cfg.telemetryPath} \ + --varnishstat-path ${cfg.varnishStatPath} \ + ${concatStringsSep " \\\n " (cfg.extraFlags + ++ optional (cfg.healthPath != null) "--web.health-path ${cfg.healthPath}" + ++ optional (cfg.instance != null) "-n ${cfg.instance}" + ++ optional cfg.noExit "--no-exit" + ++ optional cfg.withGoMetrics "--with-go-metrics" + ++ optional cfg.verbose "--verbose" + ++ optional cfg.raw "--raw")} + ''; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix b/nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix new file mode 100644 index 000000000000..7eb4d888b0cc --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix @@ -0,0 +1,79 @@ +{ config, pkgs, lib, ... }: + +with pkgs; +with lib; + +let + + cfg = config.services.riemann-dash; + + conf = writeText "config.rb" '' + riemann_base = "${cfg.dataDir}" + config.store[:ws_config] = "#{riemann_base}/config/config.json" + ${cfg.config} + ''; + + launcher = writeScriptBin "riemann-dash" '' + #!/bin/sh + exec ${pkgs.riemann-dash}/bin/riemann-dash ${conf} + ''; + +in { + + options = { + + services.riemann-dash = { + enable = mkOption { + type = types.bool; + default = false; + description = '' + Enable the riemann-dash dashboard daemon. + ''; + }; + config = mkOption { + type = types.lines; + description = '' + Contents added to the end of the riemann-dash configuration file. 
+ ''; + }; + dataDir = mkOption { + type = types.str; + default = "/var/riemann-dash"; + description = '' + Location of the riemann-base dir. The dashboard configuration file is + is stored to this directory. The directory is created automatically on + service start, and owner is set to the riemanndash user. + ''; + }; + }; + + }; + + config = mkIf cfg.enable { + + users.groups.riemanndash.gid = config.ids.gids.riemanndash; + + users.users.riemanndash = { + description = "riemann-dash daemon user"; + uid = config.ids.uids.riemanndash; + group = "riemanndash"; + }; + + systemd.services.riemann-dash = { + wantedBy = [ "multi-user.target" ]; + wants = [ "riemann.service" ]; + after = [ "riemann.service" ]; + preStart = '' + mkdir -p ${cfg.dataDir}/config + chown -R riemanndash:riemanndash ${cfg.dataDir} + ''; + serviceConfig = { + User = "riemanndash"; + ExecStart = "${launcher}/bin/riemann-dash"; + PermissionsStartOnly = true; + }; + }; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix b/nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix new file mode 100644 index 000000000000..4e8832dadc5e --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix @@ -0,0 +1,63 @@ +{ config, pkgs, lib, ... }: + +with pkgs; +with lib; + +let + + cfg = config.services.riemann-tools; + + riemannHost = "${cfg.riemannHost}"; + + healthLauncher = writeScriptBin "riemann-health" '' + #!/bin/sh + exec ${pkgs.riemann-tools}/bin/riemann-health --host ${riemannHost} + ''; + + +in { + + options = { + + services.riemann-tools = { + enableHealth = mkOption { + type = types.bool; + default = false; + description = '' + Enable the riemann-health daemon. + ''; + }; + riemannHost = mkOption { + type = types.str; + default = "127.0.0.1"; + description = '' + Address of the host riemann node. Defaults to localhost. 
+ ''; + }; + }; + + }; + + config = mkIf cfg.enableHealth { + + users.groups.riemanntools.gid = config.ids.gids.riemanntools; + + users.users.riemanntools = { + description = "riemann-tools daemon user"; + uid = config.ids.uids.riemanntools; + group = "riemanntools"; + }; + + systemd.services.riemann-health = { + wantedBy = [ "multi-user.target" ]; + path = [ procps ]; + serviceConfig = { + User = "riemanntools"; + ExecStart = "${healthLauncher}/bin/riemann-health"; + PermissionsStartOnly = true; + }; + }; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/riemann.nix b/nixpkgs/nixos/modules/services/monitoring/riemann.nix new file mode 100644 index 000000000000..13d2b1cc0602 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/riemann.nix @@ -0,0 +1,105 @@ +{ config, pkgs, lib, ... }: + +with pkgs; +with lib; + +let + + cfg = config.services.riemann; + + classpath = concatStringsSep ":" ( + cfg.extraClasspathEntries ++ [ "${riemann}/share/java/riemann.jar" ] + ); + + riemannConfig = concatStringsSep "\n" ( + [cfg.config] ++ (map (f: ''(load-file "${f}")'') cfg.configFiles) + ); + + launcher = writeScriptBin "riemann" '' + #!/bin/sh + exec ${jdk}/bin/java ${concatStringsSep " " cfg.extraJavaOpts} \ + -cp ${classpath} \ + riemann.bin ${cfg.configFile} + ''; + +in { + + options = { + + services.riemann = { + enable = mkOption { + type = types.bool; + default = false; + description = '' + Enable the Riemann network monitoring daemon. + ''; + }; + config = mkOption { + type = types.lines; + description = '' + Contents of the Riemann configuration file. For more complicated + config you should use configFile. + ''; + }; + configFiles = mkOption { + type = with types; listOf path; + default = []; + description = '' + Extra files containing Riemann configuration. 
These files will be + loaded at runtime by Riemann (with Clojure's + <literal>load-file</literal> function) at the end of the + configuration if you use the config option, this is ignored if you + use configFile. + ''; + }; + configFile = mkOption { + type = types.str; + description = '' + A Riemann config file. Any files in the same directory as this file + will be added to the classpath by Riemann. + ''; + }; + extraClasspathEntries = mkOption { + type = with types; listOf str; + default = []; + description = '' + Extra entries added to the Java classpath when running Riemann. + ''; + }; + extraJavaOpts = mkOption { + type = with types; listOf str; + default = []; + description = '' + Extra Java options used when launching Riemann. + ''; + }; + }; + }; + + config = mkIf cfg.enable { + + users.groups.riemann.gid = config.ids.gids.riemann; + + users.users.riemann = { + description = "riemann daemon user"; + uid = config.ids.uids.riemann; + group = "riemann"; + }; + + services.riemann.configFile = mkDefault ( + writeText "riemann-config.clj" riemannConfig + ); + + systemd.services.riemann = { + wantedBy = [ "multi-user.target" ]; + path = [ inetutils ]; + serviceConfig = { + User = "riemann"; + ExecStart = "${launcher}/bin/riemann"; + }; + serviceConfig.LimitNOFILE = 65536; + }; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/scollector.nix b/nixpkgs/nixos/modules/services/monitoring/scollector.nix new file mode 100644 index 000000000000..6ecb21d628de --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/scollector.nix @@ -0,0 +1,136 @@ +{ config, lib, pkgs, ... 
}:
+
+with lib;
+
+let
+  cfg = config.services.scollector;
+
+  collectors = pkgs.runCommand "collectors" {}
+    ''
+    mkdir -p $out
+    ${lib.concatStringsSep
+        "\n"
+        (lib.mapAttrsToList
+          (frequency: binaries:
+            "mkdir -p $out/${frequency}\n" +
+            (lib.concatStringsSep
+              "\n"
+              (map (path: "ln -s ${path} $out/${frequency}/$(basename ${path})")
+                binaries)))
+          cfg.collectors)}
+    '';
+
+  conf = pkgs.writeText "scollector.toml" ''
+    Host = "${cfg.bosunHost}"
+    ColDir = "${collectors}"
+    ${cfg.extraConfig}
+  '';
+
+in {
+
+  options = {
+
+    services.scollector = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to run scollector.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.scollector;
+        defaultText = "pkgs.scollector";
+        example = literalExample "pkgs.scollector";
+        description = ''
+          scollector binary to use.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "scollector";
+        description = ''
+          User account under which scollector runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "scollector";
+        description = ''
+          Group account under which scollector runs.
+        '';
+      };
+
+      bosunHost = mkOption {
+        type = types.str;
+        default = "localhost:8070";
+        description = ''
+          Host and port of the bosun server that will store the collected
+          data.
+        '';
+      };
+
+      collectors = mkOption {
+        type = with types; attrsOf (listOf path);
+        default = {};
+        example = literalExample "{ \"0\" = [ \"\${postgresStats}/bin/collect-stats\" ]; }";
+        description = ''
+          An attribute set mapping the frequency of collection to a list of
+          binaries that should be executed at that frequency. You can use "0"
+          to run a binary forever.
+ ''; + }; + + extraOpts = mkOption { + type = with types; listOf str; + default = []; + example = [ "-d" ]; + description = '' + Extra scollector command line options + ''; + }; + + extraConfig = mkOption { + type = types.lines; + default = ""; + description = '' + Extra scollector configuration added to the end of scollector.toml + ''; + }; + + }; + + }; + + config = mkIf config.services.scollector.enable { + + systemd.services.scollector = { + description = "scollector metrics collector (part of Bosun)"; + wantedBy = [ "multi-user.target" ]; + + path = [ pkgs.coreutils pkgs.iproute ]; + + serviceConfig = { + PermissionsStartOnly = true; + User = cfg.user; + Group = cfg.group; + ExecStart = "${cfg.package.bin}/bin/scollector -conf=${conf} ${lib.concatStringsSep " " cfg.extraOpts}"; + }; + }; + + users.users.scollector = { + description = "scollector user"; + group = "scollector"; + uid = config.ids.uids.scollector; + }; + + users.groups.scollector.gid = config.ids.gids.scollector; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/smartd.nix b/nixpkgs/nixos/modules/services/monitoring/smartd.nix new file mode 100644 index 000000000000..c345ec48a018 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/smartd.nix @@ -0,0 +1,242 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + + host = config.networking.hostName or "unknown" + + optionalString (config.networking.domain != null) ".${config.networking.domain}"; + + cfg = config.services.smartd; + + nm = cfg.notifications.mail; + nw = cfg.notifications.wall; + nx = cfg.notifications.x11; + + smartdNotify = pkgs.writeScript "smartd-notify.sh" '' + #! 
${pkgs.runtimeShell} + ${optionalString nm.enable '' + { + ${pkgs.coreutils}/bin/cat << EOF + From: smartd on ${host} <root> + To: undisclosed-recipients:; + Subject: SMART error on $SMARTD_DEVICESTRING: $SMARTD_FAILTYPE + + $SMARTD_FULLMESSAGE + EOF + + ${pkgs.smartmontools}/sbin/smartctl -a -d "$SMARTD_DEVICETYPE" "$SMARTD_DEVICE" + } | ${nm.mailer} -i "${nm.recipient}" + ''} + ${optionalString nw.enable '' + { + ${pkgs.coreutils}/bin/cat << EOF + Problem detected with disk: $SMARTD_DEVICESTRING + Warning message from smartd is: + + $SMARTD_MESSAGE + EOF + } | ${pkgs.utillinux}/bin/wall 2>/dev/null + ''} + ${optionalString nx.enable '' + export DISPLAY=${nx.display} + { + ${pkgs.coreutils}/bin/cat << EOF + Problem detected with disk: $SMARTD_DEVICESTRING + Warning message from smartd is: + + $SMARTD_FULLMESSAGE + EOF + } | ${pkgs.xorg.xmessage}/bin/xmessage -file - 2>/dev/null & + ''} + ''; + + notifyOpts = optionalString (nm.enable || nw.enable || nx.enable) + ("-m <nomailer> -M exec ${smartdNotify} " + optionalString cfg.notifications.test "-M test "); + + smartdConf = pkgs.writeText "smartd.conf" '' + # Autogenerated smartd startup config file + DEFAULT ${notifyOpts}${cfg.defaults.monitored} + + ${concatMapStringsSep "\n" (d: "${d.device} ${d.options}") cfg.devices} + + ${optionalString cfg.autodetect + "DEVICESCAN ${notifyOpts}${cfg.defaults.autodetected}"} + ''; + + smartdDeviceOpts = { ... 
}: { + + options = { + + device = mkOption { + example = "/dev/sda"; + type = types.str; + description = "Location of the device."; + }; + + options = mkOption { + default = ""; + example = "-d sat"; + type = types.separatedString " "; + description = "Options that determine how smartd monitors the device."; + }; + + }; + + }; + +in + +{ + ###### interface + + options = { + + services.smartd = { + + enable = mkEnableOption "smartd daemon from <literal>smartmontools</literal> package"; + + autodetect = mkOption { + default = true; + type = types.bool; + description = '' + Whenever smartd should monitor all devices connected to the + machine at the time it's being started (the default). + + Set to false to monitor the devices listed in + <option>services.smartd.devices</option> only. + ''; + }; + + extraOptions = mkOption { + default = []; + type = types.listOf types.str; + example = ["-A /var/log/smartd/" "--interval=3600"]; + description = '' + Extra command-line options passed to the <literal>smartd</literal> + daemon on startup. + + (See <literal>man 8 smartd</literal>.) + ''; + }; + + notifications = { + + mail = { + enable = mkOption { + default = config.services.mail.sendmailSetuidWrapper != null; + type = types.bool; + description = "Whenever to send e-mail notifications."; + }; + + recipient = mkOption { + default = "root"; + type = types.str; + description = "Recipient of the notification messages."; + }; + + mailer = mkOption { + default = "/run/wrappers/bin/sendmail"; + type = types.path; + description = '' + Sendmail-compatible binary to be used to send the messages. + + You should probably enable + <option>services.postfix</option> or some other MTA for + this to work. 
+ ''; + }; + }; + + wall = { + enable = mkOption { + default = true; + type = types.bool; + description = "Whenever to send wall notifications to all users."; + }; + }; + + x11 = { + enable = mkOption { + default = config.services.xserver.enable; + type = types.bool; + description = "Whenever to send X11 xmessage notifications."; + }; + + display = mkOption { + default = ":${toString config.services.xserver.display}"; + type = types.str; + description = "DISPLAY to send X11 notifications to."; + }; + }; + + test = mkOption { + default = false; + type = types.bool; + description = "Whenever to send a test notification on startup."; + }; + + }; + + defaults = { + monitored = mkOption { + default = "-a"; + type = types.separatedString " "; + example = "-a -o on -s (S/../.././02|L/../../7/04)"; + description = '' + Common default options for explicitly monitored (listed in + <option>services.smartd.devices</option>) devices. + + The default value turns on monitoring of all the things (see + <literal>man 5 smartd.conf</literal>). + + The example also turns on SMART Automatic Offline Testing on + startup, and schedules short self-tests daily, and long + self-tests weekly. + ''; + }; + + autodetected = mkOption { + default = cfg.defaults.monitored; + type = types.separatedString " "; + description = '' + Like <option>services.smartd.defaults.monitored</option>, but for the + autodetected devices. + ''; + }; + }; + + devices = mkOption { + default = []; + example = [ { device = "/dev/sda"; } { device = "/dev/sdb"; options = "-d sat"; } ]; + type = with types; listOf (submodule smartdDeviceOpts); + description = "List of devices to monitor."; + }; + + }; + + }; + + + ###### implementation + + config = mkIf cfg.enable { + + assertions = [ { + assertion = cfg.autodetect || cfg.devices != []; + message = "smartd can't run with both disabled autodetect and an empty list of devices to monitor."; + } ]; + + systemd.services.smartd = { + description = "S.M.A.R.T. 
Daemon"; + + wantedBy = [ "multi-user.target" ]; + + path = [ pkgs.nettools ]; # for hostname and dnsdomanname calls in smartd + + serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd ${lib.concatStringsSep " " cfg.extraOptions} --no-fork --configfile=${smartdConf}"; + }; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/statsd.nix b/nixpkgs/nixos/modules/services/monitoring/statsd.nix new file mode 100644 index 000000000000..ea155821ecc9 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/statsd.nix @@ -0,0 +1,150 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + + cfg = config.services.statsd; + + isBuiltinBackend = name: + builtins.elem name [ "graphite" "console" "repeater" ]; + + backendsToPackages = let + mkMap = list: name: + if isBuiltinBackend name then list + else list ++ [ pkgs.nodePackages.${name} ]; + in foldl mkMap []; + + configFile = pkgs.writeText "statsd.conf" '' + { + address: "${cfg.listenAddress}", + port: "${toString cfg.port}", + mgmt_address: "${cfg.mgmt_address}", + mgmt_port: "${toString cfg.mgmt_port}", + backends: [${ + concatMapStringsSep "," (name: + if (isBuiltinBackend name) + then ''"./backends/${name}"'' + else ''"${name}"'' + ) cfg.backends}], + ${optionalString (cfg.graphiteHost!=null) ''graphiteHost: "${cfg.graphiteHost}",''} + ${optionalString (cfg.graphitePort!=null) ''graphitePort: "${toString cfg.graphitePort}",''} + console: { + prettyprint: false + }, + log: { + backend: "stdout" + }, + automaticConfigReload: false${optionalString (cfg.extraConfig != null) ","} + ${cfg.extraConfig} + } + ''; + + deps = pkgs.buildEnv { + name = "statsd-runtime-deps"; + pathsToLink = [ "/lib" ]; + ignoreCollisions = true; + + paths = backendsToPackages cfg.backends; + }; + +in + +{ + + ###### interface + + options.services.statsd = { + + enable = mkEnableOption "statsd"; + + listenAddress = mkOption { + description = "Address that statsd listens on over UDP"; + default = "127.0.0.1"; + type = types.str; 
+ }; + + port = mkOption { + description = "Port that stats listens for messages on over UDP"; + default = 8125; + type = types.int; + }; + + mgmt_address = mkOption { + description = "Address to run management TCP interface on"; + default = "127.0.0.1"; + type = types.str; + }; + + mgmt_port = mkOption { + description = "Port to run the management TCP interface on"; + default = 8126; + type = types.int; + }; + + backends = mkOption { + description = "List of backends statsd will use for data persistence"; + default = []; + example = [ + "graphite" + "console" + "repeater" + "statsd-librato-backend" + "stackdriver-statsd-backend" + "statsd-influxdb-backend" + ]; + type = types.listOf types.str; + }; + + graphiteHost = mkOption { + description = "Hostname or IP of Graphite server"; + default = null; + type = types.nullOr types.str; + }; + + graphitePort = mkOption { + description = "Port of Graphite server (i.e. carbon-cache)."; + default = null; + type = types.nullOr types.int; + }; + + extraConfig = mkOption { + description = "Extra configuration options for statsd"; + default = ""; + type = types.nullOr types.str; + }; + + }; + + ###### implementation + + config = mkIf cfg.enable { + + assertions = map (backend: { + assertion = !isBuiltinBackend backend -> hasAttrByPath [ backend ] pkgs.nodePackages; + message = "Only builtin backends (graphite, console, repeater) or backends enumerated in `pkgs.nodePackages` are allowed!"; + }) cfg.backends; + + users.users = singleton { + name = "statsd"; + uid = config.ids.uids.statsd; + description = "Statsd daemon user"; + }; + + systemd.services.statsd = { + description = "Statsd Server"; + wantedBy = [ "multi-user.target" ]; + environment = { + NODE_PATH = "${deps}/lib/node_modules"; + }; + serviceConfig = { + ExecStart = "${pkgs.statsd}/bin/statsd ${configFile}"; + User = "statsd"; + }; + }; + + environment.systemPackages = [ pkgs.statsd ]; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/sysstat.nix 
b/nixpkgs/nixos/modules/services/monitoring/sysstat.nix new file mode 100644 index 000000000000..d668faa53cc3 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/sysstat.nix @@ -0,0 +1,80 @@ +{ config, lib, pkgs, ... }: +with lib; +let + cfg = config.services.sysstat; +in { + options = { + services.sysstat = { + enable = mkOption { + type = types.bool; + default = false; + description = '' + Whether to enable sar system activity collection. + ''; + }; + + collect-frequency = mkOption { + default = "*:00/10"; + description = '' + OnCalendar specification for sysstat-collect + ''; + }; + + collect-args = mkOption { + default = "1 1"; + description = '' + Arguments to pass sa1 when collecting statistics + ''; + }; + }; + }; + + config = mkIf cfg.enable { + systemd.services.sysstat = { + description = "Resets System Activity Logs"; + wantedBy = [ "multi-user.target" ]; + preStart = "test -d /var/log/sa || mkdir -p /var/log/sa"; + + serviceConfig = { + User = "root"; + RemainAfterExit = true; + Type = "oneshot"; + ExecStart = "${pkgs.sysstat}/lib/sa/sa1 --boot"; + }; + }; + + systemd.services.sysstat-collect = { + description = "system activity accounting tool"; + unitConfig.Documentation = "man:sa1(8)"; + + serviceConfig = { + Type = "oneshot"; + User = "root"; + ExecStart = "${pkgs.sysstat}/lib/sa/sa1 ${cfg.collect-args}"; + }; + }; + + systemd.timers.sysstat-collect = { + description = "Run system activity accounting tool on a regular basis"; + wantedBy = [ "timers.target" ]; + timerConfig.OnCalendar = cfg.collect-frequency; + }; + + systemd.services.sysstat-summary = { + description = "Generate a daily summary of process accounting"; + unitConfig.Documentation = "man:sa2(8)"; + + serviceConfig = { + Type = "oneshot"; + User = "root"; + ExecStart = "${pkgs.sysstat}/lib/sa/sa2 -A"; + }; + }; + + systemd.timers.sysstat-summary = { + description = "Generate summary of yesterday's process accounting"; + wantedBy = [ "timers.target" ]; + timerConfig.OnCalendar = 
"00:07:00"; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/systemhealth.nix b/nixpkgs/nixos/modules/services/monitoring/systemhealth.nix new file mode 100644 index 000000000000..32d4314d5f77 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/systemhealth.nix @@ -0,0 +1,133 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.systemhealth; + + systemhealth = with pkgs; stdenv.mkDerivation { + name = "systemhealth-1.0"; + src = fetchurl { + url = "https://www.brianlane.com/downloads/systemhealth/systemhealth-1.0.tar.bz2"; + sha256 = "1q69lz7hmpbdpbz36zb06nzfkj651413n9icx0njmyr3xzq1j9qy"; + }; + buildInputs = [ python ]; + installPhase = '' + mkdir -p $out/bin + # Make it work for kernels 3.x, not so different than 2.6 + sed -i 's/2\.6/4.0/' system_health.py + cp system_health.py $out/bin + ''; + }; + + rrdDir = "/var/lib/health/rrd"; + htmlDir = "/var/lib/health/html"; + + configFile = rrdDir + "/.syshealthrc"; + # The program will try to read $HOME/.syshealthrc, so we set the proper home. + command = "HOME=${rrdDir} ${systemhealth}/bin/system_health.py"; + + cronJob = '' + */5 * * * * wwwrun ${command} --log + 5 * * * * wwwrun ${command} --graph + ''; + + nameEqualName = s: "${s} = ${s}"; + interfacesSection = concatStringsSep "\n" (map nameEqualName cfg.interfaces); + + driveLine = d: "${d.path} = ${d.name}"; + drivesSection = concatStringsSep "\n" (map driveLine cfg.drives); + +in +{ + options = { + services.systemhealth = { + enable = mkOption { + default = false; + description = '' + Enable the system health monitor and its generation of graphs. + ''; + }; + + urlPrefix = mkOption { + default = "/health"; + description = '' + The URL prefix under which the System Health web pages appear in httpd. + ''; + }; + + interfaces = mkOption { + default = [ "lo" ]; + example = [ "lo" "eth0" "eth1" ]; + description = '' + Interfaces to monitor (minimum one). 
+ ''; + }; + + drives = mkOption { + default = [ ]; + example = [ { name = "root"; path = "/"; } ]; + description = '' + Drives to monitor. + ''; + }; + }; + }; + + config = mkIf cfg.enable { + services.cron.systemCronJobs = [ cronJob ]; + + system.activationScripts.systemhealth = stringAfter [ "var" ] + '' + mkdir -p ${rrdDir} ${htmlDir} + chown wwwrun:wwwrun ${rrdDir} ${htmlDir} + + cat >${configFile} << EOF + [paths] + rrdtool = ${pkgs.rrdtool}/bin/rrdtool + loadavg_rrd = loadavg + ps = /run/current-system/sw/bin/ps + df = /run/current-system/sw/bin/df + meminfo_rrd = meminfo + uptime_rrd = uptime + rrd_path = ${rrdDir} + png_path = ${htmlDir} + + [processes] + + [interfaces] + ${interfacesSection} + + [drives] + ${drivesSection} + + [graphs] + width = 400 + time = ['-3hours', '-32hours', '-8days', '-5weeks', '-13months'] + height = 100 + + [external] + + EOF + + chown wwwrun:wwwrun ${configFile} + + ${pkgs.su}/bin/su -s "/bin/sh" -c "${command} --check" wwwrun + ${pkgs.su}/bin/su -s "/bin/sh" -c "${command} --html" wwwrun + ''; + + services.httpd.extraSubservices = [ + { function = f: { + extraConfig = '' + Alias ${cfg.urlPrefix} ${htmlDir} + + <Directory ${htmlDir}> + Order allow,deny + Allow from all + </Directory> + ''; + }; + } + ]; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/teamviewer.nix b/nixpkgs/nixos/modules/services/monitoring/teamviewer.nix new file mode 100644 index 000000000000..dd98ecab828d --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/teamviewer.nix @@ -0,0 +1,46 @@ +{ config, lib, pkgs, ... 
}: + +with lib; + +let + + cfg = config.services.teamviewer; + +in + +{ + + ###### interface + + options = { + + services.teamviewer.enable = mkEnableOption "TeamViewer daemon"; + + }; + + ###### implementation + + config = mkIf (cfg.enable) { + + environment.systemPackages = [ pkgs.teamviewer ]; + + systemd.services.teamviewerd = { + description = "TeamViewer remote control daemon"; + + wantedBy = [ "multi-user.target" ]; + after = [ "NetworkManager-wait-online.service" "network.target" ]; + preStart = "mkdir -pv /var/lib/teamviewer /var/log/teamviewer"; + + serviceConfig = { + Type = "forking"; + ExecStart = "${pkgs.teamviewer}/bin/teamviewerd -d"; + PIDFile = "/run/teamviewerd.pid"; + ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; + Restart = "on-abort"; + StartLimitInterval = "60"; + StartLimitBurst = "10"; + }; + }; + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/telegraf.nix b/nixpkgs/nixos/modules/services/monitoring/telegraf.nix new file mode 100644 index 000000000000..6bfcd7143e1c --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/telegraf.nix @@ -0,0 +1,71 @@ +{ config, lib, pkgs, ... 
}: + +with lib; + +let + cfg = config.services.telegraf; + + configFile = pkgs.runCommand "config.toml" { + buildInputs = [ pkgs.remarshal ]; + } '' + remarshal -if json -of toml \ + < ${pkgs.writeText "config.json" (builtins.toJSON cfg.extraConfig)} \ + > $out + ''; +in { + ###### interface + options = { + services.telegraf = { + enable = mkEnableOption "telegraf server"; + + package = mkOption { + default = pkgs.telegraf; + defaultText = "pkgs.telegraf"; + description = "Which telegraf derivation to use"; + type = types.package; + }; + + extraConfig = mkOption { + default = {}; + description = "Extra configuration options for telegraf"; + type = types.attrs; + example = { + outputs = { + influxdb = { + urls = ["http://localhost:8086"]; + database = "telegraf"; + }; + }; + inputs = { + statsd = { + service_address = ":8125"; + delete_timings = true; + }; + }; + }; + }; + }; + }; + + + ###### implementation + config = mkIf config.services.telegraf.enable { + systemd.services.telegraf = { + description = "Telegraf Agent"; + wantedBy = [ "multi-user.target" ]; + after = [ "network-online.target" ]; + serviceConfig = { + ExecStart=''${cfg.package}/bin/telegraf -config "${configFile}"''; + ExecReload="${pkgs.coreutils}/bin/kill -HUP $MAINPID"; + User = "telegraf"; + Restart = "on-failure"; + }; + }; + + users.users = [{ + name = "telegraf"; + uid = config.ids.uids.telegraf; + description = "telegraf daemon user"; + }]; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/ups.nix b/nixpkgs/nixos/modules/services/monitoring/ups.nix new file mode 100644 index 000000000000..bc755612fd9b --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/ups.nix @@ -0,0 +1,280 @@ +{ config, lib, pkgs, ... }: + +# TODO: This is not secure, have a look at the file docs/security.txt inside +# the project sources. 
+with lib; + +let + cfg = config.power.ups; +in + +let + upsOptions = {name, config, ...}: + { + options = { + # This can be infered from the UPS model by looking at + # /nix/store/nut/share/driver.list + driver = mkOption { + type = types.str; + description = '' + Specify the program to run to talk to this UPS. apcsmart, + bestups, and sec are some examples. + ''; + }; + + port = mkOption { + type = types.str; + description = '' + The serial port to which your UPS is connected. /dev/ttyS0 is + usually the first port on Linux boxes, for example. + ''; + }; + + shutdownOrder = mkOption { + default = 0; + type = types.int; + description = '' + When you have multiple UPSes on your system, you usually need to + turn them off in a certain order. upsdrvctl shuts down all the + 0s, then the 1s, 2s, and so on. To exclude a UPS from the + shutdown sequence, set this to -1. + ''; + }; + + maxStartDelay = mkOption { + default = null; + type = types.uniq (types.nullOr types.int); + description = '' + This can be set as a global variable above your first UPS + definition and it can also be set in a UPS section. This value + controls how long upsdrvctl will wait for the driver to finish + starting. This keeps your system from getting stuck due to a + broken driver or UPS. + ''; + }; + + description = mkOption { + default = ""; + type = types.string; + description = '' + Description of the UPS. + ''; + }; + + directives = mkOption { + default = []; + type = types.listOf types.str; + description = '' + List of configuration directives for this UPS. + ''; + }; + + summary = mkOption { + default = ""; + type = types.string; + description = '' + Lines which would be added inside ups.conf for handling this UPS. 
+ ''; + }; + + }; + + config = { + directives = mkOrder 10 ([ + "driver = ${config.driver}" + "port = ${config.port}" + ''desc = "${config.description}"'' + "sdorder = ${toString config.shutdownOrder}" + ] ++ (optional (config.maxStartDelay != null) + "maxstartdelay = ${toString config.maxStartDelay}") + ); + + summary = + concatStringsSep "\n " + (["[${name}]"] ++ config.directives); + }; + }; + +in + + +{ + options = { + # powerManagement.powerDownCommands + + power.ups = { + enable = mkOption { + default = false; + type = with types; bool; + description = '' + Enables support for Power Devices, such as Uninterruptible Power + Supplies, Power Distribution Units and Solar Controllers. + ''; + }; + + # This option is not used yet. + mode = mkOption { + default = "standalone"; + type = types.str; + description = '' + The MODE determines which part of the NUT is to be started, and + which configuration files must be modified. + + The values of MODE can be: + + - none: NUT is not configured, or use the Integrated Power + Management, or use some external system to startup NUT + components. So nothing is to be started. + + - standalone: This mode address a local only configuration, with 1 + UPS protecting the local system. This implies to start the 3 NUT + layers (driver, upsd and upsmon) and the matching configuration + files. This mode can also address UPS redundancy. + + - netserver: same as for the standalone configuration, but also + need some more ACLs and possibly a specific LISTEN directive in + upsd.conf. Since this MODE is opened to the network, a special + care should be applied to security concerns. + + - netclient: this mode only requires upsmon. + ''; + }; + + schedulerRules = mkOption { + example = "/etc/nixos/upssched.conf"; + type = types.str; + description = '' + File which contains the rules to handle UPS events. 
+ ''; + }; + + + maxStartDelay = mkOption { + default = 45; + type = types.int; + description = '' + This can be set as a global variable above your first UPS + definition and it can also be set in a UPS section. This value + controls how long upsdrvctl will wait for the driver to finish + starting. This keeps your system from getting stuck due to a + broken driver or UPS. + ''; + }; + + ups = mkOption { + default = {}; + # see nut/etc/ups.conf.sample + description = '' + This is where you configure all the UPSes that this system will be + monitoring directly. These are usually attached to serial ports, + but USB devices are also supported. + ''; + type = with types; attrsOf (submodule upsOptions); + }; + + }; + }; + + config = mkIf cfg.enable { + + environment.systemPackages = [ pkgs.nut ]; + + systemd.services.upsmon = { + description = "Uninterruptible Power Supplies (Monitor)"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig.Type = "forking"; + script = "${pkgs.nut}/sbin/upsmon"; + environment.NUT_CONFPATH = "/etc/nut/"; + environment.NUT_STATEPATH = "/var/lib/nut/"; + }; + + systemd.services.upsd = { + description = "Uninterruptible Power Supplies (Daemon)"; + after = [ "network.target" "upsmon.service" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig.Type = "forking"; + # TODO: replace 'root' by another username. + script = "${pkgs.nut}/sbin/upsd -u root"; + environment.NUT_CONFPATH = "/etc/nut/"; + environment.NUT_STATEPATH = "/var/lib/nut/"; + }; + + systemd.services.upsdrv = { + description = "Uninterruptible Power Supplies (Register all UPS)"; + after = [ "upsd.service" ]; + wantedBy = [ "multi-user.target" ]; + # TODO: replace 'root' by another username. 
+ script = ''${pkgs.nut}/bin/upsdrvctl -u root start''; + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + }; + environment.NUT_CONFPATH = "/etc/nut/"; + environment.NUT_STATEPATH = "/var/lib/nut/"; + }; + + environment.etc = [ + { source = pkgs.writeText "nut.conf" + '' + MODE = ${cfg.mode} + ''; + target = "nut/nut.conf"; + } + { source = pkgs.writeText "ups.conf" + '' + maxstartdelay = ${toString cfg.maxStartDelay} + + ${flip concatStringsSep (flip map (attrValues cfg.ups) (ups: ups.summary)) " + + "} + ''; + target = "nut/ups.conf"; + } + { source = cfg.schedulerRules; + target = "nut/upssched.conf"; + } + # These file are containing private informations and thus should not + # be stored inside the Nix store. + /* + { source = ; + target = "nut/upsd.conf"; + } + { source = ; + target = "nut/upsd.users"; + } + { source = ; + target = "nut/upsmon.conf; + } + */ + ]; + + power.ups.schedulerRules = mkDefault "${pkgs.nut}/etc/upssched.conf.sample"; + + system.activationScripts.upsSetup = stringAfter [ "users" "groups" ] + '' + # Used to store pid files of drivers. + mkdir -p /var/state/ups + ''; + + +/* + users.users = [ + { name = "nut"; + uid = 84; + home = "/var/lib/nut"; + createHome = true; + group = "nut"; + description = "UPnP A/V Media Server user"; + } + ]; + + users.groups = [ + { name = "nut"; + gid = 84; + } + ]; +*/ + + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/uptime.nix b/nixpkgs/nixos/modules/services/monitoring/uptime.nix new file mode 100644 index 000000000000..b4d3a2640109 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/uptime.nix @@ -0,0 +1,95 @@ +{ config, pkgs, lib, ... 
}: +let + inherit (lib) mkOption mkEnableOption mkIf mkMerge types optional; + + cfg = config.services.uptime; + + configDir = pkgs.runCommand "config" {} (if cfg.configFile != null then '' + mkdir $out + ext=`echo ${cfg.configFile} | grep -o \\..*` + ln -sv ${cfg.configFile} $out/default$ext + ln -sv /var/lib/uptime/runtime.json $out/runtime.json + '' else '' + mkdir $out + cat ${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/config/default.yaml > $out/default.yaml + cat >> $out/default.yaml <<EOF + + autoStartMonitor: false + + mongodb: + connectionString: 'mongodb://localhost/uptime' + EOF + ln -sv /var/lib/uptime/runtime.json $out/runtime.json + ''); +in { + options.services.uptime = { + configFile = mkOption { + description = '' + The uptime configuration file + + If mongodb: server != localhost, please set usesRemoteMongo = true + + If you only want to run the monitor, please set enableWebService = false + and enableSeparateMonitoringService = true + + If autoStartMonitor: false (recommended) and you want to run both + services, please set enableSeparateMonitoringService = true + ''; + + type = types.nullOr types.path; + + default = null; + }; + + usesRemoteMongo = mkOption { + description = "Whether the configuration file specifies a remote mongo instance"; + + default = false; + + type = types.bool; + }; + + enableWebService = mkEnableOption "the uptime monitoring program web service"; + + enableSeparateMonitoringService = mkEnableOption "the uptime monitoring service" // { default = cfg.enableWebService; }; + + nodeEnv = mkOption { + description = "The node environment to run in (development, production, etc.)"; + + type = types.string; + + default = "production"; + }; + }; + + config = mkMerge [ (mkIf cfg.enableWebService { + systemd.services.uptime = { + description = "uptime web service"; + wantedBy = [ "multi-user.target" ]; + environment = { + NODE_CONFIG_DIR = configDir; + NODE_ENV = cfg.nodeEnv; + NODE_PATH = 
"${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/node_modules"; + }; + preStart = "mkdir -p /var/lib/uptime"; + serviceConfig.ExecStart = "${pkgs.nodejs}/bin/node ${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/app.js"; + }; + + services.mongodb.enable = mkIf (!cfg.usesRemoteMongo) true; + }) (mkIf cfg.enableSeparateMonitoringService { + systemd.services.uptime-monitor = { + description = "uptime monitoring service"; + wantedBy = [ "multi-user.target" ]; + requires = optional cfg.enableWebService "uptime.service"; + after = optional cfg.enableWebService "uptime.service"; + environment = { + NODE_CONFIG_DIR = configDir; + NODE_ENV = cfg.nodeEnv; + NODE_PATH = "${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/node_modules"; + }; + # Ugh, need to wait for web service to be up + preStart = if cfg.enableWebService then "sleep 1s" else "mkdir -p /var/lib/uptime"; + serviceConfig.ExecStart = "${pkgs.nodejs}/bin/node ${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/monitor.js"; + }; + }) ]; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/vnstat.nix b/nixpkgs/nixos/modules/services/monitoring/vnstat.nix new file mode 100644 index 000000000000..cb2f8c07edb9 --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/vnstat.nix @@ -0,0 +1,43 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.vnstat; +in { + options.services.vnstat = { + enable = mkOption { + type = types.bool; + default = false; + description = '' + Whether to enable update of network usage statistics via vnstatd. 
+ ''; + }; + }; + + config = mkIf cfg.enable { + users.users.vnstatd = { + isSystemUser = true; + description = "vnstat daemon user"; + home = "/var/lib/vnstat"; + createHome = true; + }; + + systemd.services.vnstat = { + description = "vnStat network traffic monitor"; + path = [ pkgs.coreutils ]; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + unitConfig.documentation = "man:vnstatd(1) man:vnstat(1) man:vnstat.conf(5)"; + preStart = "chmod 755 /var/lib/vnstat"; + serviceConfig = { + ExecStart = "${pkgs.vnstat}/bin/vnstatd -n"; + ExecReload = "${pkgs.procps}/bin/kill -HUP $MAINPID"; + ProtectHome = true; + PrivateDevices = true; + PrivateTmp = true; + User = "vnstatd"; + }; + }; + }; +} diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix new file mode 100644 index 000000000000..426cf9bf86ef --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix @@ -0,0 +1,113 @@ +# Zabbix agent daemon. +{ config, lib, pkgs, ... }: + +with lib; + +let + + cfg = config.services.zabbixAgent; + + zabbix = cfg.package; + + stateDir = "/var/run/zabbix"; + + logDir = "/var/log/zabbix"; + + pidFile = "${stateDir}/zabbix_agentd.pid"; + + configFile = pkgs.writeText "zabbix_agentd.conf" + '' + Server = ${cfg.server} + + LogFile = ${logDir}/zabbix_agentd + + PidFile = ${pidFile} + + StartAgents = 1 + + ${config.services.zabbixAgent.extraConfig} + ''; + +in + +{ + + ###### interface + + options = { + + services.zabbixAgent = { + + enable = mkOption { + default = false; + description = '' + Whether to run the Zabbix monitoring agent on this machine. + It will send monitoring data to a Zabbix server. + ''; + }; + + package = mkOption { + type = types.attrs; # Note: pkgs.zabbixXY isn't a derivation, but an attrset of { server = ...; agent = ...; }. 
+ default = pkgs.zabbix; + defaultText = "pkgs.zabbix"; + example = literalExample "pkgs.zabbix34"; + description = '' + The Zabbix package to use. + ''; + }; + + server = mkOption { + default = "127.0.0.1"; + description = '' + The IP address or hostname of the Zabbix server to connect to. + ''; + }; + + extraConfig = mkOption { + default = ""; + type = types.lines; + description = '' + Configuration that is injected verbatim into the configuration file. + ''; + }; + + }; + + }; + + + ###### implementation + + config = mkIf cfg.enable { + + users.users = mkIf (!config.services.zabbixServer.enable) (singleton + { name = "zabbix"; + uid = config.ids.uids.zabbix; + description = "Zabbix daemon user"; + }); + + systemd.services."zabbix-agent" = + { description = "Zabbix Agent"; + + wantedBy = [ "multi-user.target" ]; + + path = [ pkgs.nettools ]; + + preStart = + '' + mkdir -m 0755 -p ${stateDir} ${logDir} + chown zabbix ${stateDir} ${logDir} + ''; + + serviceConfig.ExecStart = "@${zabbix.agent}/sbin/zabbix_agentd zabbix_agentd --config ${configFile}"; + serviceConfig.Type = "forking"; + serviceConfig.RemainAfterExit = true; + serviceConfig.Restart = "always"; + serviceConfig.RestartSec = 2; + }; + + environment.systemPackages = [ zabbix.agent ]; + + }; + +} diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix new file mode 100644 index 000000000000..5f9fc12832fc --- /dev/null +++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix @@ -0,0 +1,126 @@ +# Zabbix server daemon. +{ config, lib, pkgs, ... 
}: + +with lib; + +let + + cfg = config.services.zabbixServer; + + stateDir = "/var/run/zabbix"; + + logDir = "/var/log/zabbix"; + + libDir = "/var/lib/zabbix"; + + pidFile = "${stateDir}/zabbix_server.pid"; + + configFile = pkgs.writeText "zabbix_server.conf" + '' + LogFile = ${logDir}/zabbix_server + + PidFile = ${pidFile} + + ${optionalString (cfg.dbServer != "localhost") '' + DBHost = ${cfg.dbServer} + ''} + + DBName = zabbix + + DBUser = zabbix + + ${optionalString (cfg.dbPassword != "") '' + DBPassword = ${cfg.dbPassword} + ''} + + ${config.services.zabbixServer.extraConfig} + ''; + + useLocalPostgres = cfg.dbServer == "localhost" || cfg.dbServer == ""; + +in + +{ + + ###### interface + + options = { + + services.zabbixServer.enable = mkOption { + default = false; + type = types.bool; + description = '' + Whether to run the Zabbix server on this machine. + ''; + }; + + services.zabbixServer.dbServer = mkOption { + default = "localhost"; + type = types.str; + description = '' + Hostname or IP address of the database server. + Use an empty string ("") to use peer authentication. + ''; + }; + + services.zabbixServer.dbPassword = mkOption { + default = ""; + type = types.str; + description = "Password used to connect to the database server."; + }; + + services.zabbixServer.extraConfig = mkOption { + default = ""; + type = types.lines; + description = '' + Configuration that is injected verbatim into the configuration file. + ''; + }; + + }; + + ###### implementation + + config = mkIf cfg.enable { + + services.postgresql.enable = useLocalPostgres; + + users.users = singleton + { name = "zabbix"; + uid = config.ids.uids.zabbix; + description = "Zabbix daemon user"; + }; + + systemd.services."zabbix-server" = + { description = "Zabbix Server"; + + wantedBy = [ "multi-user.target" ]; + after = optional useLocalPostgres "postgresql.service"; + + preStart = + '' + mkdir -m 0755 -p ${stateDir} ${logDir} ${libDir} + chown zabbix ${stateDir} ${logDir} ${libDir} + + if ! 
test -e "${libDir}/db-created"; then + ${pkgs.su}/bin/su -s "$SHELL" ${config.services.postgresql.superUser} -c '${pkgs.postgresql}/bin/createuser --no-superuser --no-createdb --no-createrole zabbix' || true + ${pkgs.su}/bin/su -s "$SHELL" ${config.services.postgresql.superUser} -c '${pkgs.postgresql}/bin/createdb --owner zabbix zabbix' || true + cat ${pkgs.zabbix.server}/share/zabbix/db/schema/postgresql.sql | ${pkgs.su}/bin/su -s "$SHELL" zabbix -c '${pkgs.postgresql}/bin/psql zabbix' + cat ${pkgs.zabbix.server}/share/zabbix/db/data/images_pgsql.sql | ${pkgs.su}/bin/su -s "$SHELL" zabbix -c '${pkgs.postgresql}/bin/psql zabbix' + cat ${pkgs.zabbix.server}/share/zabbix/db/data/data.sql | ${pkgs.su}/bin/su -s "$SHELL" zabbix -c '${pkgs.postgresql}/bin/psql zabbix' + touch "${libDir}/db-created" + fi + ''; + + path = [ pkgs.nettools ]; + + serviceConfig.ExecStart = "@${pkgs.zabbix.server}/sbin/zabbix_server zabbix_server --config ${configFile}"; + serviceConfig.Type = "forking"; + serviceConfig.Restart = "always"; + serviceConfig.RestartSec = 2; + serviceConfig.PIDFile = pidFile; + }; + + }; + +} |