Diffstat (limited to 'nixpkgs/nixos/modules/services/networking')
-rw-r--r--  nixpkgs/nixos/modules/services/networking/dnsproxy.nix               | 106
-rw-r--r--  nixpkgs/nixos/modules/services/networking/firefox-syncserver.md      |  24
-rw-r--r--  nixpkgs/nixos/modules/services/networking/microsocks.nix             | 146
-rw-r--r--  nixpkgs/nixos/modules/services/networking/mosquitto.md               |  73
-rw-r--r--  nixpkgs/nixos/modules/services/networking/netbird.md                 |  26
-rw-r--r--  nixpkgs/nixos/modules/services/networking/pleroma.md                 | 196
-rw-r--r--  nixpkgs/nixos/modules/services/networking/prosody.md                 |  60
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-control.nix    |  69
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix     |  64
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix |  74
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-router.nix     |  49
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion.nix            |  39
-rw-r--r--  nixpkgs/nixos/modules/services/networking/yggdrasil.md               |   6
13 files changed, 753 insertions, 179 deletions
diff --git a/nixpkgs/nixos/modules/services/networking/dnsproxy.nix b/nixpkgs/nixos/modules/services/networking/dnsproxy.nix
new file mode 100644
index 000000000000..f0be74d7591f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dnsproxy.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    escapeShellArgs
+    getExe
+    lists
+    literalExpression
+    maintainers
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    types;
+
+  cfg = config.services.dnsproxy;
+
+  yaml = pkgs.formats.yaml { };
+  configFile = yaml.generate "config.yaml" cfg.settings;
+
+  finalFlags = (lists.optional (cfg.settings != { }) "--config-path=${configFile}") ++ cfg.flags;
+in
+{
+
+  options.services.dnsproxy = {
+
+    enable = mkEnableOption (lib.mdDoc "dnsproxy");
+
+    package = mkPackageOption pkgs "dnsproxy" { };
+
+    settings = mkOption {
+      type = yaml.type;
+      default = { };
+      example = literalExpression ''
+        {
+          bootstrap = [
+            "8.8.8.8:53"
+          ];
+          listen-addrs = [
+            "0.0.0.0"
+          ];
+          listen-ports = [
+            53
+          ];
+          upstream = [
+            "1.1.1.1:53"
+          ];
+        }
+      '';
+      description = mdDoc ''
+        Contents of the `config.yaml` config file.
+        The `--config-path` argument will only be passed if this set is not empty.
+
+        See <https://github.com/AdguardTeam/dnsproxy/blob/master/config.yaml.dist>.
+      '';
+    };
+
+    flags = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--upstream=1.1.1.1:53" ];
+      description = lib.mdDoc ''
+        A list of extra command-line flags to pass to dnsproxy. For details on the
+        available options, see <https://github.com/AdguardTeam/dnsproxy#usage>.
+        Keep in mind that options passed through command-line flags override
+        config options.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.dnsproxy = {
+      description = "Simple DNS proxy with DoH, DoT, DoQ and DNSCrypt support";
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${getExe cfg.package} ${escapeShellArgs finalFlags}";
+        Restart = "always";
+        RestartSec = 10;
+        DynamicUser = true;
+
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        ProtectClock = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [ "@system-service" "~@privileged @resources" ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ diogotcorreia ];
+
+}
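
Taken together, the new module can be exercised from a host configuration along the lines of the sketch below. The bootstrap, listen and upstream values are the module's own example values; the `--cache` flag is only an illustration of passing extra dnsproxy command-line options, which (as the option description notes) take precedence over the generated config file.

```nix
{
  services.dnsproxy = {
    enable = true;
    settings = {
      bootstrap = [ "8.8.8.8:53" ];
      listen-addrs = [ "0.0.0.0" ];
      listen-ports = [ 53 ];
      upstream = [ "1.1.1.1:53" ];
    };
    # Extra command-line flags; these override the corresponding config.yaml options.
    flags = [ "--cache" ];
  };
}
```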
diff --git a/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
index 4d8777d204bb..f6b515e67f15 100644
--- a/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
+++ b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
@@ -7,19 +7,21 @@ A storage server for Firefox Sync that you can easily host yourself.
 The absolute minimal configuration for the sync server looks like this:
 
 ```nix
-services.mysql.package = pkgs.mariadb;
-
-services.firefox-syncserver = {
-  enable = true;
-  secrets = builtins.toFile "sync-secrets" ''
-    SYNC_MASTER_SECRET=this-secret-is-actually-leaked-to-/nix/store
-  '';
-  singleNode = {
+{
+  services.mysql.package = pkgs.mariadb;
+
+  services.firefox-syncserver = {
     enable = true;
-    hostname = "localhost";
-    url = "http://localhost:5000";
+    secrets = builtins.toFile "sync-secrets" ''
+      SYNC_MASTER_SECRET=this-secret-is-actually-leaked-to-/nix/store
+    '';
+    singleNode = {
+      enable = true;
+      hostname = "localhost";
+      url = "http://localhost:5000";
+    };
   };
-};
+}
 ```
 
 This will start a sync server that is only accessible locally. Once the service is
diff --git a/nixpkgs/nixos/modules/services/networking/microsocks.nix b/nixpkgs/nixos/modules/services/networking/microsocks.nix
new file mode 100644
index 000000000000..be79a8495636
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/microsocks.nix
@@ -0,0 +1,146 @@
+{ config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  cfg = config.services.microsocks;
+
+  cmd =
+    if cfg.execWrapper != null
+    then "${cfg.execWrapper} ${cfg.package}/bin/microsocks"
+    else "${cfg.package}/bin/microsocks";
+  args =
+    [ "-i" cfg.ip "-p" (toString cfg.port) ]
+    ++ lib.optionals (cfg.authOnce) [ "-1" ]
+    ++ lib.optionals (cfg.disableLogging) [ "-q" ]
+    ++ lib.optionals (cfg.outgoingBindIp != null) [ "-b" cfg.outgoingBindIp ]
+    ++ lib.optionals (cfg.authUsername != null) [ "-u" cfg.authUsername ];
+in {
+  options.services.microsocks = {
+    enable = lib.mkEnableOption (lib.mdDoc "Tiny, portable SOCKS5 server with very moderate resource usage");
+    user = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "User microsocks runs as.";
+      type = lib.types.str;
+    };
+    group = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "Group microsocks runs as.";
+      type = lib.types.str;
+    };
+    package = lib.mkPackageOption pkgs "microsocks" {};
+    ip = lib.mkOption {
+      type = lib.types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        IP on which microsocks should listen. Defaults to 127.0.0.1 for
+        security reasons.
+      '';
+    };
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 1080;
+      description = lib.mdDoc "Port on which microsocks should listen.";
+    };
+    disableLogging = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "If true, microsocks will not log any messages to stdout/stderr.";
+    };
+    authOnce = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If true, once a specific IP address has authenticated successfully with
+        user/pass, it is added to a whitelist and may use the proxy without auth.
+      '';
+    };
+    outgoingBindIp = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc "Specifies which ip outgoing connections are bound to";
+    };
+    authUsername = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = "alice";
+      description = lib.mdDoc "Optional username to use for authentication.";
+    };
+    authPasswordFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      example = "/run/secrets/microsocks-password";
+      description = lib.mdDoc "Path to a file containing the password for authentication.";
+    };
+    execWrapper = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = ''
+        ''${pkgs.mullvad-vpn}/bin/mullvad-exclude
+      '';
+      description = lib.mdDoc ''
+        An optional command to prepend to the microsocks command (such as proxychains, or a VPN exclude command).
+      '';
+    };
+  };
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.authUsername != null) == (cfg.authPasswordFile != null);
+        message = "Need to set both authUsername and authPasswordFile for microsocks";
+      }
+    ];
+    users = {
+      users = lib.mkIf (cfg.user == "microsocks") {
+        microsocks = {
+          group = cfg.group;
+          isSystemUser = true;
+        };
+      };
+      groups = lib.mkIf (cfg.group == "microsocks") {
+        microsocks = {};
+      };
+    };
+    systemd.services.microsocks = {
+      enable = true;
+      description = "a tiny socks server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "on-failure";
+        RestartSec = 10;
+        LoadCredential = lib.optionalString (cfg.authPasswordFile != null) "MICROSOCKS_PASSWORD_FILE:${cfg.authPasswordFile}";
+        MemoryDenyWriteExecute = true;
+        SystemCallArchitectures = "native";
+        PrivateTmp = true;
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        PrivateDevices = true;
+        RestrictSUIDSGID = true;
+        RestrictNamespaces = [
+          "cgroup"
+          "ipc"
+          "pid"
+          "user"
+          "uts"
+        ];
+      };
+      script =
+        if cfg.authPasswordFile != null
+        then ''
+          PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/MICROSOCKS_PASSWORD_FILE")
+          ${cmd} ${lib.escapeShellArgs args} -P "$PASSWORD"
+        ''
+        else ''
+          ${cmd} ${lib.escapeShellArgs args}
+        '';
+    };
+  };
+}
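
A usage sketch for the module above, reusing the option examples; the password path is a placeholder for whatever secret management is in use. Note that the assertion requires `authUsername` and `authPasswordFile` to be set together.

```nix
{
  services.microsocks = {
    enable = true;
    # Listen on all interfaces instead of the 127.0.0.1 default.
    ip = "0.0.0.0";
    port = 1080;
    # Whitelist a client IP after its first successful user/pass login.
    authOnce = true;
    authUsername = "alice";
    authPasswordFile = "/run/secrets/microsocks-password";
  };
}
```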
diff --git a/nixpkgs/nixos/modules/services/networking/mosquitto.md b/nixpkgs/nixos/modules/services/networking/mosquitto.md
index 5cdb598151e5..66b3ad6cfa8f 100644
--- a/nixpkgs/nixos/modules/services/networking/mosquitto.md
+++ b/nixpkgs/nixos/modules/services/networking/mosquitto.md
@@ -7,14 +7,16 @@ Mosquitto is a MQTT broker often used for IoT or home automation data transport.
 A minimal configuration for Mosquitto is
 
 ```nix
-services.mosquitto = {
-  enable = true;
-  listeners = [ {
-    acl = [ "pattern readwrite #" ];
-    omitPasswordAuth = true;
-    settings.allow_anonymous = true;
-  } ];
-};
+{
+  services.mosquitto = {
+    enable = true;
+    listeners = [ {
+      acl = [ "pattern readwrite #" ];
+      omitPasswordAuth = true;
+      settings.allow_anonymous = true;
+    } ];
+  };
+}
 ```
 
 This will start a broker on port 1883, listening on all interfaces of the machine, allowing
@@ -25,37 +27,42 @@ full read access to a user `monitor` and restricted write access to a user `serv
 like
 
 ```nix
-services.mosquitto = {
-  enable = true;
-  listeners = [ {
-    users = {
-      monitor = {
-        acl = [ "read #" ];
-        password = "monitor";
+{
+  services.mosquitto = {
+    enable = true;
+    listeners = [ {
+      users = {
+        monitor = {
+          acl = [ "read #" ];
+          password = "monitor";
+        };
+        service = {
+          acl = [ "write service/#" ];
+          password = "service";
+        };
       };
-      service = {
-        acl = [ "write service/#" ];
-        password = "service";
-      };
-    };
-  } ];
-};
+    } ];
+  };
+}
 ```
 
 TLS authentication is configured by setting TLS-related options of the listener:
 
 ```nix
-services.mosquitto = {
-  enable = true;
-  listeners = [ {
-    port = 8883; # port change is not required, but helpful to avoid mistakes
-    # ...
-    settings = {
-      cafile = "/path/to/mqtt.ca.pem";
-      certfile = "/path/to/mqtt.pem";
-      keyfile = "/path/to/mqtt.key";
-    };
-  } ];
+{
+  services.mosquitto = {
+    enable = true;
+    listeners = [ {
+      port = 8883; # port change is not required, but helpful to avoid mistakes
+      # ...
+      settings = {
+        cafile = "/path/to/mqtt.ca.pem";
+        certfile = "/path/to/mqtt.pem";
+        keyfile = "/path/to/mqtt.key";
+      };
+    } ];
+  };
+}
 ```
 
 ## Configuration {#module-services-mosquitto-config}
diff --git a/nixpkgs/nixos/modules/services/networking/netbird.md b/nixpkgs/nixos/modules/services/networking/netbird.md
index a326207becc8..e1f6753cbd30 100644
--- a/nixpkgs/nixos/modules/services/networking/netbird.md
+++ b/nixpkgs/nixos/modules/services/networking/netbird.md
@@ -5,7 +5,9 @@
 The absolute minimal configuration for the netbird daemon looks like this:
 
 ```nix
-services.netbird.enable = true;
+{
+  services.netbird.enable = true;
+}
 ```
 
 This will set up a netbird service listening on the port `51820` associated to the
@@ -14,7 +16,9 @@ This will set up a netbird service listening on the port `51820` associated to t
 It is strictly equivalent to setting:
 
 ```nix
-services.netbird.tunnels.wt0.stateDir = "netbird";
+{
+  services.netbird.tunnels.wt0.stateDir = "netbird";
+}
 ```
 
 The `enable` option is mainly kept for backward compatibility, as defining netbird
@@ -29,11 +33,13 @@ The following configuration will start a netbird daemon using the interface `wt1
 the port 51830. Its configuration file will then be located at `/var/lib/netbird-wt1/config.json`.
 
 ```nix
-services.netbird.tunnels = {
-  wt1 = {
-    port = 51830;
+{
+  services.netbird.tunnels = {
+    wt1 = {
+      port = 51830;
+    };
   };
-};
+}
 ```
 
 To interact with it, you will need to specify the correct daemon address:
@@ -48,9 +54,11 @@ It is also possible to overwrite default options passed to the service, for
 example:
 
 ```nix
-services.netbird.tunnels.wt1.environment = {
-  NB_DAEMON_ADDR = "unix:///var/run/toto.sock"
-};
+{
+  services.netbird.tunnels.wt1.environment = {
+    NB_DAEMON_ADDR = "unix:///var/run/toto.sock";
+  };
+}
 ```
 
 This will set the socket to interact with the netbird service to `/var/run/toto.sock`.
diff --git a/nixpkgs/nixos/modules/services/networking/pleroma.md b/nixpkgs/nixos/modules/services/networking/pleroma.md
index 7c499e1c616c..c2313fd63e6a 100644
--- a/nixpkgs/nixos/modules/services/networking/pleroma.md
+++ b/nixpkgs/nixos/modules/services/networking/pleroma.md
@@ -17,11 +17,13 @@ The `config.exs` file can be further customized following the instructions on th
 ## Initializing the database {#module-services-pleroma-initialize-db}
 
 First, the PostgreSQL service must be enabled in the NixOS configuration
-```
-services.postgresql = {
-  enable = true;
-  package = pkgs.postgresql_13;
-};
+```nix
+{
+  services.postgresql = {
+    enable = true;
+    package = pkgs.postgresql_13;
+  };
+}
 ```
 and activated with the usual
 ```ShellSession
@@ -38,43 +40,45 @@ $ sudo -u postgres psql -f setup.psql
 In this section we will enable the Pleroma service only locally, so its configurations can be improved incrementally.
 
 This is an example of configuration, where [](#opt-services.pleroma.configs) option contains the content of the file `config.exs`, generated [in the first section](#module-services-pleroma-generate-config), but with the secrets (database password, endpoint secret key, salts, etc.) removed. Removing secrets is important, because otherwise they will be stored publicly in the Nix store.
-```
-services.pleroma = {
-  enable = true;
-  secretConfigFile = "/var/lib/pleroma/secrets.exs";
-  configs = [
-    ''
-    import Config
-
-    config :pleroma, Pleroma.Web.Endpoint,
-      url: [host: "pleroma.example.net", scheme: "https", port: 443],
-      http: [ip: {127, 0, 0, 1}, port: 4000]
-
-    config :pleroma, :instance,
-      name: "Test",
-      email: "admin@example.net",
-      notify_email: "admin@example.net",
-      limit: 5000,
-      registrations_open: true
-
-    config :pleroma, :media_proxy,
-      enabled: false,
-      redirect_on_failure: true
-
-    config :pleroma, Pleroma.Repo,
-      adapter: Ecto.Adapters.Postgres,
-      username: "pleroma",
-      database: "pleroma",
-      hostname: "localhost"
-
-    # Configure web push notifications
-    config :web_push_encryption, :vapid_details,
-      subject: "mailto:admin@example.net"
-
-    # ... TO CONTINUE ...
-    ''
-  ];
-};
+```nix
+{
+  services.pleroma = {
+    enable = true;
+    secretConfigFile = "/var/lib/pleroma/secrets.exs";
+    configs = [
+      ''
+      import Config
+
+      config :pleroma, Pleroma.Web.Endpoint,
+        url: [host: "pleroma.example.net", scheme: "https", port: 443],
+        http: [ip: {127, 0, 0, 1}, port: 4000]
+
+      config :pleroma, :instance,
+        name: "Test",
+        email: "admin@example.net",
+        notify_email: "admin@example.net",
+        limit: 5000,
+        registrations_open: true
+
+      config :pleroma, :media_proxy,
+        enabled: false,
+        redirect_on_failure: true
+
+      config :pleroma, Pleroma.Repo,
+        adapter: Ecto.Adapters.Postgres,
+        username: "pleroma",
+        database: "pleroma",
+        hostname: "localhost"
+
+      # Configure web push notifications
+      config :web_push_encryption, :vapid_details,
+        subject: "mailto:admin@example.net"
+
+      # ... TO CONTINUE ...
+      ''
+    ];
+  };
+}
 ```
 
 Secrets must be moved into a file pointed by [](#opt-services.pleroma.secretConfigFile), in our case `/var/lib/pleroma/secrets.exs`. This file can be created copying the previously generated `config.exs` file and then removing all the settings, except the secrets. This is an example
@@ -121,60 +125,62 @@ $ pleroma_ctl user new <nickname> <email>  --admin --moderator --password <passw
 
 In this configuration, Pleroma is listening only on the local port 4000. Nginx can be configured as a Reverse Proxy, for forwarding requests from public ports to the Pleroma service. This is an example of configuration, using
 [Let's Encrypt](https://letsencrypt.org/) for the TLS certificates
-```
-security.acme = {
-  email = "root@example.net";
-  acceptTerms = true;
-};
-
-services.nginx = {
-  enable = true;
-  addSSL = true;
-
-  recommendedTlsSettings = true;
-  recommendedOptimisation = true;
-  recommendedGzipSettings = true;
-
-  recommendedProxySettings = false;
-  # NOTE: if enabled, the NixOS proxy optimizations will override the Pleroma
-  # specific settings, and they will enter in conflict.
-
-  virtualHosts = {
-    "pleroma.example.net" = {
-      http2 = true;
-      enableACME = true;
-      forceSSL = true;
-
-      locations."/" = {
-        proxyPass = "http://127.0.0.1:4000";
-
-        extraConfig = ''
-          etag on;
-          gzip on;
-
-          add_header 'Access-Control-Allow-Origin' '*' always;
-          add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
-          add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
-          add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
-          if ($request_method = OPTIONS) {
-            return 204;
-          }
-          add_header X-XSS-Protection "1; mode=block";
-          add_header X-Permitted-Cross-Domain-Policies none;
-          add_header X-Frame-Options DENY;
-          add_header X-Content-Type-Options nosniff;
-          add_header Referrer-Policy same-origin;
-          add_header X-Download-Options noopen;
-          proxy_http_version 1.1;
-          proxy_set_header Upgrade $http_upgrade;
-          proxy_set_header Connection "upgrade";
-          proxy_set_header Host $host;
-
-          client_max_body_size 16m;
-          # NOTE: increase if users need to upload very big files
-        '';
+```nix
+{
+  security.acme = {
+    email = "root@example.net";
+    acceptTerms = true;
+  };
+
+  services.nginx = {
+    enable = true;
+    addSSL = true;
+
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+
+    recommendedProxySettings = false;
+    # NOTE: if enabled, the NixOS proxy optimizations will override the Pleroma
+    # specific settings, and they will enter in conflict.
+
+    virtualHosts = {
+      "pleroma.example.net" = {
+        http2 = true;
+        enableACME = true;
+        forceSSL = true;
+
+        locations."/" = {
+          proxyPass = "http://127.0.0.1:4000";
+
+          extraConfig = ''
+            etag on;
+            gzip on;
+
+            add_header 'Access-Control-Allow-Origin' '*' always;
+            add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
+            add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
+            add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
+            if ($request_method = OPTIONS) {
+              return 204;
+            }
+            add_header X-XSS-Protection "1; mode=block";
+            add_header X-Permitted-Cross-Domain-Policies none;
+            add_header X-Frame-Options DENY;
+            add_header X-Content-Type-Options nosniff;
+            add_header Referrer-Policy same-origin;
+            add_header X-Download-Options noopen;
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "upgrade";
+            proxy_set_header Host $host;
+
+            client_max_body_size 16m;
+            # NOTE: increase if users need to upload very big files
+          '';
+        };
       };
     };
   };
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/networking/prosody.md b/nixpkgs/nixos/modules/services/networking/prosody.md
index 2da2c242a98b..d6eee4e29f0a 100644
--- a/nixpkgs/nixos/modules/services/networking/prosody.md
+++ b/nixpkgs/nixos/modules/services/networking/prosody.md
@@ -25,25 +25,27 @@ A good configuration to start with, including a
 [Multi User Chat (MUC)](https://xmpp.org/extensions/xep-0045.html)
 endpoint as well as a [HTTP File Upload](https://xmpp.org/extensions/xep-0363.html)
 endpoint will look like this:
-```
-services.prosody = {
-  enable = true;
-  admins = [ "root@example.org" ];
-  ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
-  ssl.key = "/var/lib/acme/example.org/key.pem";
-  virtualHosts."example.org" = {
-      enabled = true;
-      domain = "example.org";
-      ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
-      ssl.key = "/var/lib/acme/example.org/key.pem";
-  };
-  muc = [ {
-      domain = "conference.example.org";
-  } ];
-  uploadHttp = {
-      domain = "upload.example.org";
+```nix
+{
+  services.prosody = {
+    enable = true;
+    admins = [ "root@example.org" ];
+    ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
+    ssl.key = "/var/lib/acme/example.org/key.pem";
+    virtualHosts."example.org" = {
+        enabled = true;
+        domain = "example.org";
+        ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
+        ssl.key = "/var/lib/acme/example.org/key.pem";
+    };
+    muc = [ {
+        domain = "conference.example.org";
+    } ];
+    uploadHttp = {
+        domain = "upload.example.org";
+    };
   };
-};
+}
 ```
 
 ## Let's Encrypt Configuration {#module-services-prosody-letsencrypt}
@@ -57,16 +59,18 @@ certificate by leveraging the ACME
 
 Provided the setup detailed in the previous section, you'll need the following acme configuration to generate
 a TLS certificate for the three endpoints:
-```
-security.acme = {
-  email = "root@example.org";
-  acceptTerms = true;
-  certs = {
-    "example.org" = {
-      webroot = "/var/www/example.org";
-      email = "root@example.org";
-      extraDomainNames = [ "conference.example.org" "upload.example.org" ];
+```nix
+{
+  security.acme = {
+    email = "root@example.org";
+    acceptTerms = true;
+    certs = {
+      "example.org" = {
+        webroot = "/var/www/example.org";
+        email = "root@example.org";
+        extraDomainNames = [ "conference.example.org" "upload.example.org" ];
+      };
     };
   };
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-control.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-control.nix
new file mode 100644
index 000000000000..fdf3a9ba3cc1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-control.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-control;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "cs";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    beacon_db = {
+      connection = "/var/lib/scion-control/control.beacon.db";
+    };
+    path_db = {
+      connection = "/var/lib/scion-control/control.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-control/control.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-control.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-control = {
+    enable = mkEnableOption (lib.mdDoc "the scion-control service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-control/control.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-control configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-control = {
+      description = "SCION Control Service";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = if (config.services.scion.scion-dispatcher.enable == true) then "scion" else null;
+        ExecStart = "${pkgs.scion}/bin/scion-control --config ${configFile}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        StateDirectory = "scion-control";
+      };
+    };
+  };
+}
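
Note that in this and the following SCION modules, `settings` is merged over the defaults with a shallow `//` update, so setting any key of a top-level table replaces that table wholesale. A small sketch raising the log verbosity (the `debug` level is illustrative):

```nix
{
  services.scion.scion-control = {
    enable = true;
    settings = {
      # Shallow merge: this replaces the default log.console.level = "info".
      log.console.level = "debug";
    };
  };
}
```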
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix
new file mode 100644
index 000000000000..0bcc18771fc3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-daemon;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "sd";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    path_db = {
+      connection = "/var/lib/scion-daemon/sd.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-daemon/sd.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-daemon.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-daemon = {
+    enable = mkEnableOption (lib.mdDoc "the scion-daemon service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-daemon/sd.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-daemon configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-daemon = {
+      description = "SCION Daemon";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-daemon --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-daemon";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix
new file mode 100644
index 000000000000..bab1ec0a989b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-dispatcher;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    dispatcher = {
+      id = "dispatcher";
+      socket_file_mode = "0770";
+      application_socket = "/dev/shm/dispatcher/default.sock";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-dispatcher.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-dispatcher = {
+    enable = mkEnableOption (lib.mdDoc "the scion-dispatcher service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          dispatcher = {
+            id = "dispatcher";
+            socket_file_mode = "0770";
+            application_socket = "/dev/shm/dispatcher/default.sock";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-dispatcher configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    # Needed for group ownership of the dispatcher socket
+    users.groups.scion = {};
+
+    # SCION programs hardcode the dispatcher path in /run/shm and it is not
+    # configurable at runtime. Upstream plans to obsolete the dispatcher in
+    # favor of an SCMP daemon, at which point this can be removed.
+    system.activationScripts.scion-dispatcher = ''
+      ln -sf /dev/shm /run/shm
+    '';
+
+    systemd.services.scion-dispatcher = {
+      description = "SCION Dispatcher";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = "scion";
+        DynamicUser = true;
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        ExecStartPre = "${pkgs.coreutils}/bin/rm -rf /run/shm/dispatcher";
+        ExecStart = "${pkgs.scion}/bin/scion-dispatcher --config ${configFile}";
+        Restart = "on-failure";
+        StateDirectory = "scion-dispatcher";
+      };
+    };
+  };
+}
+
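
The shallow-merge caveat applies here as well: overriding one key of the `dispatcher` table replaces the whole table, so the defaults have to be restated alongside the change. A sketch that only tightens the socket mode (the `0660` value is illustrative):

```nix
{
  services.scion.scion-dispatcher = {
    enable = true;
    settings = {
      # The whole `dispatcher` table is replaced, so restate the defaults
      # next to the changed socket mode.
      dispatcher = {
        id = "dispatcher";
        application_socket = "/dev/shm/dispatcher/default.sock";
        socket_file_mode = "0660";
      };
    };
  };
}
```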
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-router.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-router.nix
new file mode 100644
index 000000000000..cbe83c6dbf8d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-router.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-router;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "br";
+      config_dir = "/etc/scion";
+    };
+  };
+  configFile = toml.generate "scion-router.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-router = {
+    enable = mkEnableOption (lib.mdDoc "the scion-router service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          general.id = "br";
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-router configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-router = {
+      description = "SCION Router";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-router --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-router";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion.nix b/nixpkgs/nixos/modules/services/networking/scion/scion.nix
new file mode 100644
index 000000000000..704f942b5d9e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion.nix
@@ -0,0 +1,39 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion;
+in
+{
+  options.services.scion = {
+    enable = mkEnableOption (lib.mdDoc "all of the scion components and services");
+    bypassBootstrapWarning = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to bypass the warning about SCION PKI bootstrapping.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    services.scion = {
+      scion-dispatcher.enable = true;
+      scion-daemon.enable = true;
+      scion-router.enable = true;
+      scion-control.enable = true;
+    };
+    assertions = [
+      { assertion = cfg.bypassBootstrapWarning == true;
+        message = ''
+          SCION is a routing protocol and requires bootstrapping with a manual, imperative key signing ceremony. You may want to join an existing Isolation Domain (ISD) such as scionlab.org, or bootstrap your own. If you have completed and configured the public key infrastructure for SCION and are sure this process is complete, then add the following to your configuration:
+
+          services.scion.bypassBootstrapWarning = true;
+
+          Refer to docs.scion.org for more information.
+        '';
+      }
+    ];
+  };
+}
+
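
Assuming the key-signing ceremony described in the assertion message has been completed and the resulting trust material placed under `/etc/scion` (the `config_dir` used by the per-service defaults above), the umbrella module is enabled like this:

```nix
{
  services.scion = {
    # Enables scion-dispatcher, scion-daemon, scion-router and scion-control.
    enable = true;
    # Acknowledge that SCION PKI bootstrapping has already been done.
    bypassBootstrapWarning = true;
  };
}
```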
diff --git a/nixpkgs/nixos/modules/services/networking/yggdrasil.md b/nixpkgs/nixos/modules/services/networking/yggdrasil.md
index bbaea5bc74aa..7b899f9d6ddb 100644
--- a/nixpkgs/nixos/modules/services/networking/yggdrasil.md
+++ b/nixpkgs/nixos/modules/services/networking/yggdrasil.md
@@ -12,7 +12,7 @@ self-arranging IPv6 network.
 ### Simple ephemeral node {#module-services-networking-yggdrasil-configuration-simple}
 
 An annotated example of a simple configuration:
-```
+```nix
 {
   services.yggdrasil = {
     enable = true;
@@ -39,7 +39,7 @@ An annotated example of a simple configuration:
 ### Persistent node with prefix {#module-services-networking-yggdrasil-configuration-prefix}
 
 A node with a fixed address that announces a prefix:
-```
+```nix
 let
   address = "210:5217:69c0:9afc:1b95:b9f:8718:c3d2";
   prefix = "310:5217:69c0:9afc";
@@ -90,7 +90,7 @@ in {
 
 A NixOS container attached to the Yggdrasil network via a node running on the
 host:
-```
+```nix
 let
   yggPrefix64 = "310:5217:69c0:9afc";
     # Again, taken from the output of "yggdrasilctl getself".