Diffstat (limited to 'nixos/modules')
-rw-r--r--  nixos/modules/module-list.nix                    |   2
-rw-r--r--  nixos/modules/programs/venus.nix                 |   2
-rw-r--r--  nixos/modules/security/acme.nix                  |  10
-rw-r--r--  nixos/modules/security/acme.xml                  | 247
-rw-r--r--  nixos/modules/services/editors/emacs.xml         |   2
-rw-r--r--  nixos/modules/services/misc/nix-daemon.nix       |   2
-rw-r--r--  nixos/modules/services/misc/octoprint.nix        |   9
-rw-r--r--  nixos/modules/services/networking/prosody.nix    |  13
-rw-r--r--  nixos/modules/services/torrent/deluge.nix        |   4
-rw-r--r--  nixos/modules/system/boot/initrd-ssh.nix         |   8
-rw-r--r--  nixos/modules/virtualisation/cri-o.nix           |  29
-rw-r--r--  nixos/modules/virtualisation/oci-containers.nix (renamed from nixos/modules/virtualisation/docker-containers.nix) | 124
-rw-r--r--  nixos/modules/virtualisation/podman.nix          |  20
13 files changed, 340 insertions, 132 deletions
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 0cd17775e516..28f536056bf1 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -984,9 +984,9 @@
   ./virtualisation/container-config.nix
   ./virtualisation/containers.nix
   ./virtualisation/nixos-containers.nix
+  ./virtualisation/oci-containers.nix
   ./virtualisation/cri-o.nix
   ./virtualisation/docker.nix
-  ./virtualisation/docker-containers.nix
   ./virtualisation/ecs-agent.nix
   ./virtualisation/libvirtd.nix
   ./virtualisation/lxc.nix
diff --git a/nixos/modules/programs/venus.nix b/nixos/modules/programs/venus.nix
index 110570ac3f06..58faf38777d0 100644
--- a/nixos/modules/programs/venus.nix
+++ b/nixos/modules/programs/venus.nix
@@ -75,7 +75,7 @@ in
       };
 
       link = mkOption {
-        default = "http://planet.nixos.org";
+        default = "https://planet.nixos.org";
         type = types.str;
         description = ''
           Link to the main page.
diff --git a/nixos/modules/security/acme.nix b/nixos/modules/security/acme.nix
index 36f91529265e..776ef07d716c 100644
--- a/nixos/modules/security/acme.nix
+++ b/nixos/modules/security/acme.nix
@@ -87,19 +87,19 @@ let
         default = {};
         example = literalExample ''
           {
-            "example.org" = "/srv/http/nginx";
+            "example.org" = null;
             "mydomain.org" = null;
           }
         '';
         description = ''
-          A list of extra domain names, which are included in the one certificate to be issued, with their
-          own server roots if needed.
+          A list of extra domain names, which are included in the one certificate to be issued.
+          Setting a distinct server root is deprecated and not functional in 20.03+.
         '';
       };
 
       keyType = mkOption {
         type = types.str;
-        default = "ec384";
+        default = "ec256";
         description = ''
           Key type to use for private keys.
           For an up to date list of supported values check the --key-type option
@@ -250,7 +250,7 @@ in
             "example.com" = {
               webroot = "/var/www/challenges/";
               email = "foo@example.com";
-              extraDomains = { "www.example.com" = null; "foo.example.com" = "/var/www/foo/"; };
+              extraDomains = { "www.example.com" = null; "foo.example.com" = null; };
             };
             "bar.example.com" = {
               webroot = "/var/www/challenges/";
diff --git a/nixos/modules/security/acme.xml b/nixos/modules/security/acme.xml
index 2b29c1174845..f802faee9749 100644
--- a/nixos/modules/security/acme.xml
+++ b/nixos/modules/security/acme.xml
@@ -6,92 +6,249 @@
  <title>SSL/TLS Certificates with ACME</title>
  <para>
   NixOS supports automatic domain validation &amp; certificate retrieval and
-  renewal using the ACME protocol. This is currently only implemented by and
-  for Let's Encrypt. The alternative ACME client <literal>lego</literal> is
-  used under the hood.
+  renewal using the ACME protocol. Any provider can be used, but by default
+  NixOS uses Let's Encrypt. The alternative ACME client <literal>lego</literal>
+  is used under the hood.
+ </para>
+ <para>
+  Automatic cert validation and configuration for Apache and Nginx virtual
+  hosts is included in NixOS; however, if you would like to generate a wildcard
+  cert or you are not using a web server, you will have to configure DNS-based
+  validation.
  </para>
  <section xml:id="module-security-acme-prerequisites">
   <title>Prerequisites</title>
 
   <para>
-   You need to have a running HTTP server for verification. The server must
-   have a webroot defined that can serve
+   To use the ACME module, you must accept the provider's terms of service
+   by setting <literal><xref linkend="opt-security.acme.acceptTerms" /></literal>
+   to <literal>true</literal>. The Let's Encrypt ToS can be found
+   <link xlink:href="https://letsencrypt.org/repository/">here</link>.
+  </para>
+
+  <para>
+   You must also set an email address to be used when creating accounts with
+   Let's Encrypt. You can set this for all certs with
+   <literal><xref linkend="opt-security.acme.email" /></literal>
+   and/or on a per-cert basis with
+   <literal><xref linkend="opt-security.acme.certs._name_.email" /></literal>.
+   This address is only used for registration and renewal reminders,
+   and cannot be used to administer the certificates in any way.
+  </para>
+
+  <para>
+   Alternatively, you can use a different ACME server by changing the
+   <literal><xref linkend="opt-security.acme.server" /></literal> option
+   to a provider of your choosing, or just change the server for one cert with
+   <literal><xref linkend="opt-security.acme.certs._name_.server" /></literal>.
+  </para>
+
+  <para>
+   You will need an HTTP server or DNS server for verification. For HTTP,
+   the server must have a webroot defined that can serve
    <filename>.well-known/acme-challenge</filename>. This directory must be
-   writeable by the user that will run the ACME client.
+   writeable by the user that will run the ACME client. For DNS, you must
+   set up credentials with your provider/server for use with lego.
   </para>
+ </section>
+ <section xml:id="module-security-acme-nginx">
+  <title>Using ACME certificates in Nginx</title>
 
   <para>
-   For instance, this generic snippet could be used for Nginx:
+   NixOS supports fetching ACME certificates for you by setting
+   <literal><link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link>
+   = true;</literal> in a virtualHost config. We first create self-signed
+   placeholder certificates in place of the real ACME certs. The placeholder
+   certs are overwritten when the ACME certs arrive. For
+   <literal>foo.example.com</literal> the config would look like this:
+  </para>
+
 <programlisting>
-http {
-  server {
-    server_name _;
-    listen 80;
-    listen [::]:80;
-
-    location /.well-known/acme-challenge {
-      root /var/www/challenges;
-    }
+<xref linkend="opt-security.acme.acceptTerms" /> = true;
+<xref linkend="opt-security.acme.email" /> = "admin+acme@example.com";
+services.nginx = {
+  <link linkend="opt-services.nginx.enable">enable</link> = true;
+  <link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
+    "foo.example.com" = {
+      <link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
+      <link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true;
+      # All serverAliases will be added as <link linkend="opt-security.acme.certs._name_.extraDomains">extra domains</link> on the certificate.
+      <link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ "bar.example.com" ];
+      locations."/" = {
+        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/www";
+      };
+    };
 
-    location / {
-      return 301 https://$host$request_uri;
-    }
-  }
+    # We can also add a different vhost and reuse the same certificate,
+    # but we have to append the extra domain to the certificate manually
+    # (see the security.acme.certs line after the closing brace below).
+    "baz.example.com" = {
+      <link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
+      <link linkend="opt-services.nginx.virtualHosts._name_.useACMEHost">useACMEHost</link> = "foo.example.com";
+      locations."/" = {
+        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/www";
+      };
+    };
+  };
 }
+<link linkend="opt-security.acme.certs._name_.extraDomains">security.acme.certs."foo.example.com".extraDomains."baz.example.com"</link> = null;
 </programlisting>
+ </section>
+ <section xml:id="module-security-acme-httpd">
+  <title>Using ACME certificates in Apache/httpd</title>
+
+  <para>
+   Using ACME certificates with Apache virtual hosts is identical
+   to using them with Nginx. The attribute names are all the same, just replace
+   "nginx" with "httpd" where appropriate.
   </para>
  </section>
  <section xml:id="module-security-acme-configuring">
-  <title>Configuring</title>
+  <title>Manual configuration of HTTP-01 validation</title>
 
   <para>
-   To enable ACME certificate retrieval &amp; renewal for a certificate for
-   <literal>foo.example.com</literal>, add the following in your
-   <filename>configuration.nix</filename>:
+   First off, you will need to set up a virtual host to serve the challenges.
+   This example uses a vhost called <literal>acmechallenge.example.com</literal>,
+   with the intent that you will generate certs for all your vhosts and
+   redirect everyone to HTTPS.
+  </para>
+
+<programlisting>
+<xref linkend="opt-security.acme.acceptTerms" /> = true;
+<xref linkend="opt-security.acme.email" /> = "admin+acme@example.com";
+services.nginx = {
+  <link linkend="opt-services.nginx.enable">enable</link> = true;
+  <link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
+    "acmechallenge.example.com" = {
+      # Catchall vhost, will redirect users to HTTPS for all vhosts
+      <link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ "*.example.com" ];
+      # /var/lib/acme/.challenges must be writable by the ACME user
+      # and readable by the Nginx user.
+      # By default, this is the case.
+      locations."/.well-known/acme-challenge" = {
+        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/lib/acme/.challenges";
+      };
+      locations."/" = {
+        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.return">return</link> = "301 https://$host$request_uri";
+      };
+    };
+  };
+};
+# Alternative config for Apache
+services.httpd = {
+  <link linkend="opt-services.httpd.enable">enable = true;</link>
+  <link linkend="opt-services.httpd.virtualHosts">virtualHosts</link> = {
+    "acmechallenge.example.com" = {
+      # Catchall vhost, will redirect users to HTTPS for all vhosts
+      <link linkend="opt-services.httpd.virtualHosts._name_.serverAliases">serverAliases</link> = [ "*.example.com" ];
+      # /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
+      # By default, this is the case.
+      <link linkend="opt-services.httpd.virtualHosts._name_.documentRoot">documentRoot</link> = "/var/lib/acme/.challenges";
+      <link linkend="opt-services.httpd.virtualHosts._name_.extraConfig">extraConfig</link> = ''
+        RewriteEngine On
+        RewriteCond %{HTTPS} off
+        RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
+        RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
+      '';
+    };
+  };
+};
+</programlisting>
+
+  <para>
+   Now you need to configure ACME to generate a certificate.
+  </para>
+
 <programlisting>
 <xref linkend="opt-security.acme.certs"/>."foo.example.com" = {
-  <link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/www/challenges";
+  <link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/lib/acme/.challenges";
   <link linkend="opt-security.acme.certs._name_.email">email</link> = "foo@example.com";
+  # Since we have a wildcard vhost to handle port 80,
+  # we can generate certs for anything!
+  # Just make sure your DNS resolves them.
+  <link linkend="opt-security.acme.certs._name_.extraDomains">extraDomains</link> = { "mail.example.com" = null; };
 };
 </programlisting>
-  </para>
 
   <para>
    The private key <filename>key.pem</filename> and certificate
    <filename>fullchain.pem</filename> will be put into
    <filename>/var/lib/acme/foo.example.com</filename>.
   </para>
+
   <para>
    Refer to <xref linkend="ch-options" /> for all available configuration
    options for the <link linkend="opt-security.acme.certs">security.acme</link>
    module.
   </para>
  </section>
- <section xml:id="module-security-acme-nginx">
-  <title>Using ACME certificates in Nginx</title>
+ <section xml:id="module-security-acme-config-dns">
+  <title>Configuring ACME for DNS validation</title>
 
   <para>
-   NixOS supports fetching ACME certificates for you by setting
-   <literal><link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link>
-   = true;</literal> in a virtualHost config. We first create self-signed
-   placeholder certificates in place of the real ACME certs. The placeholder
-   certs are overwritten when the ACME certs arrive. For
-   <literal>foo.example.com</literal> the config would look like.
+   This is useful if you want to generate a wildcard certificate, since
+   ACME servers will only hand out wildcard certs over DNS validation.
+   There are a number of supported DNS providers and servers you can utilise,
+   see the <link xlink:href="https://go-acme.github.io/lego/dns/">lego docs</link>
+   for provider/server specific configuration values. For the sake of these
+   docs, we will provide a fully self-hosted example using bind.
   </para>
 
 <programlisting>
-services.nginx = {
-  <link linkend="opt-services.nginx.enable">enable = true;</link>
-  <link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
-    "foo.example.com" = {
-      <link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
-      <link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true;
-      locations."/" = {
-        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/www";
-      };
-    };
-  };
+services.bind = {
+  <link linkend="opt-services.bind.enable">enable</link> = true;
+  <link linkend="opt-services.bind.extraConfig">extraConfig</link> = ''
+    include "/var/lib/secrets/dnskeys.conf";
+  '';
+  <link linkend="opt-services.bind.zones">zones</link> = [
+    rec {
+      name = "example.com";
+      file = "/var/db/bind/${name}";
+      master = true;
+      extraConfig = "allow-update { key rfc2136key.example.com.; };";
+    }
+  ];
 }
+
+# Now we can configure ACME
+<xref linkend="opt-security.acme.acceptTerms" /> = true;
+<xref linkend="opt-security.acme.email" /> = "admin+acme@example.com";
+<xref linkend="opt-security.acme.certs" />."example.com" = {
+  <link linkend="opt-security.acme.certs._name_.domain">domain</link> = "*.example.com";
+  <link linkend="opt-security.acme.certs._name_.dnsProvider">dnsProvider</link> = "rfc2136";
+  <link linkend="opt-security.acme.certs._name_.credentialsFile">credentialsFile</link> = "/var/lib/secrets/certs.secret";
+  # We don't need to wait for propagation since this is a local DNS server
+  <link linkend="opt-security.acme.certs._name_.dnsPropagationCheck">dnsPropagationCheck</link> = false;
+};
 </programlisting>
+
+  <para>
+   The <filename>dnskeys.conf</filename> and <filename>certs.secret</filename>
+   must be kept secure and thus you should not keep their contents in your
+   Nix config. Instead, generate them one time with these commands:
+  </para>
+
+<programlisting>
+mkdir -p /var/lib/secrets
+tsig-keygen rfc2136key.example.com &gt; /var/lib/secrets/dnskeys.conf
+chown named:root /var/lib/secrets/dnskeys.conf
+chmod 400 /var/lib/secrets/dnskeys.conf
+
+# Copy the secret value from the dnskeys.conf, and put it in
+# RFC2136_TSIG_SECRET below
+
+cat &gt; /var/lib/secrets/certs.secret &lt;&lt; EOF
+RFC2136_NAMESERVER='127.0.0.1:53'
+RFC2136_TSIG_ALGORITHM='hmac-sha256.'
+RFC2136_TSIG_KEY='rfc2136key.example.com'
+RFC2136_TSIG_SECRET='your secret key'
+EOF
+chmod 400 /var/lib/secrets/certs.secret
+</programlisting>
+
+  <para>
+   Now you're all set to generate certs! You should monitor the first invocation
+   by running <literal>systemctl start acme-example.com.service &amp;
+   journalctl -fu acme-example.com.service</literal> and watching its log output.
+  </para>
  </section>
 </chapter>
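The Apache/httpd section above only states that the attribute names mirror the Nginx ones; assuming that mapping, a minimal sketch of the equivalent httpd virtual host could look like this (hostnames and the document root are placeholders):

  security.acme.acceptTerms = true;
  security.acme.email = "admin+acme@example.com";
  services.httpd = {
    enable = true;
    virtualHosts."foo.example.com" = {
      forceSSL = true;
      enableACME = true;
      # serverAliases end up as extra domains on the certificate, as with Nginx.
      serverAliases = [ "bar.example.com" ];
      documentRoot = "/var/www";
    };
  };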
diff --git a/nixos/modules/services/editors/emacs.xml b/nixos/modules/services/editors/emacs.xml
index 03483f69fa2f..74c60014dcea 100644
--- a/nixos/modules/services/editors/emacs.xml
+++ b/nixos/modules/services/editors/emacs.xml
@@ -294,7 +294,7 @@ https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides
     If you are not on NixOS or want to install this particular Emacs only for
     yourself, you can do so by adding it to your
     <filename>~/.config/nixpkgs/config.nix</filename> (see
-    <link xlink:href="http://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides">Nixpkgs
+    <link xlink:href="https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides">Nixpkgs
     manual</link>):
     <example xml:id="module-services-emacs-config-nix">
      <title>Custom Emacs in <filename>~/.config/nixpkgs/config.nix</filename></title>
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
index 0c2407e1dd2f..b28e3679d1c7 100644
--- a/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -283,7 +283,7 @@ in
       trustedBinaryCaches = mkOption {
         type = types.listOf types.str;
         default = [ ];
-        example = [ "http://hydra.nixos.org/" ];
+        example = [ "https://hydra.nixos.org/" ];
         description = ''
           List of binary cache URLs that non-root users can use (in
           addition to those specified using
diff --git a/nixos/modules/services/misc/octoprint.nix b/nixos/modules/services/misc/octoprint.nix
index 651ed3743884..7a71d2c8c6aa 100644
--- a/nixos/modules/services/misc/octoprint.nix
+++ b/nixos/modules/services/misc/octoprint.nix
@@ -17,9 +17,9 @@ let
 
   cfgUpdate = pkgs.writeText "octoprint-config.yaml" (builtins.toJSON fullConfig);
 
-  pluginsEnv = pkgs.python.buildEnv.override {
-    extraLibs = cfg.plugins pkgs.octoprint-plugins;
-  };
+  pluginsEnv = package.python.withPackages (ps: [ps.octoprint] ++ (cfg.plugins ps));
+
+  package = pkgs.octoprint;
 
 in
 {
@@ -106,7 +106,6 @@ in
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       path = [ pluginsEnv ];
-      environment.PYTHONPATH = makeSearchPathOutput "lib" pkgs.python.sitePackages [ pluginsEnv ];
 
       preStart = ''
         if [ -e "${cfg.stateDir}/config.yaml" ]; then
@@ -119,7 +118,7 @@ in
       '';
 
       serviceConfig = {
-        ExecStart = "${pkgs.octoprint}/bin/octoprint serve -b ${cfg.stateDir}";
+        ExecStart = "${pluginsEnv}/bin/octoprint serve -b ${cfg.stateDir}";
         User = cfg.user;
         Group = cfg.group;
       };
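The OctoPrint unit now runs octoprint from a python.withPackages environment that also contains the selected plugins, so services.octoprint.plugins stays a function from the plugin set to a list. A minimal sketch of a configuration using it (the plugin attribute is illustrative and may not exist in pkgs.octoprint-plugins):

  services.octoprint = {
    enable = true;
    # The function receives the OctoPrint plugin set; its result is added to
    # the withPackages environment built above.
    plugins = plugins: with plugins; [ titlestatus ];
  };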
diff --git a/nixos/modules/services/networking/prosody.nix b/nixos/modules/services/networking/prosody.nix
index 9825613d809f..cdd341c9fb62 100644
--- a/nixos/modules/services/networking/prosody.nix
+++ b/nixos/modules/services/networking/prosody.nix
@@ -382,6 +382,11 @@ let
         default = "en";
         description = "Default room language.";
       };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Additional MUC-specific configuration.";
+      };
     };
   };
 
@@ -792,6 +797,8 @@ in
 
       https_ports = ${toLua cfg.httpsPorts}
 
+      ${ cfg.extraConfig }
+
       ${lib.concatMapStrings (muc: ''
         Component ${toLua muc.domain} "muc"
             modules_enabled = { "muc_mam"; ${optionalString muc.vcard_muc ''"vcard_muc";'' } }
@@ -809,8 +816,8 @@ in
             muc_room_default_change_subject = ${toLua muc.roomDefaultChangeSubject}
             muc_room_default_history_length = ${toLua muc.roomDefaultHistoryLength}
             muc_room_default_language = ${toLua muc.roomDefaultLanguage}
-
-          '') cfg.muc}
+            ${ muc.extraConfig }
+        '') cfg.muc}
 
       ${ lib.optionalString (cfg.uploadHttp != null) ''
         Component ${toLua cfg.uploadHttp.domain} "http_upload"
@@ -820,8 +827,6 @@ in
             http_upload_path = ${toLua cfg.uploadHttp.httpUploadPath}
       ''}
 
-      ${ cfg.extraConfig }
-
       ${ lib.concatStringsSep "\n" (lib.mapAttrsToList (n: v: ''
         VirtualHost "${v.domain}"
           enabled = ${boolToString v.enabled};
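A minimal sketch of the new per-MUC extraConfig option; the lines are inserted verbatim into the generated Component block after the muc_room_default_* settings (the domain and the Lua setting are placeholders):

  services.prosody.muc = [
    {
      domain = "conference.example.org";
      extraConfig = ''
        max_history_messages = 50
      '';
    }
  ];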
diff --git a/nixos/modules/services/torrent/deluge.nix b/nixos/modules/services/torrent/deluge.nix
index 2f1e7300ca38..45398cb26138 100644
--- a/nixos/modules/services/torrent/deluge.nix
+++ b/nixos/modules/services/torrent/deluge.nix
@@ -142,7 +142,7 @@ in {
           description = ''
             Extra packages available at runtime to enable Deluge's plugins. For example,
             extraction utilities are required for the built-in "Extractor" plugin.
-            This always contains unzip, gnutar, xz, p7zip and bzip2.
+            This always contains unzip, gnutar, xz and bzip2.
           '';
         };
 
@@ -187,7 +187,7 @@ in {
     );
 
     # Provide a default set of `extraPackages`.
-    services.deluge.extraPackages = with pkgs; [ unzip gnutar xz p7zip bzip2 ];
+    services.deluge.extraPackages = with pkgs; [ unzip gnutar xz bzip2 ];
 
     systemd.tmpfiles.rules = [
       "d '${cfg.dataDir}' 0770 ${cfg.user} ${cfg.group}"
diff --git a/nixos/modules/system/boot/initrd-ssh.nix b/nixos/modules/system/boot/initrd-ssh.nix
index 60760487a1d2..f7ef26103709 100644
--- a/nixos/modules/system/boot/initrd-ssh.nix
+++ b/nixos/modules/system/boot/initrd-ssh.nix
@@ -83,6 +83,12 @@ in
         Authorized keys for the root user on initrd.
       '';
     };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = "Verbatim contents of <filename>sshd_config</filename>.";
+    };
   };
 
   imports =
@@ -126,6 +132,8 @@ in
       '' else ''
         UseDNS no
       ''}
+
+      ${cfg.extraConfig}
     '';
   in mkIf (config.boot.initrd.network.enable && cfg.enable) {
     assertions = [
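A minimal sketch of the new option in use, assuming the module's usual boot.initrd.network.ssh prefix; the lines are appended verbatim to the generated sshd_config of the initrd SSH daemon (the directives shown are ordinary sshd_config settings chosen only as an example):

  boot.initrd.network.ssh.extraConfig = ''
    MaxAuthTries 5
    LoginGraceTime 20
  '';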
diff --git a/nixos/modules/virtualisation/cri-o.nix b/nixos/modules/virtualisation/cri-o.nix
index 89ba9c3c6636..2af4214302d6 100644
--- a/nixos/modules/virtualisation/cri-o.nix
+++ b/nixos/modules/virtualisation/cri-o.nix
@@ -4,6 +4,11 @@ with lib;
 
 let
   cfg = config.virtualisation.cri-o;
+
+  # Copy configuration files to avoid having the entire sources in the system closure
+  copyFile = filePath: pkgs.runCommandNoCC (builtins.unsafeDiscardStringContext (builtins.baseNameOf filePath)) {} ''
+    cp ${filePath} $out
+  '';
 in
 {
   imports = [
@@ -45,9 +50,9 @@ in
   config = mkIf cfg.enable {
     environment.systemPackages = with pkgs;
       [ cri-o cri-tools conmon iptables runc utillinux ];
-    environment.etc."crictl.yaml".text = ''
-      runtime-endpoint: unix:///var/run/crio/crio.sock
-    '';
+
+    environment.etc."crictl.yaml".source = copyFile "${pkgs.cri-o.src}/crictl.yaml";
+
     environment.etc."crio/crio.conf".text = ''
       [crio]
       storage_driver = "${cfg.storageDriver}"
@@ -66,23 +71,7 @@ in
       manage_network_ns_lifecycle = true
     '';
 
-    environment.etc."cni/net.d/20-cri-o-bridge.conf".text = ''
-      {
-        "cniVersion": "0.3.1",
-        "name": "crio-bridge",
-        "type": "bridge",
-        "bridge": "cni0",
-        "isGateway": true,
-        "ipMasq": true,
-        "ipam": {
-          "type": "host-local",
-          "subnet": "10.88.0.0/16",
-          "routes": [
-              { "dst": "0.0.0.0/0" }
-          ]
-        }
-      }
-    '';
+    environment.etc."cni/net.d/10-crio-bridge.conf".source = copyFile "${pkgs.cri-o.src}/contrib/cni/10-crio-bridge.conf";
 
     # Enable common /etc/containers configuration
     virtualisation.containers.enable = true;
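The copyFile helper is there because pointing an environment.etc entry straight into "${pkgs.cri-o.src}" would pull the entire CRI-O source tree into the system closure; copying each file into its own small derivation keeps only that file. Roughly, the difference is:

  # Pulls all of pkgs.cri-o.src into the system closure:
  environment.etc."crictl.yaml".source = "${pkgs.cri-o.src}/crictl.yaml";

  # Keeps only the single file; unsafeDiscardStringContext is needed so the
  # basename of the store path can be used as the derivation name.
  environment.etc."crictl.yaml".source = copyFile "${pkgs.cri-o.src}/crictl.yaml";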
diff --git a/nixos/modules/virtualisation/docker-containers.nix b/nixos/modules/virtualisation/oci-containers.nix
index 5ab990a3d7cc..a46dd65eb491 100644
--- a/nixos/modules/virtualisation/docker-containers.nix
+++ b/nixos/modules/virtualisation/oci-containers.nix
@@ -1,17 +1,20 @@
-{ config, lib, pkgs, ... }:
+{ config, options, lib, pkgs, ... }:
 
 with lib;
 let
-  cfg = config.docker-containers;
+  cfg = config.virtualisation.oci-containers;
+  proxy_env = config.networking.proxy.envVars;
 
-  dockerContainer =
+  defaultBackend = options.virtualisation.oci-containers.backend.default;
+
+  containerOptions =
     { ... }: {
 
       options = {
 
         image = mkOption {
           type = with types; str;
-          description = "Docker image to run.";
+          description = "OCI image to run.";
           example = "library/hello-world";
         };
 
@@ -58,18 +61,19 @@ let
 
         log-driver = mkOption {
           type = types.str;
-          default = "none";
+          default = "journald";
           description = ''
             Logging driver for the container.  The default of
-            <literal>"none"</literal> means that the container's logs will be
-            handled as part of the systemd unit.  Setting this to
-            <literal>"journald"</literal> will result in duplicate logging, but
-            the container's logs will be visible to the <command>docker
-            logs</command> command.
-
-            For more details and a full list of logging drivers, refer to the
-            <link xlink:href="https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver">
-            Docker engine documentation</link>
+            <literal>"journald"</literal> means that the container's logs will be
+            handled as part of the systemd unit.
+
+            For more details and a full list of logging drivers, refer to respective backends documentation.
+
+            For Docker:
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver">Docker engine documentation</link>
+
+            For Podman:
+            Refer to the podman-run(1) man page.
           '';
         };
 
@@ -172,10 +176,10 @@ let
           description = ''
             Define which other containers this one depends on. They will be added to both After and Requires for the unit.
 
-            Use the same name as the attribute under <literal>services.docker-containers</literal>.
+            Use the same name as the attribute under <literal>virtualisation.oci-containers</literal>.
           '';
           example = literalExample ''
-            services.docker-containers = {
+            virtualisation.oci-containers = {
               node1 = {};
               node2 = {
                 dependsOn = [ "node1" ];
@@ -184,10 +188,10 @@ let
           '';
         };
 
-        extraDockerOptions = mkOption {
+        extraOptions = mkOption {
           type = with types; listOf str;
           default = [];
-          description = "Extra options for <command>docker run</command>.";
+          description = "Extra options for <command>${defaultBackend} run</command>.";
           example = literalExample ''
             ["--network=host"]
           '';
@@ -205,24 +209,31 @@ let
     };
 
   mkService = name: container: let
-    mkAfter = map (x: "docker-${x}.service") container.dependsOn;
-  in rec {
+    dependsOn = map (x: "${cfg.backend}-${x}.service") container.dependsOn;
+  in {
     wantedBy = [] ++ optional (container.autoStart) "multi-user.target";
-    after = [ "docker.service" "docker.socket" ] ++ mkAfter;
-    requires = after;
-    path = [ pkgs.docker ];
+    after = lib.optionals (cfg.backend == "docker") [ "docker.service" "docker.socket" ] ++ dependsOn;
+    requires = dependsOn;
+    environment = proxy_env;
+
+    path =
+      if cfg.backend == "docker" then [ pkgs.docker ]
+      else if cfg.backend == "podman" then [ config.virtualisation.podman.package ]
+      else throw "Unhandled backend: ${cfg.backend}";
 
     preStart = ''
-      docker rm -f ${name} || true
+      ${cfg.backend} rm -f ${name} || true
       ${optionalString (container.imageFile != null) ''
-        docker load -i ${container.imageFile}
+        ${cfg.backend} load -i ${container.imageFile}
         ''}
       '';
-    postStop = "docker rm -f ${name} || true";
-        
+    postStop = "${cfg.backend} rm -f ${name} || true";
+
     serviceConfig = {
+      StandardOutput = "null";
+      StandardError = "null";
       ExecStart = concatStringsSep " \\\n  " ([
-        "${pkgs.docker}/bin/docker run"
+        "${config.system.path}/bin/${cfg.backend} run"
         "--rm"
         "--name=${name}"
         "--log-driver=${container.log-driver}"
@@ -233,12 +244,12 @@ let
         ++ optional (container.user != null) "-u ${escapeShellArg container.user}"
         ++ map (v: "-v ${escapeShellArg v}") container.volumes
         ++ optional (container.workdir != null) "-w ${escapeShellArg container.workdir}"
-        ++ map escapeShellArg container.extraDockerOptions
+        ++ map escapeShellArg container.extraOptions
         ++ [container.image]
         ++ map escapeShellArg container.cmd
       );
 
-      ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || docker stop ${name}"'';
+      ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || ${cfg.backend} stop ${name}"'';
 
       ### There is no generalized way of supporting `reload` for docker
       ### containers. Some containers may respond well to SIGHUP sent to their
@@ -263,19 +274,50 @@ let
   };
 
 in {
+  imports = [
+    (
+      lib.mkChangedOptionModule
+      [ "docker-containers"  ]
+      [ "virtualisation" "oci-containers" ]
+      (oldcfg: {
+        backend = "docker";
+        containers = lib.mapAttrs (n: v: builtins.removeAttrs (v // {
+          extraOptions = v.extraDockerOptions or [];
+        }) [ "extraDockerOptions" ]) oldcfg.docker-containers;
+      })
+    )
+  ];
+
+  options.virtualisation.oci-containers = {
+
+    backend = mkOption {
+      type = types.enum [ "podman" "docker" ];
+      default =
+        # TODO: Once https://github.com/NixOS/nixpkgs/issues/77925 is resolved default to podman
+        # if versionAtLeast config.system.stateVersion "20.09" then "podman"
+        # else "docker";
+        "docker";
+      description = "The underlying container backend to use.";
+    };
 
-  options.docker-containers = mkOption {
-    default = {};
-    type = types.attrsOf (types.submodule dockerContainer);
-    description = "Docker containers to run as systemd services.";
-  };
-
-  config = mkIf (cfg != {}) {
-
-    systemd.services = mapAttrs' (n: v: nameValuePair "docker-${n}" (mkService n v)) cfg;
-
-    virtualisation.docker.enable = true;
+    containers = mkOption {
+      default = {};
+      type = types.attrsOf (types.submodule containerOptions);
+      description = "OCI (Docker) containers to run as systemd services.";
+    };
 
   };
 
+  config = lib.mkIf (cfg.containers != {}) (lib.mkMerge [
+    {
+      systemd.services = mapAttrs' (n: v: nameValuePair "${cfg.backend}-${n}" (mkService n v)) cfg.containers;
+    }
+    (lib.mkIf (cfg.backend == "podman") {
+      virtualisation.podman.enable = true;
+    })
+    (lib.mkIf (cfg.backend == "docker") {
+      virtualisation.docker.enable = true;
+    })
+  ]);
+
 }
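A minimal sketch of the renamed module in use, showing the new option path, the backend selector, and extraOptions (formerly extraDockerOptions); the image and option values are placeholders:

  virtualisation.oci-containers = {
    backend = "podman";    # "docker" remains the default for now
    containers.hello = {
      image = "library/hello-world";
      autoStart = true;
      extraOptions = [ "--network=host" ];
    };
  };

Existing docker-containers.* definitions are rewritten onto this tree by the mkChangedOptionModule import above, with the backend forced to "docker".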
diff --git a/nixos/modules/virtualisation/podman.nix b/nixos/modules/virtualisation/podman.nix
index 1dc79272ccb6..652850bf5006 100644
--- a/nixos/modules/virtualisation/podman.nix
+++ b/nixos/modules/virtualisation/podman.nix
@@ -8,13 +8,11 @@ let
 
   # Provides a fake "docker" binary mapping to podman
   dockerCompat = pkgs.runCommandNoCC "${podmanPackage.pname}-docker-compat-${podmanPackage.version}" {
-    outputs = [ "out" "bin" "man" ];
+    outputs = [ "out" "man" ];
     inherit (podmanPackage) meta;
   } ''
-    mkdir $out
-
-    mkdir -p $bin/bin
-    ln -s ${podmanPackage.bin}/bin/podman $bin/bin/docker
+    mkdir -p $out/bin
+    ln -s ${podmanPackage}/bin/podman $out/bin/docker
 
     mkdir -p $man/share/man/man1
     for f in ${podmanPackage.man}/share/man/man1/*; do
@@ -88,11 +86,21 @@ in
       };
     };
 
+    package = lib.mkOption {
+      type = types.package;
+      default = podmanPackage;
+      internal = true;
+      description = ''
+        The final Podman package (including extra packages).
+      '';
+    };
+
+
   };
 
   config = lib.mkIf cfg.enable {
 
-    environment.systemPackages = [ podmanPackage ]
+    environment.systemPackages = [ cfg.package ]
       ++ lib.optional cfg.dockerCompat dockerCompat;
 
     environment.etc."containers/libpod.conf".text = ''
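For completeness, a minimal sketch of the Podman side that the "podman" backend of oci-containers builds on; dockerCompat is the flag guarding the compatibility derivation above, which symlinks a docker binary to podman:

  virtualisation.podman = {
    enable = true;
    dockerCompat = true;
  };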