about summary refs log tree commit diff
path: root/nixpkgs/nixos/modules/hardware
diff options
context:
space:
mode:
Diffstat (limited to 'nixpkgs/nixos/modules/hardware')
-rw-r--r--  nixpkgs/nixos/modules/hardware/graphics.nix        | 126
-rw-r--r--  nixpkgs/nixos/modules/hardware/opengl.nix          | 161
-rw-r--r--  nixpkgs/nixos/modules/hardware/printers.nix        |   2
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/amdgpu-pro.nix |  69
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/nvidia.nix    | 882
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/virtualbox.nix |   7
-rw-r--r--  nixpkgs/nixos/modules/hardware/xone.nix            |   2
7 files changed, 584 insertions, 665 deletions
diff --git a/nixpkgs/nixos/modules/hardware/graphics.nix b/nixpkgs/nixos/modules/hardware/graphics.nix
new file mode 100644
index 000000000000..99c122f75c2a
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/graphics.nix
@@ -0,0 +1,126 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.hardware.graphics;
+
+  driversEnv = pkgs.buildEnv {
+    name = "graphics-drivers";
+    paths = [ cfg.package ] ++ cfg.extraPackages;
+  };
+
+  driversEnv32 = pkgs.buildEnv {
+    name = "graphics-drivers-32bit";
+    paths = [ cfg.package32 ] ++ cfg.extraPackages32;
+  };
+in
+{
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "xserver" "vaapiDrivers" ] [ "hardware" "opengl" "extraPackages" ])
+    (lib.mkRemovedOptionModule [ "hardware" "opengl" "s3tcSupport" ] "S3TC support is now always enabled in Mesa.")
+    (lib.mkRemovedOptionModule [ "hardware" "opengl" "driSupport"] "The setting can be removed.")
+
+    (lib.mkRenamedOptionModule [ "hardware" "opengl" "enable"] [ "hardware" "graphics" "enable" ])
+    (lib.mkRenamedOptionModule [ "hardware" "opengl" "driSupport32Bit"] [ "hardware" "graphics" "enable32Bit" ])
+    (lib.mkRenamedOptionModule [ "hardware" "opengl" "package"] [ "hardware" "graphics" "package" ])
+    (lib.mkRenamedOptionModule [ "hardware" "opengl" "package32"] [ "hardware" "graphics" "package32" ])
+    (lib.mkRenamedOptionModule [ "hardware" "opengl" "extraPackages"] [ "hardware" "graphics" "extraPackages" ])
+    (lib.mkRenamedOptionModule [ "hardware" "opengl" "extraPackages32"] [ "hardware" "graphics" "extraPackages32" ])
+  ];
+
+  options.hardware.graphics = {
+    enable = lib.mkOption {
+      description = ''
+        Whether to enable hardware accelerated graphics drivers.
+
+        This is required to allow most graphical applications and
+        environments to use hardware rendering, video encode/decode
+        acceleration, etc.
+
+        This option should be enabled by default by the corresponding modules,
+        so you do not usually have to set it yourself.
+      '';
+      type = lib.types.bool;
+      default = false;
+    };
+
+    enable32Bit = lib.mkOption {
+      description = ''
+        On 64-bit systems, whether to also install 32-bit drivers for
+        32-bit applications (such as Wine).
+      '';
+      type = lib.types.bool;
+      default = false;
+    };
+
+    package = lib.mkOption {
+      description = ''
+        The package that provides the default driver set.
+      '';
+      type = lib.types.package;
+      internal = true;
+    };
+
+    package32 = lib.mkOption {
+      description = ''
+        The package that provides the 32-bit driver set.
+        Used when {option}`enable32Bit` is enabled.
+      '';
+      type = lib.types.package;
+      internal = true;
+    };
+
+    extraPackages = lib.mkOption {
+      description = ''
+        Additional packages to add to the default graphics driver lookup path.
+        This can be used to add OpenCL drivers, VA-API/VDPAU drivers, etc.
+
+        ::: {.note}
+        intel-media-driver supports hardware Broadwell (2014) or newer. Older hardware should use the mostly unmaintained intel-vaapi-driver driver.
+        :::
+      '';
+      type = lib.types.listOf lib.types.package;
+      default = [];
+      example = lib.literalExpression "with pkgs; [ intel-media-driver intel-ocl intel-vaapi-driver ]";
+    };
+
+    extraPackages32 = lib.mkOption {
+      description = ''
+        Additional packages to add to 32-bit graphics driver lookup path on 64-bit systems.
+        Used when {option}`enable32Bit` is set. This can be used to add OpenCL drivers, VA-API/VDPAU drivers, etc.
+
+        ::: {.note}
+        intel-media-driver supports hardware Broadwell (2014) or newer. Older hardware should use the mostly unmaintained intel-vaapi-driver driver.
+        :::
+      '';
+      type = lib.types.listOf lib.types.package;
+      default = [];
+      example = lib.literalExpression "with pkgs.pkgsi686Linux; [ intel-media-driver intel-vaapi-driver ]";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.enable32Bit -> pkgs.stdenv.isx86_64;
+        message = "`hardware.graphics.enable32Bit` only makes sense on a 64-bit system.";
+      }
+      {
+        assertion = cfg.enable32Bit -> (config.boot.kernelPackages.kernel.features.ia32Emulation or false);
+        message = "`hardware.graphics.enable32Bit` requires a kernel that supports 32-bit emulation";
+      }
+    ];
+
+    systemd.tmpfiles.settings.graphics-driver = {
+      "/run/opengl-driver"."L+".argument = toString driversEnv;
+      "/run/opengl-driver-32" =
+        if pkgs.stdenv.isi686 then
+          { "L+".argument = "opengl-driver"; }
+        else if cfg.enable32Bit then
+          { "L+".argument = toString driversEnv32; }
+        else
+          { "r" = {}; };
+    };
+
+    hardware.graphics.package = lib.mkDefault pkgs.mesa.drivers;
+    hardware.graphics.package32 = lib.mkDefault pkgs.pkgsi686Linux.mesa.drivers;
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/opengl.nix b/nixpkgs/nixos/modules/hardware/opengl.nix
deleted file mode 100644
index 25324fd8b0af..000000000000
--- a/nixpkgs/nixos/modules/hardware/opengl.nix
+++ /dev/null
@@ -1,161 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-
-  cfg = config.hardware.opengl;
-
-  kernelPackages = config.boot.kernelPackages;
-
-  videoDrivers = config.services.xserver.videoDrivers;
-
-  package = pkgs.buildEnv {
-    name = "opengl-drivers";
-    paths = [ cfg.package ] ++ cfg.extraPackages;
-  };
-
-  package32 = pkgs.buildEnv {
-    name = "opengl-drivers-32bit";
-    paths = [ cfg.package32 ] ++ cfg.extraPackages32;
-  };
-
-in
-
-{
-
-  imports = [
-    (mkRenamedOptionModule [ "services" "xserver" "vaapiDrivers" ] [ "hardware" "opengl" "extraPackages" ])
-    (mkRemovedOptionModule [ "hardware" "opengl" "s3tcSupport" ] "S3TC support is now always enabled in Mesa.")
-  ];
-
-  options = {
-
-    hardware.opengl = {
-      enable = mkOption {
-        description = ''
-          Whether to enable OpenGL drivers. This is needed to enable
-          OpenGL support in X11 systems, as well as for Wayland compositors
-          like sway and Weston. It is enabled by default
-          by the corresponding modules, so you do not usually have to
-          set it yourself, only if there is no module for your wayland
-          compositor of choice. See services.xserver.enable and
-          programs.sway.enable.
-        '';
-        type = types.bool;
-        default = false;
-      };
-
-      driSupport = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Whether to enable accelerated OpenGL rendering through the
-          Direct Rendering Interface (DRI).
-        '';
-      };
-
-      driSupport32Bit = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          On 64-bit systems, whether to support Direct Rendering for
-          32-bit applications (such as Wine).  This is currently only
-          supported for the `nvidia` as well as
-          `Mesa`.
-        '';
-      };
-
-      package = mkOption {
-        type = types.package;
-        internal = true;
-        description = ''
-          The package that provides the OpenGL implementation.
-        '';
-      };
-
-      package32 = mkOption {
-        type = types.package;
-        internal = true;
-        description = ''
-          The package that provides the 32-bit OpenGL implementation on
-          64-bit systems. Used when {option}`driSupport32Bit` is
-          set.
-        '';
-      };
-
-      extraPackages = mkOption {
-        type = types.listOf types.package;
-        default = [];
-        example = literalExpression "with pkgs; [ intel-media-driver intel-ocl intel-vaapi-driver ]";
-        description = ''
-          Additional packages to add to OpenGL drivers.
-          This can be used to add OpenCL drivers, VA-API/VDPAU drivers etc.
-
-          ::: {.note}
-          intel-media-driver supports hardware Broadwell (2014) or newer. Older hardware should use the mostly unmaintained intel-vaapi-driver driver.
-          :::
-        '';
-      };
-
-      extraPackages32 = mkOption {
-        type = types.listOf types.package;
-        default = [];
-        example = literalExpression "with pkgs.pkgsi686Linux; [ intel-media-driver intel-vaapi-driver ]";
-        description = ''
-          Additional packages to add to 32-bit OpenGL drivers on 64-bit systems.
-          Used when {option}`driSupport32Bit` is set. This can be used to add OpenCL drivers, VA-API/VDPAU drivers etc.
-
-          ::: {.note}
-          intel-media-driver supports hardware Broadwell (2014) or newer. Older hardware should use the mostly unmaintained intel-vaapi-driver driver.
-          :::
-        '';
-      };
-
-      setLdLibraryPath = mkOption {
-        type = types.bool;
-        internal = true;
-        default = false;
-        description = ''
-          Whether the `LD_LIBRARY_PATH` environment variable
-          should be set to the locations of driver libraries. Drivers which
-          rely on overriding libraries should set this to true. Drivers which
-          support `libglvnd` and other dispatch libraries
-          instead of overriding libraries should not set this.
-        '';
-      };
-    };
-
-  };
-
-  config = mkIf cfg.enable {
-    assertions = [
-      { assertion = cfg.driSupport32Bit -> pkgs.stdenv.isx86_64;
-        message = "Option driSupport32Bit only makes sense on a 64-bit system.";
-      }
-      { assertion = cfg.driSupport32Bit -> (config.boot.kernelPackages.kernel.features.ia32Emulation or false);
-        message = "Option driSupport32Bit requires a kernel that supports 32bit emulation";
-      }
-    ];
-
-    systemd.tmpfiles.rules = [
-      "L+ /run/opengl-driver - - - - ${package}"
-      (
-        if pkgs.stdenv.isi686 then
-          "L+ /run/opengl-driver-32 - - - - opengl-driver"
-        else if cfg.driSupport32Bit then
-          "L+ /run/opengl-driver-32 - - - - ${package32}"
-        else
-          "r /run/opengl-driver-32"
-      )
-    ];
-
-    environment.sessionVariables.LD_LIBRARY_PATH = mkIf cfg.setLdLibraryPath
-      ([ "/run/opengl-driver/lib" ] ++ optional cfg.driSupport32Bit "/run/opengl-driver-32/lib");
-
-    hardware.opengl.package = mkDefault pkgs.mesa.drivers;
-    hardware.opengl.package32 = mkDefault pkgs.pkgsi686Linux.mesa.drivers;
-
-    boot.extraModulePackages = optional (elem "virtualbox" videoDrivers) kernelPackages.virtualboxGuestAdditions;
-  };
-}
diff --git a/nixpkgs/nixos/modules/hardware/printers.nix b/nixpkgs/nixos/modules/hardware/printers.nix
index de2f84d4831b..ace900d88586 100644
--- a/nixpkgs/nixos/modules/hardware/printers.nix
+++ b/nixpkgs/nixos/modules/hardware/printers.nix
@@ -13,7 +13,7 @@ let
     } // optionalAttrs (p.description != null) {
       D = p.description;
     } // optionalAttrs (p.ppdOptions != {}) {
-      o = mapAttrsToList (name: value: "'${name}'='${value}'") p.ppdOptions;
+      o = mapAttrsToList (name: value: "${name}=${value}") p.ppdOptions;
     });
   in ''
     ${pkgs.cups}/bin/lpadmin ${args} -E
diff --git a/nixpkgs/nixos/modules/hardware/video/amdgpu-pro.nix b/nixpkgs/nixos/modules/hardware/video/amdgpu-pro.nix
deleted file mode 100644
index 2a86280eec8c..000000000000
--- a/nixpkgs/nixos/modules/hardware/video/amdgpu-pro.nix
+++ /dev/null
@@ -1,69 +0,0 @@
-# This module provides the proprietary AMDGPU-PRO drivers.
-
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-
-  drivers = config.services.xserver.videoDrivers;
-
-  enabled = elem "amdgpu-pro" drivers;
-
-  package = config.boot.kernelPackages.amdgpu-pro;
-  package32 = pkgs.pkgsi686Linux.linuxPackages.amdgpu-pro.override { kernel = null; };
-
-  opengl = config.hardware.opengl;
-
-in
-
-{
-
-  config = mkIf enabled {
-    services.xserver.drivers = singleton
-      { name = "amdgpu"; modules = [ package ]; display = true; };
-
-    hardware.opengl.package = package;
-    hardware.opengl.package32 = package32;
-    hardware.opengl.setLdLibraryPath = true;
-
-    boot.extraModulePackages = [ package.kmod ];
-
-    boot.kernelPackages = pkgs.linuxKernel.packagesFor
-      (pkgs.linuxKernel.kernels.linux_5_10.override {
-        structuredExtraConfig = {
-          DEVICE_PRIVATE = kernel.yes;
-          KALLSYMS_ALL = kernel.yes;
-        };
-      });
-
-    hardware.firmware = [ package.fw ];
-
-    systemd.tmpfiles.settings.amdgpu-pro = {
-      "/run/amdgpu"."L+".argument = "${package}/opt/amdgpu";
-      "/run/amdgpu-pro"."L+".argument = "${package}/opt/amdgpu-pro";
-    };
-
-    system.requiredKernelConfig = with config.lib.kernelConfig; [
-      (isYes "DEVICE_PRIVATE")
-      (isYes "KALLSYMS_ALL")
-    ];
-
-    boot.initrd.extraUdevRulesCommands = mkIf (!config.boot.initrd.systemd.enable) ''
-      cp -v ${package}/etc/udev/rules.d/*.rules $out/
-    '';
-    boot.initrd.services.udev.packages = [ package ];
-
-    environment.systemPackages =
-      [ package.vulkan ] ++
-      # this isn't really DRI, but we'll reuse this option for now
-      optional config.hardware.opengl.driSupport32Bit package32.vulkan;
-
-    environment.etc = {
-      "modprobe.d/blacklist-radeon.conf".source = package + "/etc/modprobe.d/blacklist-radeon.conf";
-      amd.source = package + "/etc/amd";
-    };
-
-  };
-
-}
diff --git a/nixpkgs/nixos/modules/hardware/video/nvidia.nix b/nixpkgs/nixos/modules/hardware/video/nvidia.nix
index 37d8e53a2e04..0274dfcaa70f 100644
--- a/nixpkgs/nixos/modules/hardware/video/nvidia.nix
+++ b/nixpkgs/nixos/modules/hardware/video/nvidia.nix
@@ -3,12 +3,10 @@
   lib,
   pkgs,
   ...
-}: let
+}:
+let
   nvidiaEnabled = (lib.elem "nvidia" config.services.xserver.videoDrivers);
-  nvidia_x11 =
-    if nvidiaEnabled || cfg.datacenter.enable
-    then cfg.package
-    else null;
+  nvidia_x11 = if nvidiaEnabled || cfg.datacenter.enable then cfg.package else null;
 
   cfg = config.hardware.nvidia;
 
@@ -19,8 +17,9 @@
   primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
   busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
   ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
-  settingsFormat = pkgs.formats.keyValue {};
-in {
+  settingsFormat = pkgs.formats.keyValue { };
+in
+{
   options = {
     hardware.nvidia = {
       datacenter.enable = lib.mkEnableOption ''
@@ -29,50 +28,50 @@ in {
       datacenter.settings = lib.mkOption {
         type = settingsFormat.type;
         default = {
-          LOG_LEVEL=4;
-          LOG_FILE_NAME="/var/log/fabricmanager.log";
-          LOG_APPEND_TO_LOG=1;
-          LOG_FILE_MAX_SIZE=1024;
-          LOG_USE_SYSLOG=0;
-          DAEMONIZE=1;
-          BIND_INTERFACE_IP="127.0.0.1";
-          STARTING_TCP_PORT=16000;
-          FABRIC_MODE=0;
-          FABRIC_MODE_RESTART=0;
-          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
-          FM_CMD_BIND_INTERFACE="127.0.0.1";
-          FM_CMD_PORT_NUMBER=6666;
-          FM_STAY_RESIDENT_ON_FAILURES=0;
-          ACCESS_LINK_FAILURE_MODE=0;
-          TRUNK_LINK_FAILURE_MODE=0;
-          NVSWITCH_FAILURE_MODE=0;
-          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
-          TOPOLOGY_FILE_PATH="${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
-          DATABASE_PATH="${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
+          LOG_LEVEL = 4;
+          LOG_FILE_NAME = "/var/log/fabricmanager.log";
+          LOG_APPEND_TO_LOG = 1;
+          LOG_FILE_MAX_SIZE = 1024;
+          LOG_USE_SYSLOG = 0;
+          DAEMONIZE = 1;
+          BIND_INTERFACE_IP = "127.0.0.1";
+          STARTING_TCP_PORT = 16000;
+          FABRIC_MODE = 0;
+          FABRIC_MODE_RESTART = 0;
+          STATE_FILE_NAME = "/var/tmp/fabricmanager.state";
+          FM_CMD_BIND_INTERFACE = "127.0.0.1";
+          FM_CMD_PORT_NUMBER = 6666;
+          FM_STAY_RESIDENT_ON_FAILURES = 0;
+          ACCESS_LINK_FAILURE_MODE = 0;
+          TRUNK_LINK_FAILURE_MODE = 0;
+          NVSWITCH_FAILURE_MODE = 0;
+          ABORT_CUDA_JOBS_ON_FM_EXIT = 1;
+          TOPOLOGY_FILE_PATH = "${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
+          DATABASE_PATH = "${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
         };
         defaultText = lib.literalExpression ''
-        {
-          LOG_LEVEL=4;
-          LOG_FILE_NAME="/var/log/fabricmanager.log";
-          LOG_APPEND_TO_LOG=1;
-          LOG_FILE_MAX_SIZE=1024;
-          LOG_USE_SYSLOG=0;
-          DAEMONIZE=1;
-          BIND_INTERFACE_IP="127.0.0.1";
-          STARTING_TCP_PORT=16000;
-          FABRIC_MODE=0;
-          FABRIC_MODE_RESTART=0;
-          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
-          FM_CMD_BIND_INTERFACE="127.0.0.1";
-          FM_CMD_PORT_NUMBER=6666;
-          FM_STAY_RESIDENT_ON_FAILURES=0;
-          ACCESS_LINK_FAILURE_MODE=0;
-          TRUNK_LINK_FAILURE_MODE=0;
-          NVSWITCH_FAILURE_MODE=0;
-          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
-          TOPOLOGY_FILE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
-          DATABASE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
-        }
+          {
+            LOG_LEVEL=4;
+            LOG_FILE_NAME="/var/log/fabricmanager.log";
+            LOG_APPEND_TO_LOG=1;
+            LOG_FILE_MAX_SIZE=1024;
+            LOG_USE_SYSLOG=0;
+            DAEMONIZE=1;
+            BIND_INTERFACE_IP="127.0.0.1";
+            STARTING_TCP_PORT=16000;
+            FABRIC_MODE=0;
+            FABRIC_MODE_RESTART=0;
+            STATE_FILE_NAME="/var/tmp/fabricmanager.state";
+            FM_CMD_BIND_INTERFACE="127.0.0.1";
+            FM_CMD_PORT_NUMBER=6666;
+            FM_STAY_RESIDENT_ON_FAILURES=0;
+            ACCESS_LINK_FAILURE_MODE=0;
+            TRUNK_LINK_FAILURE_MODE=0;
+            NVSWITCH_FAILURE_MODE=0;
+            ABORT_CUDA_JOBS_ON_FM_EXIT=1;
+            TOPOLOGY_FILE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
+            DATABASE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
+          }
         '';
         description = ''
           Additional configuration options for fabricmanager.
@@ -207,11 +206,26 @@ in {
         option is supported is used
       '';
 
+      prime.reverseSync.setupCommands.enable =
+        (lib.mkEnableOption ''
+          configure the display manager to be able to use the outputs
+          attached to the NVIDIA GPU.
+          Disable in order to configure the NVIDIA GPU outputs manually using xrandr.
+          Note that this configuration will only be successful when a display manager
+          for which the {option}`services.xserver.displayManager.setupCommands`
+          option is supported is used
+        '')
+        // {
+          default = true;
+        };
+
       nvidiaSettings =
         (lib.mkEnableOption ''
           nvidia-settings, NVIDIA's GUI configuration tool
         '')
-        // {default = true;};
+        // {
+          default = true;
+        };
 
       nvidiaPersistenced = lib.mkEnableOption ''
         nvidia-persistenced a update for NVIDIA GPU headless mode, i.e.
@@ -226,7 +240,8 @@ in {
       '';
 
       package = lib.mkOption {
-        default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
+        default =
+          config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
         defaultText = lib.literalExpression ''
           config.boot.kernelPackages.nvidiaPackages."\$\{if cfg.datacenter.enable then "dc" else "stable"}"
         '';
@@ -242,403 +257,404 @@ in {
     };
   };
 
-  config = let
-    igpuDriver =
-      if pCfg.intelBusId != ""
-      then "modesetting"
-      else "amdgpu";
-    igpuBusId =
-      if pCfg.intelBusId != ""
-      then pCfg.intelBusId
-      else pCfg.amdgpuBusId;
-  in
-    lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
-      # Common
-      ({
-        assertions = [
-          {
-            assertion = !(nvidiaEnabled && cfg.datacenter.enable);
-            message = "You cannot configure both X11 and Data Center drivers at the same time.";
-          }
-        ];
-        boot = {
-          blacklistedKernelModules = ["nouveau" "nvidiafb"];
-
-          # Don't add `nvidia-uvm` to `kernelModules`, because we want
-          # `nvidia-uvm` be loaded only after `udev` rules for `nvidia` kernel
-          # module are applied.
-          #
-          # Instead, we use `softdep` to lazily load `nvidia-uvm` kernel module
-          # after `nvidia` kernel module is loaded and `udev` rules are applied.
-          extraModprobeConfig = ''
-            softdep nvidia post: nvidia-uvm
-          '';
-        };
-        systemd.tmpfiles.rules =
-          lib.optional config.virtualisation.docker.enableNvidia
-            "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
-        services.udev.extraRules =
-        ''
-          # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c 195 255'"
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c 195 $${i}; done'"
-          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c 195 254'"
-          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
-          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
-        '';
-        hardware.opengl = {
-          extraPackages = [
-            nvidia_x11.out
+  config =
+    let
+      igpuDriver = if pCfg.intelBusId != "" then "modesetting" else "amdgpu";
+      igpuBusId = if pCfg.intelBusId != "" then pCfg.intelBusId else pCfg.amdgpuBusId;
+    in
+    lib.mkIf (nvidia_x11 != null) (
+      lib.mkMerge [
+        # Common
+        ({
+          assertions = [
+            {
+              assertion = !(nvidiaEnabled && cfg.datacenter.enable);
+              message = "You cannot configure both X11 and Data Center drivers at the same time.";
+            }
           ];
-          extraPackages32 = [
-            nvidia_x11.lib32
+          boot = {
+            blacklistedKernelModules = [
+              "nouveau"
+              "nvidiafb"
+            ];
+
+            # Don't add `nvidia-uvm` to `kernelModules`, because we want
+            # `nvidia-uvm` be loaded only after `udev` rules for `nvidia` kernel
+            # module are applied.
+            #
+            # Instead, we use `softdep` to lazily load `nvidia-uvm` kernel module
+            # after `nvidia` kernel module is loaded and `udev` rules are applied.
+            extraModprobeConfig = ''
+              softdep nvidia post: nvidia-uvm
+            '';
+          };
+          systemd.tmpfiles.rules = lib.mkIf config.virtualisation.docker.enableNvidia [ "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin" ];
+          services.udev.extraRules = ''
+            # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
+            KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c 195 255'"
+            KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c 195 $${i}; done'"
+            KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c 195 254'"
+            KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
+            KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
+          '';
+          hardware.graphics = {
+            extraPackages = [ nvidia_x11.out ];
+            extraPackages32 = [ nvidia_x11.lib32 ];
+          };
+          environment.systemPackages = [ nvidia_x11.bin ];
+        })
+
+        # X11
+        (lib.mkIf nvidiaEnabled {
+          assertions = [
+            {
+              assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
+              message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
+            }
+
+            {
+              assertion = offloadCfg.enableOffloadCmd -> offloadCfg.enable || reverseSyncCfg.enable;
+              message = "Offload command requires offloading or reverse prime sync to be enabled.";
+            }
+
+            {
+              assertion =
+                primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
+              message = "When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.";
+            }
+
+            {
+              assertion = offloadCfg.enable -> lib.versionAtLeast nvidia_x11.version "435.21";
+              message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";
+            }
+
+            {
+              assertion =
+                (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> lib.versionAtLeast nvidia_x11.version "470.0";
+              message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";
+            }
+
+            {
+              assertion = !(syncCfg.enable && offloadCfg.enable);
+              message = "PRIME Sync and Offload cannot be both enabled";
+            }
+
+            {
+              assertion = !(syncCfg.enable && reverseSyncCfg.enable);
+              message = "PRIME Sync and PRIME Reverse Sync cannot be both enabled";
+            }
+
+            {
+              assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
+              message = "Sync precludes powering down the NVIDIA GPU.";
+            }
+
+            {
+              assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
+              message = "Fine-grained power management requires offload to be enabled.";
+            }
+
+            {
+              assertion = cfg.powerManagement.enable -> lib.versionAtLeast nvidia_x11.version "430.09";
+              message = "Required files for driver based power management only exist on versions >= 430.09.";
+            }
+
+            {
+              assertion = cfg.open -> (cfg.package ? open && cfg.package ? firmware);
+              message = "This version of NVIDIA driver does not provide a corresponding opensource kernel driver";
+            }
+
+            {
+              assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
+              message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
+            }
           ];
-        };
-        environment.systemPackages = [
-          nvidia_x11.bin
-        ];
-      })
-      # X11
-      (lib.mkIf nvidiaEnabled {
-        assertions = [
-        {
-          assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
-          message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
-        }
-
-        {
-          assertion = offloadCfg.enableOffloadCmd -> offloadCfg.enable || reverseSyncCfg.enable;
-          message = "Offload command requires offloading or reverse prime sync to be enabled.";
-        }
-
-        {
-          assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
-          message = "When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.";
-        }
-
-        {
-          assertion = offloadCfg.enable -> lib.versionAtLeast nvidia_x11.version "435.21";
-          message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";
-        }
-
-        {
-          assertion = (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> lib.versionAtLeast nvidia_x11.version "470.0";
-          message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";
-        }
-
-        {
-          assertion = !(syncCfg.enable && offloadCfg.enable);
-          message = "PRIME Sync and Offload cannot be both enabled";
-        }
-
-        {
-          assertion = !(syncCfg.enable && reverseSyncCfg.enable);
-          message = "PRIME Sync and PRIME Reverse Sync cannot be both enabled";
-        }
-
-        {
-          assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
-          message = "Sync precludes powering down the NVIDIA GPU.";
-        }
-
-        {
-          assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
-          message = "Fine-grained power management requires offload to be enabled.";
-        }
-
-        {
-          assertion = cfg.powerManagement.enable -> lib.versionAtLeast nvidia_x11.version "430.09";
-          message = "Required files for driver based power management only exist on versions >= 430.09.";
-        }
-
-        {
-          assertion = cfg.open -> (cfg.package ? open && cfg.package ? firmware);
-          message = "This version of NVIDIA driver does not provide a corresponding opensource kernel driver";
-        }
-
-        {
-          assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
-          message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
-        }];
-
-        # If Optimus/PRIME is enabled, we:
-        # - Specify the configured NVIDIA GPU bus ID in the Device section for the
-        #   "nvidia" driver.
-        # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
-        #   "nvidia" driver, in order to allow the X server to start without any outputs.
-        # - Add a separate Device section for the Intel GPU, using the "modesetting"
-        #   driver and with the configured BusID.
-        # - OR add a separate Device section for the AMD APU, using the "amdgpu"
-        #   driver and with the configures BusID.
-        # - Reference that Device section from the ServerLayout section as an inactive
-        #   device.
-        # - Configure the display manager to run specific `xrandr` commands which will
-        #   configure/enable displays connected to the Intel iGPU / AMD APU.
-
-        # reverse sync implies offloading
-        hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
-
-        services.xserver.drivers =
-          lib.optional primeEnabled {
-            name = igpuDriver;
-            display = offloadCfg.enable;
-            modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
-            deviceSection =
-              ''
-                BusID "${igpuBusId}"
-              ''
-              + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
-                Option "AccelMethod" "none"
-              '';
-          }
-          ++ lib.singleton {
-            name = "nvidia";
-            modules = [nvidia_x11.bin];
-            display = !offloadCfg.enable;
-            deviceSection =
-              ''
-                Option "SidebandSocketPath" "/run/nvidia-xdriver/"
-              '' +
-              lib.optionalString primeEnabled
-              ''
-                BusID "${pCfg.nvidiaBusId}"
-              ''
-              + lib.optionalString pCfg.allowExternalGpu ''
-                Option "AllowExternalGpus"
-              '';
-            screenSection =
-              ''
-                Option "RandRRotation" "on"
-              ''
-              + lib.optionalString syncCfg.enable ''
-                Option "AllowEmptyInitialConfiguration"
+
+          # If Optimus/PRIME is enabled, we:
+          # - Specify the configured NVIDIA GPU bus ID in the Device section for the
+          #   "nvidia" driver.
+          # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
+          #   "nvidia" driver, in order to allow the X server to start without any outputs.
+          # - Add a separate Device section for the Intel GPU, using the "modesetting"
+          #   driver and with the configured BusID.
+          # - OR add a separate Device section for the AMD APU, using the "amdgpu"
+          #   driver and with the configured BusID.
+          # - Reference that Device section from the ServerLayout section as an inactive
+          #   device.
+          # - Configure the display manager to run specific `xrandr` commands which will
+          #   configure/enable displays connected to the Intel iGPU / AMD APU.
+
+          # reverse sync implies offloading
+          hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
+
+          services.xserver.drivers =
+            lib.optional primeEnabled {
+              name = igpuDriver;
+              display = offloadCfg.enable;
+              modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
+              deviceSection =
+                ''
+                  BusID "${igpuBusId}"
+                ''
+                + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
+                  Option "AccelMethod" "none"
+                '';
+            }
+            ++ lib.singleton {
+              name = "nvidia";
+              modules = [ nvidia_x11.bin ];
+              display = !offloadCfg.enable;
+              deviceSection =
+                ''
+                  Option "SidebandSocketPath" "/run/nvidia-xdriver/"
+                ''
+                + lib.optionalString primeEnabled ''
+                  BusID "${pCfg.nvidiaBusId}"
+                ''
+                + lib.optionalString pCfg.allowExternalGpu ''
+                  Option "AllowExternalGpus"
+                '';
+              screenSection =
+                ''
+                  Option "RandRRotation" "on"
+                ''
+                + lib.optionalString syncCfg.enable ''
+                  Option "AllowEmptyInitialConfiguration"
+                ''
+                + lib.optionalString cfg.forceFullCompositionPipeline ''
+                  Option         "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
+                  Option         "AllowIndirectGLXProtocol" "off"
+                  Option         "TripleBuffer" "on"
+                '';
+            };
+
+          services.xserver.serverLayoutSection =
+            lib.optionalString syncCfg.enable ''
+              Inactive "Device-${igpuDriver}[0]"
+            ''
+            + lib.optionalString reverseSyncCfg.enable ''
+              Inactive "Device-nvidia[0]"
+            ''
+            + lib.optionalString offloadCfg.enable ''
+              Option "AllowNVIDIAGPUScreens"
+            '';
+
+          services.xserver.displayManager.setupCommands =
+            let
+              gpuProviderName =
+                if igpuDriver == "amdgpu" then
+                  # find the name of the provider if amdgpu
+                  "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
+                else
+                  igpuDriver;
+              providerCmdParams =
+                if syncCfg.enable then "\"${gpuProviderName}\" NVIDIA-0" else "NVIDIA-G0 \"${gpuProviderName}\"";
+            in
+            lib.optionalString
+              (syncCfg.enable || (reverseSyncCfg.enable && reverseSyncCfg.setupCommands.enable))
               ''
-              + lib.optionalString cfg.forceFullCompositionPipeline ''
-                Option         "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
-                Option         "AllowIndirectGLXProtocol" "off"
-                Option         "TripleBuffer" "on"
+                # Added by nvidia configuration module for Optimus/PRIME.
+                ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
+                ${lib.getExe pkgs.xorg.xrandr} --auto
               '';
-          };
 
-        services.xserver.serverLayoutSection =
-          lib.optionalString syncCfg.enable ''
-            Inactive "Device-${igpuDriver}[0]"
-          ''
-          + lib.optionalString reverseSyncCfg.enable ''
-            Inactive "Device-nvidia[0]"
-          ''
-          + lib.optionalString offloadCfg.enable ''
-            Option "AllowNVIDIAGPUScreens"
-          '';
+          environment.etc = {
+            "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {
+              source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";
+            };
 
-        services.xserver.displayManager.setupCommands = let
-          gpuProviderName =
-            if igpuDriver == "amdgpu"
-            then
-              # find the name of the provider if amdgpu
-              "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
-            else igpuDriver;
-          providerCmdParams =
-            if syncCfg.enable
-            then "\"${gpuProviderName}\" NVIDIA-0"
-            else "NVIDIA-G0 \"${gpuProviderName}\"";
-        in
-          lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
-            # Added by nvidia configuration module for Optimus/PRIME.
-            ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
-            ${lib.getExe pkgs.xorg.xrandr} --auto
-          '';
+            # 'nvidia_x11' installs its files to /run/opengl-driver/...
+            "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
+          };
 
-        environment.etc = {
-          "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
+          hardware.graphics = {
+            extraPackages = [ pkgs.nvidia-vaapi-driver ];
+            extraPackages32 = [ pkgs.pkgsi686Linux.nvidia-vaapi-driver ];
+          };
 
-          # 'nvidia_x11' installs it's files to /run/opengl-driver/...
-          "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
-        };
+          environment.systemPackages =
+            lib.optional cfg.nvidiaSettings nvidia_x11.settings
+            ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
+            ++ lib.optional offloadCfg.enableOffloadCmd (
+              pkgs.writeShellScriptBin "nvidia-offload" ''
+                export __NV_PRIME_RENDER_OFFLOAD=1
+                export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
+                export __GLX_VENDOR_LIBRARY_NAME=nvidia
+                export __VK_LAYER_NV_optimus=NVIDIA_only
+                exec "$@"
+              ''
+            );
 
-        hardware.opengl = {
-          extraPackages = [
-            pkgs.nvidia-vaapi-driver
-          ];
-          extraPackages32 = [
-            pkgs.pkgsi686Linux.nvidia-vaapi-driver
-          ];
-        };
-        environment.systemPackages =
-          lib.optional cfg.nvidiaSettings nvidia_x11.settings
-          ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
-          ++ lib.optional offloadCfg.enableOffloadCmd
-          (pkgs.writeShellScriptBin "nvidia-offload" ''
-            export __NV_PRIME_RENDER_OFFLOAD=1
-            export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
-            export __GLX_VENDOR_LIBRARY_NAME=nvidia
-            export __VK_LAYER_NV_optimus=NVIDIA_only
-            exec "$@"
-          '');
-
-        systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
-
-        systemd.services = let
-          nvidiaService = state: {
-            description = "NVIDIA system ${state} actions";
-            path = [pkgs.kbd];
-            serviceConfig = {
-              Type = "oneshot";
-              ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
-            };
-            before = ["systemd-${state}.service"];
-            requiredBy = ["systemd-${state}.service"];
-          };
-        in
-          lib.mkMerge [
-            (lib.mkIf cfg.powerManagement.enable {
-              nvidia-suspend = nvidiaService "suspend";
-              nvidia-hibernate = nvidiaService "hibernate";
-              nvidia-resume =
-                (nvidiaService "resume")
-                // {
-                  before = [];
-                  after = ["systemd-suspend.service" "systemd-hibernate.service"];
-                  requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
-                };
-            })
-            (lib.mkIf cfg.nvidiaPersistenced {
-              "nvidia-persistenced" = {
-                description = "NVIDIA Persistence Daemon";
-                wantedBy = ["multi-user.target"];
+          systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
+
+          systemd.services =
+            let
+              nvidiaService = state: {
+                description = "NVIDIA system ${state} actions";
+                path = [ pkgs.kbd ];
                 serviceConfig = {
-                  Type = "forking";
-                  Restart = "always";
-                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
-                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
-                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+                  Type = "oneshot";
+                  ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
                 };
+                before = [ "systemd-${state}.service" ];
+                requiredBy = [ "systemd-${state}.service" ];
               };
-            })
-            (lib.mkIf cfg.dynamicBoost.enable {
-              "nvidia-powerd" = {
-                description = "nvidia-powerd service";
-                path = [
-                  pkgs.util-linux # nvidia-powerd wants lscpu
-                ];
-                wantedBy = ["multi-user.target"];
-                serviceConfig = {
-                  Type = "dbus";
-                  BusName = "nvidia.powerd.server";
-                  ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
+            in
+            lib.mkMerge [
+              (lib.mkIf cfg.powerManagement.enable {
+                nvidia-suspend = nvidiaService "suspend";
+                nvidia-hibernate = nvidiaService "hibernate";
+                nvidia-resume = (nvidiaService "resume") // {
+                  before = [ ];
+                  after = [
+                    "systemd-suspend.service"
+                    "systemd-hibernate.service"
+                  ];
+                  requiredBy = [
+                    "systemd-suspend.service"
+                    "systemd-hibernate.service"
+                  ];
                 };
-              };
-            })
-          ];
-        services.acpid.enable = true;
-
-        services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
-
-        hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
-
-        systemd.tmpfiles.rules = [
-          # Remove the following log message:
-          #    (WW) NVIDIA: Failed to bind sideband socket to
-          #    (WW) NVIDIA:     '/var/run/nvidia-xdriver-b4f69129' Permission denied
-          #
-          # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
-          "d /run/nvidia-xdriver 0770 root users"
-        ] ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
-          "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
-
-        boot = {
-          extraModulePackages =
-            if cfg.open
-            then [nvidia_x11.open]
-            else [nvidia_x11.bin];
-          # nvidia-uvm is required by CUDA applications.
-          kernelModules =
-            lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
-
-          # If requested enable modesetting via kernel parameter.
-          kernelParams =
-            lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
-            ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
-            ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
-            ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
-
-          # enable finegrained power management
-          extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
-            options nvidia "NVreg_DynamicPowerManagement=0x02"
-          '';
-        };
-        services.udev.extraRules =
-          lib.optionalString cfg.powerManagement.finegrained (
-          lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
-            # Remove NVIDIA USB xHCI Host Controller devices, if present
-            ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
-
-            # Remove NVIDIA USB Type-C UCSI devices, if present
-            ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
-
-            # Remove NVIDIA Audio devices, if present
-            ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
-          ''
-          + ''
-            # Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
-            ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
-            ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"
-
-            # Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
-            ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
-            ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
-          ''
-        );
-      })
-      # Data Center
-      (lib.mkIf (cfg.datacenter.enable) {
-        boot.extraModulePackages = [
-          nvidia_x11.bin
-        ];
-
-        systemd = {
-          tmpfiles.rules =
-            lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
-            "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
-
-          services = lib.mkMerge [
-            ({
-              nvidia-fabricmanager = {
-                enable = true;
-                description = "Start NVIDIA NVLink Management";
-                wantedBy = [ "multi-user.target" ];
-                unitConfig.After = [ "network-online.target" ];
-                unitConfig.Requires = [ "network-online.target" ];
-                serviceConfig = {
-                  Type = "forking";
-                  TimeoutStartSec = 240;
-                  ExecStart = let
-                    nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
-                    in
+              })
+              (lib.mkIf cfg.nvidiaPersistenced {
+                "nvidia-persistenced" = {
+                  description = "NVIDIA Persistence Daemon";
+                  wantedBy = [ "multi-user.target" ];
+                  serviceConfig = {
+                    Type = "forking";
+                    Restart = "always";
+                    PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+                    ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
+                    ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+                  };
+                };
+              })
+              (lib.mkIf cfg.dynamicBoost.enable {
+                "nvidia-powerd" = {
+                  description = "nvidia-powerd service";
+                  path = [
+                    pkgs.util-linux # nvidia-powerd wants lscpu
+                  ];
+                  wantedBy = [ "multi-user.target" ];
+                  serviceConfig = {
+                    Type = "dbus";
+                    BusName = "nvidia.powerd.server";
+                    ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
+                  };
+                };
+              })
+            ];
+
+          services.acpid.enable = true;
+
+          services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
+
+          hardware.firmware = lib.optional (cfg.open || lib.versionAtLeast nvidia_x11.version "555") nvidia_x11.firmware;
+
+          systemd.tmpfiles.rules =
+            [
+              # Remove the following log message:
+              #    (WW) NVIDIA: Failed to bind sideband socket to
+              #    (WW) NVIDIA:     '/var/run/nvidia-xdriver-b4f69129' Permission denied
+              #
+              # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
+              "d /run/nvidia-xdriver 0770 root users"
+            ]
+            ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+              "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
+
+          boot = {
+            extraModulePackages = if cfg.open then [ nvidia_x11.open ] else [ nvidia_x11.bin ];
+            # nvidia-uvm is required by CUDA applications.
+            kernelModules = lib.optionals config.services.xserver.enable [
+              "nvidia"
+              "nvidia_modeset"
+              "nvidia_drm"
+            ];
+
+            # If requested enable modesetting via kernel parameter.
+            kernelParams =
+              lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
+              ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
+              ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
+              ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
+
+            # enable finegrained power management
+            extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
+              options nvidia "NVreg_DynamicPowerManagement=0x02"
+            '';
+          };
+          services.udev.extraRules = lib.optionalString cfg.powerManagement.finegrained (
+            lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
+              # Remove NVIDIA USB xHCI Host Controller devices, if present
+              ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
+
+              # Remove NVIDIA USB Type-C UCSI devices, if present
+              ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
+
+              # Remove NVIDIA Audio devices, if present
+              ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
+            ''
+            + ''
+              # Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
+              ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
+              ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"
+
+              # Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
+              ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
+              ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
+            ''
+          );
+        })
+        # Data Center
+        (lib.mkIf (cfg.datacenter.enable) {
+          boot.extraModulePackages = [ nvidia_x11.bin ];
+
+          systemd = {
+            tmpfiles.rules =
+              lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+                "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
+
+            services = lib.mkMerge [
+              ({
+                nvidia-fabricmanager = {
+                  enable = true;
+                  description = "Start NVIDIA NVLink Management";
+                  wantedBy = [ "multi-user.target" ];
+                  unitConfig.After = [ "network-online.target" ];
+                  unitConfig.Requires = [ "network-online.target" ];
+                  serviceConfig = {
+                    Type = "forking";
+                    TimeoutStartSec = 240;
+                    ExecStart =
+                      let
+                        nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
+                      in
                       "${lib.getExe nvidia_x11.fabricmanager} -c ${nv-fab-conf}";
-                  LimitCORE="infinity";
+                    LimitCORE = "infinity";
+                  };
                 };
-              };
-            })
-            (lib.mkIf cfg.nvidiaPersistenced {
-              "nvidia-persistenced" = {
-                description = "NVIDIA Persistence Daemon";
-                wantedBy = ["multi-user.target"];
-                serviceConfig = {
-                  Type = "forking";
-                  Restart = "always";
-                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
-                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
-                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+              })
+              (lib.mkIf cfg.nvidiaPersistenced {
+                "nvidia-persistenced" = {
+                  description = "NVIDIA Persistence Daemon";
+                  wantedBy = [ "multi-user.target" ];
+                  serviceConfig = {
+                    Type = "forking";
+                    Restart = "always";
+                    PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+                    ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
+                    ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+                  };
                 };
-              };
-            })
-          ];
-      };
+              })
+            ];
+          };
 
-      environment.systemPackages =
-        lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager
-        ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced;
-    })
-  ]);
+          environment.systemPackages =
+            lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager
+            ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced;
+        })
+      ]
+    );
 }
diff --git a/nixpkgs/nixos/modules/hardware/video/virtualbox.nix b/nixpkgs/nixos/modules/hardware/video/virtualbox.nix
new file mode 100644
index 000000000000..31ed92b7d148
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/virtualbox.nix
@@ -0,0 +1,7 @@
+{ lib, config, ... }:
+let
+  inherit (config.boot) kernelPackages;
+  inherit (config.services.xserver) videoDrivers;
+in {
+  boot.extraModulePackages = lib.mkIf (lib.elem "virtualbox" videoDrivers) [ kernelPackages.virtualboxGuestAdditions ];
+}
diff --git a/nixpkgs/nixos/modules/hardware/xone.nix b/nixpkgs/nixos/modules/hardware/xone.nix
index 89690d8c6fb1..bb3b42399d8e 100644
--- a/nixpkgs/nixos/modules/hardware/xone.nix
+++ b/nixpkgs/nixos/modules/hardware/xone.nix
@@ -6,7 +6,7 @@ let
 in
 {
   options.hardware.xone = {
-    enable = mkEnableOption "the xone driver for Xbox One and Xbobx Series X|S accessories";
+    enable = mkEnableOption "the xone driver for Xbox One and Xbox Series X|S accessories";
   };
 
   config = mkIf cfg.enable {