author    github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>  2024-01-25 00:02:41 +0000
committer GitHub <noreply@github.com>  2024-01-25 00:02:41 +0000
commit    feb654493f3889a9cf91da6af219dc600e48bfdf (patch)
tree      8568e9fb3eb82649e08ee0559af846d71d41d438 /nixos
parent    b71c5523a316afc9ddd3c928e1615ec9e4da8d2a (diff)
parent    a4b5a14b07c5a3f90bc868ee26a0a685b3f7f893 (diff)
Merge staging-next into staging
Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/configuration/sshfs-file-systems.section.md |  2
-rw-r--r--  nixos/modules/module-list.nix                                |  1
-rw-r--r--  nixos/modules/services/hardware/fwupd.nix                    |  1
-rw-r--r--  nixos/modules/services/networking/knot.nix                   |  1
-rw-r--r--  nixos/modules/tasks/filesystems/sshfs.nix                    |  7
-rw-r--r--  nixos/tests/ceph-multi-node.nix                              |  8
-rw-r--r--  nixos/tests/ceph-single-node-bluestore.nix                   |  8
-rw-r--r--  nixos/tests/ceph-single-node.nix                             | 29
8 files changed, 43 insertions(+), 14 deletions(-)
diff --git a/nixos/doc/manual/configuration/sshfs-file-systems.section.md b/nixos/doc/manual/configuration/sshfs-file-systems.section.md
index d8c9dea6c337..e2e37454b7ea 100644
--- a/nixos/doc/manual/configuration/sshfs-file-systems.section.md
+++ b/nixos/doc/manual/configuration/sshfs-file-systems.section.md
@@ -38,8 +38,6 @@ The file system can be configured in NixOS via the usual [fileSystems](#opt-file
 Here's a typical setup:
 ```nix
 {
-  system.fsPackages = [ pkgs.sshfs ];
-
   fileSystems."/mnt/my-dir" = {
     device = "my-user@example.com:/my-dir/";
     fsType = "sshfs";
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 041169f0b052..815061639c69 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -1519,6 +1519,7 @@
   ./tasks/filesystems/nfs.nix
   ./tasks/filesystems/ntfs.nix
   ./tasks/filesystems/reiserfs.nix
+  ./tasks/filesystems/sshfs.nix
   ./tasks/filesystems/squashfs.nix
   ./tasks/filesystems/unionfs-fuse.nix
   ./tasks/filesystems/vboxsf.nix
diff --git a/nixos/modules/services/hardware/fwupd.nix b/nixos/modules/services/hardware/fwupd.nix
index 6b3a109ed6f7..6fbcbe676460 100644
--- a/nixos/modules/services/hardware/fwupd.nix
+++ b/nixos/modules/services/hardware/fwupd.nix
@@ -16,6 +16,7 @@ let
     "fwupd/fwupd.conf" = {
       source = format.generate "fwupd.conf" {
         fwupd = cfg.daemonSettings;
+      } // lib.optionalAttrs (lib.length (lib.attrNames cfg.uefiCapsuleSettings) != 0) {
         uefi_capsule = cfg.uefiCapsuleSettings;
       };
       # fwupd tries to chmod the file if it doesn't have the right permissions
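The hunk above wraps the `uefi_capsule` section in `lib.optionalAttrs`, so the generated `fwupd.conf` only contains a `[uefi_capsule]` section when `services.fwupd.uefiCapsuleSettings` is actually set; before this change, an empty section was emitted unconditionally. A minimal sketch of how the merged attribute set behaves (the `uefiCapsuleSettings` value here is illustrative):

```nix
let
  lib = (import <nixpkgs> { }).lib;
  # Illustrative stand-in for cfg.uefiCapsuleSettings when the option is unset:
  uefiCapsuleSettings = { };
in
  { fwupd = { }; }
  // lib.optionalAttrs (lib.length (lib.attrNames uefiCapsuleSettings) != 0) {
    uefi_capsule = uefiCapsuleSettings;
  }
# evaluates to { fwupd = { }; } -- no uefi_capsule attribute, so no
# [uefi_capsule] section is rendered into fwupd.conf
```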
diff --git a/nixos/modules/services/networking/knot.nix b/nixos/modules/services/networking/knot.nix
index d4bd81629c97..94c32586736a 100644
--- a/nixos/modules/services/networking/knot.nix
+++ b/nixos/modules/services/networking/knot.nix
@@ -44,6 +44,7 @@ let
         ++ [ (sec_list_fa "id" nix_def "template") ]
         ++ [ (sec_list_fa "domain" nix_def "zone") ]
         ++ [ (sec_plain nix_def "include") ]
+        ++ [ (sec_plain nix_def "clear") ]
       );
 
     # A plain section contains directly attributes (we don't really check that ATM).
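For orientation: the generator above turns `services.knot.settings` into `knot.conf`, and this hunk teaches it to render a top-level `clear` entry through `sec_plain`, the same path already used for `include`. A hedged sketch of what such a configuration might look like (the listener and file path are invented for illustration, not taken from this commit):

```nix
{
  services.knot = {
    enable = true;
    settings = {
      server.listen = [ "0.0.0.0@53" ];
      # Plain top-level entries are rendered by sec_plain; after this
      # change a "clear" entry would be accepted here alongside "include".
      include = [ "/etc/knot/local.conf" ];
    };
  };
}
```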
diff --git a/nixos/modules/tasks/filesystems/sshfs.nix b/nixos/modules/tasks/filesystems/sshfs.nix
new file mode 100644
index 000000000000..cd71dda16d8b
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/sshfs.nix
@@ -0,0 +1,7 @@
+{ config, lib, pkgs, ... }:
+
+{
+  config = lib.mkIf (lib.any (fs: fs == "sshfs" || fs == "fuse.sshfs") config.boot.supportedFilesystems) {
+    system.fsPackages = [ pkgs.sshfs ];
+  };
+}
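This new module is what lets the manual's example (first hunk above) drop the explicit `system.fsPackages = [ pkgs.sshfs ];` line: declaring an `sshfs` filesystem is now enough, since the filesystem's `fsType` ends up in `boot.supportedFilesystems`, which the module checks. A minimal configuration sketch, reusing the device and mount point from the manual's example:

```nix
{
  # Declaring the filesystem suffices; its fsType propagates to
  # boot.supportedFilesystems, so the module above adds pkgs.sshfs
  # to system.fsPackages automatically.
  fileSystems."/mnt/my-dir" = {
    device = "my-user@example.com:/my-dir/";
    fsType = "sshfs";
  };
}
```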
diff --git a/nixos/tests/ceph-multi-node.nix b/nixos/tests/ceph-multi-node.nix
index 556546beee76..b1352a4bc8f4 100644
--- a/nixos/tests/ceph-multi-node.nix
+++ b/nixos/tests/ceph-multi-node.nix
@@ -185,6 +185,14 @@ let
     monA.succeed(
         "ceph osd pool create multi-node-test 32 32",
         "ceph osd pool ls | grep 'multi-node-test'",
+
+        # We need to enable an application on the pool, otherwise it will
+        # stay unhealthy in state POOL_APP_NOT_ENABLED.
+        # Creating a CephFS would do this automatically, but we haven't done that here.
+        # See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
+        # We use the custom application name "nixos-test" for this.
+        "ceph osd pool application enable multi-node-test nixos-test",
+
         "ceph osd pool rename multi-node-test multi-node-other-test",
         "ceph osd pool ls | grep 'multi-node-other-test'",
     )
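The same fix is applied verbatim to the two single-node tests below. As a standalone illustration of the pattern (pool and application names invented for the sketch), a NixOS test fragment exercising it would look like this:

```nix
{
  # Fragment of a hypothetical NixOS test: a freshly created pool must be
  # tagged with an application, or cluster health degrades to
  # POOL_APP_NOT_ENABLED and HEALTH_OK is never reached.
  testScript = ''
    monA.succeed(
        "ceph osd pool create demo-pool 32 32",
        "ceph osd pool application enable demo-pool demo-app",
    )
    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
  '';
}
```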
diff --git a/nixos/tests/ceph-single-node-bluestore.nix b/nixos/tests/ceph-single-node-bluestore.nix
index acaae4cf300e..8bd1a78244a2 100644
--- a/nixos/tests/ceph-single-node-bluestore.nix
+++ b/nixos/tests/ceph-single-node-bluestore.nix
@@ -145,6 +145,14 @@ let
     monA.succeed(
         "ceph osd pool create single-node-test 32 32",
         "ceph osd pool ls | grep 'single-node-test'",
+
+        # We need to enable an application on the pool, otherwise it will
+        # stay unhealthy in state POOL_APP_NOT_ENABLED.
+        # Creating a CephFS would do this automatically, but we haven't done that here.
+        # See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
+        # We use the custom application name "nixos-test" for this.
+        "ceph osd pool application enable single-node-test nixos-test",
+
         "ceph osd pool rename single-node-test single-node-other-test",
         "ceph osd pool ls | grep 'single-node-other-test'",
     )
diff --git a/nixos/tests/ceph-single-node.nix b/nixos/tests/ceph-single-node.nix
index a3a4072365af..c34ec511dc6d 100644
--- a/nixos/tests/ceph-single-node.nix
+++ b/nixos/tests/ceph-single-node.nix
@@ -145,6 +145,14 @@ let
     monA.succeed(
         "ceph osd pool create single-node-test 32 32",
         "ceph osd pool ls | grep 'single-node-test'",
+
+        # We need to enable an application on the pool, otherwise it will
+        # stay unhealthy in state POOL_APP_NOT_ENABLED.
+        # Creating a CephFS would do this automatically, but we haven't done that here.
+        # See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
+        # We use the custom application name "nixos-test" for this.
+        "ceph osd pool application enable single-node-test nixos-test",
+
         "ceph osd pool rename single-node-test single-node-other-test",
         "ceph osd pool ls | grep 'single-node-other-test'",
     )
@@ -182,19 +190,16 @@ let
     monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
     monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
 
-    # This test has been commented out due to the upstream issue with pyo3
-    # that has broken this dashboard
-    # Reference: https://www.spinics.net/lists/ceph-users/msg77812.html
     # Enable the dashboard and recheck health
-    # monA.succeed(
-    #     "ceph mgr module enable dashboard",
-    #     "ceph config set mgr mgr/dashboard/ssl false",
-    #     # default is 8080 but it's better to be explicit
-    #     "ceph config set mgr mgr/dashboard/server_port 8080",
-    # )
-    # monA.wait_for_open_port(8080)
-    # monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
-    # monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    monA.succeed(
+        "ceph mgr module enable dashboard",
+        "ceph config set mgr mgr/dashboard/ssl false",
+        # default is 8080 but it's better to be explicit
+        "ceph config set mgr mgr/dashboard/server_port 8080",
+    )
+    monA.wait_for_open_port(8080)
+    monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
   '';
 in {
   name = "basic-single-node-ceph-cluster";