{ system ? builtins.currentSystem,
  config ? {},
  pkgs ? import ../.. { inherit system config; }
}:

with import ../lib/testing-python.nix { inherit system pkgs; };

let

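  # Build a ZFS boot/import/share test for a given kernel/ZFS combination;
  # enableUnstable switches both to the latest kernel and the unstable ZFS
  # module.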
  makeZfsTest = name:
    { kernelPackage ? if enableUnstable then pkgs.linuxPackages_latest else pkgs.linuxPackages
    , enableUnstable ? false
    , extraTest ? ""
    }:
    makeTest {
      name = "zfs-" + name;
      meta = with pkgs.lib.maintainers; {
        maintainers = [ adisbladis ];
      };

      nodes.machine = { pkgs, lib, ... }:
        let
          usersharePath = "/var/lib/samba/usershares";
        in {
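        # Attach one extra 4 GiB scratch disk; it shows up as /dev/vdb in the VM.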
        virtualisation.emptyDiskImages = [ 4096 ];
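        # ZFS ties pools to a host via the hostId; importing fails without one.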
        networking.hostId = "deadbeef";
        boot.kernelPackages = kernelPackage;
        boot.supportedFilesystems = [ "zfs" ];
        boot.zfs.enableUnstable = enableUnstable;

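        # Samba with usershares enabled, so that `zfs set sharesmb=on` has
        # somewhere to register the share.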
        services.samba = {
          enable = true;
          extraConfig = ''
            registry shares = yes
            usershare path = ${usersharePath}
            usershare allow guests = yes
            usershare max shares = 100
            usershare owner only = no
          '';
        };
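        # The usershare directory has to exist (and be sticky) before smbd starts.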
        systemd.services.samba-smbd.serviceConfig.ExecStartPre =
          "${pkgs.coreutils}/bin/mkdir -m +t -p ${usersharePath}";

        environment.systemPackages = [ pkgs.parted ];

        # Set up the regular fileSystems machinery so that forceImportAll can
        # be exercised via the normal service units.
        virtualisation.fileSystems = {
          "/forcepool" = {
            device = "forcepool";
            fsType = "zfs";
            options = [ "noauto" ];
          };
        };

        # forcepool doesn't exist on first boot, and we need to test the import
        # manually after tweaking the hostId.
        systemd.services.zfs-import-forcepool.wantedBy = lib.mkVMOverride [];
        systemd.targets.zfs.wantedBy = lib.mkVMOverride [];
        boot.zfs.forceImportAll = true;
        # /dev/disk/by-id doesn't get populated in the NixOS test framework
        boot.zfs.devNodes = "/dev/disk/by-uuid";
      };

      testScript = ''
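        # Round 1: create a plain pool on the scratch disk, mount a legacy
        # dataset, and share another dataset over SMB.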
        machine.succeed(
            "modprobe zfs",
            "zpool status",
            "ls /dev",
            "mkdir /tmp/mnt",
            "udevadm settle",
            "parted --script /dev/vdb mklabel msdos",
            "parted --script /dev/vdb -- mkpart primary 1024M -1s",
            "udevadm settle",
            "zpool create rpool /dev/vdb1",
            "zfs create -o mountpoint=legacy rpool/root",
            # shared datasets cannot have a legacy mountpoint
            "zfs create rpool/shared_smb",
            "mount -t zfs rpool/root /tmp/mnt",
            "udevadm settle",
            # wait for the Samba services to come up
            "systemctl is-system-running --wait",
            "zfs set sharesmb=on rpool/shared_smb",
            "zfs share rpool/shared_smb",
            "smbclient -gNL localhost | grep rpool_shared_smb",
            "umount /tmp/mnt",
            "zpool destroy rpool",
            "udevadm settle",
        )

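        # Round 2: the same dance with a natively encrypted pool; the
        # passphrase is fed to `zpool create` on stdin.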
        machine.succeed(
            'echo password | zpool create -o altroot="/tmp/mnt" '
            + "-O encryption=aes-256-gcm -O keyformat=passphrase rpool /dev/vdb1",
            "zfs create -o mountpoint=legacy rpool/root",
            "mount -t zfs rpool/root /tmp/mnt",
            "udevadm settle",
            "umount /tmp/mnt",
            "zpool destroy rpool",
            "udevadm settle",
        )

        with subtest("boot.zfs.forceImportAll works"):
            machine.succeed(
                "rm /etc/hostid",
                "zgenhostid deadcafe",
                "zpool create forcepool /dev/vdb1 -O mountpoint=legacy",
            )
            machine.shutdown()
            machine.start()
            machine.succeed("udevadm settle")
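            # The pool was last written under hostid deadcafe, while the rebooted
            # machine is back to deadbeef, so a plain import must fail until the
            # force-import unit takes over.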
            machine.fail("zpool import forcepool")
            machine.succeed(
                "systemctl start zfs-import-forcepool.service",
                "mount -t zfs forcepool /tmp/mnt",
            )
      '' + extraTest;

    };

in {

  stable = makeZfsTest "stable" { };

  unstable = makeZfsTest "unstable" {
    enableUnstable = true;
  };

  installer = (import ./installer.nix { }).zfsroot;

  expand-partitions = makeTest {
    name = "multi-disk-zfs";
    nodes = {
      machine = { pkgs, ... }: {
        environment.systemPackages = [ pkgs.parted ];
        boot.supportedFilesystems = [ "zfs" ];
        networking.hostId = "00000000";

        virtualisation = {
          emptyDiskImages = [ 20480 20480 20480 20480 20480 20480 ];
        };

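        # Booting into this specialisation turns on automatic pool expansion.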
        specialisation.resize.configuration = {
          services.zfs.expandOnBoot = [ "tank" ];
        };
      };
    };

    testScript = { nodes, ... }:
      ''
        start_all()
        machine.wait_for_unit("default.target")
        print(machine.succeed('mount'))

        print(machine.succeed('parted --script /dev/vdb -- mklabel gpt'))
        print(machine.succeed('parted --script /dev/vdb -- mkpart primary 1M 70M'))

        print(machine.succeed('parted --script /dev/vdc -- mklabel gpt'))
        print(machine.succeed('parted --script /dev/vdc -- mkpart primary 1M 70M'))

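        # Three two-way mirrors: the first is backed by the small 70 MB
        # partitions, the other two by whole 20 GiB disks, so almost all of
        # vdb/vdc is initially left unused.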
        print(machine.succeed('zpool create tank mirror /dev/vdb1 /dev/vdc1 mirror /dev/vdd /dev/vde mirror /dev/vdf /dev/vdg'))
        print(machine.succeed('zpool list -v'))
        print(machine.succeed('mount'))
        start_size = int(machine.succeed('df -k --output=size /tank | tail -n1').strip())

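        # Switch into the specialisation that enables services.zfs.expandOnBoot
        # for "tank", then wait for the expansion units to run.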
        print(machine.succeed("/run/current-system/specialisation/resize/bin/switch-to-configuration test >&2"))
        machine.wait_for_unit("zpool-expand-pools.service")
        machine.wait_for_unit("zpool-expand@tank.service")

        print(machine.succeed('zpool list -v'))
        new_size = int(machine.succeed('df -k --output=size /tank | tail -n1').strip())

        if (new_size - start_size) > 20000000:
            print("Disk grew appropriately.")
        else:
            print(f"Disk went from {start_size} to {new_size}, which doesn't seem right.")
            exit(1)
      '';
  };
}