author     Luflosi <luflosi@luflosi.de>  2023-03-24 19:16:24 +0100
committer  Luflosi <luflosi@luflosi.de>  2023-12-07 13:17:49 +0100
commit     c351a84eec83a57643e490c34cb8cb61e1341f53 (patch)
tree       5f8d14d454723bef65660e389eccbfe1ef20e505 /nixos/tests/installer.nix
parent     4a609ec5d195ed770a1b30fa49ba19a0947cd819 (diff)
nixos/tests/installer: test /boot on ZFS
Let's test / on ZFS and /boot on ZFS in separate tests, since the GRUB integration for ZFS does not seem to be very well maintained.
If the test breaks in the future, it will be easier to figure out that ZFS on /boot is at fault, and to either fix the issue or disable the test.
The new test creates a ZFS pool where all features not compatible with GRUB2 are disabled. The dataset is then mounted on /boot, and we check that the installer correctly generates a bootable configuration.
The test tries to use as many ZFS features as possible, to verify that GRUB can handle them.
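For reference, a minimal sketch of the mechanism the new test relies on, assuming OpenZFS 2.1 or newer (which introduced the compatibility pool property); the pool name and device are the ones used in the test below:

    # Restrict the pool to the feature set GRUB2 understands.
    # compatibility=grub2 refers to a feature list shipped with OpenZFS
    # (typically under /usr/share/zfs/compatibility.d/).
    zpool create -o compatibility=grub2 bpool /dev/vda1

    # Features outside the grub2 set remain "disabled"; compatible ones
    # start out "enabled" and become "active" once first used.
    zpool get all bpool | grep feature@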
Diffstat (limited to 'nixos/tests/installer.nix')
-rw-r--r--  nixos/tests/installer.nix  |  86
1 file changed, 75 insertions(+), 11 deletions(-)
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index f7fc168eba8cf..d83e49a3e8f7c 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -878,6 +878,78 @@ in {
     '';
   };
 
+  # Same as the previous test, but with /boot on ZFS.
+  separateBootZfs = makeInstallerTest "separateBootZfs" {
+    extraInstallerConfig = {
+      boot.supportedFilesystems = [ "zfs" ];
+    };
+
+    extraConfig = ''
+      # Using by-uuid overrides the default of by-id, which is
+      # necessary for the qemu disks, as they don't produce
+      # by-id paths for some reason.
+      boot.zfs.devNodes = "/dev/disk/by-uuid/";
+      networking.hostId = "00000000";
+    '';
+
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary ext2 1M 256MB"   # /boot
+          + " mkpart primary linux-swap 256MB 1280M"
+          + " mkpart primary ext2 1280M -1s", # /
+          "udevadm settle",
+
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+
+          "mkfs.ext4 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+
+          # Use as many ZFS features as possible to verify that GRUB can handle them
+          "zpool create"
+            " -o compatibility=grub2"
+            " -O utf8only=on"
+            " -O normalization=formD"
+            " -O compression=lz4"      # Activate the lz4_compress feature
+            " -O xattr=sa"
+            " -O acltype=posixacl"
+            " bpool /dev/vda1",
+          "zfs create"
+            " -o recordsize=1M"        # Prepare activating the large_blocks feature
+            " -o mountpoint=legacy"
+            " -o relatime=on"
+            " -o quota=1G"
+            " -o filesystem_limit=100" # Activate the filesystem_limits features
+            " bpool/boot",
+
+          # Snapshotting the top-level dataset would trigger a bug in GRUB2: https://github.com/openzfs/zfs/issues/13873
+          "zfs snapshot bpool/boot@snap-1",                     # Prepare activating the livelist and bookmarks features
+          "zfs clone bpool/boot@snap-1 bpool/test",             # Activate the livelist feature
+          "zfs bookmark bpool/boot@snap-1 bpool/boot#bookmark", # Activate the bookmarks feature
+          "zpool checkpoint bpool",                             # Activate the zpool_checkpoint feature
+          "mkdir -p /mnt/boot",
+          "mount -t zfs bpool/boot /mnt/boot",
+          "touch /mnt/boot/empty",                              # Activate zilsaxattr feature
+          "dd if=/dev/urandom of=/mnt/boot/test bs=1M count=1", # Activate the large_blocks feature
+
+          # Print out all enabled and active ZFS features (and some other stuff)
+          "sync /mnt/boot",
+          "zpool get all bpool >&2",
+
+          # Abort early if GRUB2 doesn't like the disks
+          "grub-probe --target=device /mnt/boot >&2",
+      )
+    '';
+
+    # umount & export bpool before shutdown
+    # this is a fix for "cannot import 'bpool': pool was previously in use from another system."
+    postInstallCommands = ''
+      machine.succeed("umount /mnt/boot")
+      machine.succeed("zpool export bpool")
+    '';
+  };
+
   # zfs on / with swap
   zfsroot = makeInstallerTest "zfs-root" {
     extraInstallerConfig = {
@@ -897,7 +969,7 @@ in {
     createPartitions = ''
       machine.succeed(
           "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
-          + " mkpart primary 1M 100MB"  # bpool
+          + " mkpart primary 1M 100MB"  # /boot
           + " mkpart primary linux-swap 100M 1024M"
           + " mkpart primary 1024M -1s", # rpool
           "udevadm settle",
@@ -909,20 +981,12 @@ in {
           "zfs create -o mountpoint=legacy rpool/root/usr",
           "mkdir /mnt/usr",
           "mount -t zfs rpool/root/usr /mnt/usr",
-          "zpool create -o compatibility=grub2 bpool /dev/vda1",
-          "zfs create -o mountpoint=legacy bpool/boot",
+          "mkfs.vfat -n BOOT /dev/vda1",
           "mkdir /mnt/boot",
-          "mount -t zfs bpool/boot /mnt/boot",
+          "mount LABEL=BOOT /mnt/boot",
           "udevadm settle",
       )
     '';
-
-    # umount & export bpool before shutdown
-    # this is a fix for "cannot import 'bpool': pool was previously in use from another system."
-    postInstallCommands = ''
-      machine.succeed("umount /mnt/boot")
-      machine.succeed("zpool export bpool")
-    '';
   };
 
   # Create two physical LVM partitions combined into one volume group