Diffstat (limited to 'nixos/tests')
-rw-r--r-- nixos/tests/acme.nix | 107
-rw-r--r-- nixos/tests/activation/etc-overlay-immutable.nix | 33
-rw-r--r-- nixos/tests/activation/etc-overlay-mutable.nix | 13
-rw-r--r-- nixos/tests/all-tests.nix | 138
-rw-r--r-- nixos/tests/alloy.nix | 32
-rw-r--r-- nixos/tests/appliance-repart-image-verity-store.nix | 130
-rw-r--r-- nixos/tests/archi.nix | 4
-rw-r--r-- nixos/tests/aria2.nix | 43
-rw-r--r-- nixos/tests/armagetronad.nix | 6
-rw-r--r-- nixos/tests/artalk.nix | 28
-rw-r--r-- nixos/tests/atop.nix | 11
-rw-r--r-- nixos/tests/audiobookshelf.nix | 5
-rw-r--r-- nixos/tests/avahi.nix | 2
-rw-r--r-- nixos/tests/ayatana-indicators.nix | 238
-rw-r--r-- nixos/tests/benchexec.nix | 54
-rw-r--r-- nixos/tests/bind.nix | 1
-rw-r--r-- nixos/tests/bittorrent.nix | 336
-rw-r--r-- nixos/tests/borgbackup.nix | 26
-rw-r--r-- nixos/tests/borgmatic.nix | 24
-rw-r--r-- nixos/tests/bpf.nix | 7
-rw-r--r-- nixos/tests/budgie.nix | 4
-rw-r--r-- nixos/tests/buildbot.nix | 2
-rw-r--r-- nixos/tests/cagebreak.nix | 6
-rw-r--r-- nixos/tests/castopod.nix | 3
-rw-r--r-- nixos/tests/centrifugo.nix | 8
-rw-r--r-- nixos/tests/ceph-single-node-bluestore-dmcrypt.nix | 273
-rw-r--r-- nixos/tests/cgit.nix | 40
-rw-r--r-- nixos/tests/chromadb.nix | 26
-rw-r--r-- nixos/tests/chrony.nix | 31
-rw-r--r-- nixos/tests/cinnamon-wayland.nix | 2
-rw-r--r-- nixos/tests/cinnamon.nix | 2
-rw-r--r-- nixos/tests/clatd.nix | 97
-rw-r--r-- nixos/tests/cntr.nix | 2
-rw-r--r-- nixos/tests/commafeed.nix | 21
-rw-r--r-- nixos/tests/common/acme/client/default.nix | 2
-rw-r--r-- nixos/tests/containers-bridge.nix | 2
-rw-r--r-- nixos/tests/containers-imperative.nix | 3
-rw-r--r-- nixos/tests/containers-ip.nix | 2
-rw-r--r-- nixos/tests/containers-portforward.nix | 2
-rw-r--r-- nixos/tests/containers-reloadable.nix | 70
-rw-r--r-- nixos/tests/containers-restart_networking.nix | 89
-rw-r--r-- nixos/tests/crabfit.nix | 2
-rw-r--r-- nixos/tests/cryptpad.nix | 71
-rw-r--r-- nixos/tests/curl-impersonate.nix | 3
-rw-r--r-- nixos/tests/dae.nix | 2
-rw-r--r-- nixos/tests/darling-dmg.nix | 34
-rw-r--r-- nixos/tests/ddns-updater.nix | 28
-rw-r--r-- nixos/tests/dependency-track.nix | 65
-rw-r--r-- nixos/tests/devpi-server.nix | 35
-rw-r--r-- nixos/tests/dex-oidc.nix | 9
-rw-r--r-- nixos/tests/dnscrypt-wrapper/default.nix | 148
-rw-r--r-- nixos/tests/dnscrypt-wrapper/public.key | 1
-rw-r--r-- nixos/tests/dnscrypt-wrapper/secret.key | 1
-rw-r--r-- nixos/tests/docker-tools-nix-shell.nix | 95
-rw-r--r-- nixos/tests/docker-tools.nix | 56
-rw-r--r-- nixos/tests/domination.nix | 3
-rw-r--r-- nixos/tests/druid/default.nix | 289
-rw-r--r-- nixos/tests/ec2.nix | 7
-rw-r--r-- nixos/tests/eintopf.nix | 21
-rw-r--r-- nixos/tests/elk.nix | 2
-rw-r--r-- nixos/tests/enlightenment.nix | 4
-rw-r--r-- nixos/tests/fcitx5/default.nix | 21
-rw-r--r-- nixos/tests/filesender.nix | 137
-rw-r--r-- nixos/tests/filesystems-overlayfs.nix | 1
-rw-r--r-- nixos/tests/firefly-iii-data-importer.nix | 27
-rw-r--r-- nixos/tests/firefly-iii.nix | 99
-rw-r--r-- nixos/tests/firefox.nix | 9
-rw-r--r-- nixos/tests/firewall.nix | 18
-rw-r--r-- nixos/tests/fish.nix | 2
-rw-r--r-- nixos/tests/flaresolverr.nix | 22
-rw-r--r-- nixos/tests/flood.nix | 27
-rw-r--r-- nixos/tests/forgejo.nix | 12
-rw-r--r-- nixos/tests/freetube.nix | 2
-rw-r--r-- nixos/tests/freshrss-extensions.nix | 19
-rw-r--r-- nixos/tests/frigate.nix | 21
-rw-r--r-- nixos/tests/frp.nix | 2
-rw-r--r-- nixos/tests/frr.nix | 2
-rw-r--r-- nixos/tests/ft2-clone.nix | 2
-rw-r--r-- nixos/tests/gancio.nix | 87
-rw-r--r-- nixos/tests/garage/default.nix | 1
-rw-r--r-- nixos/tests/garage/with-3node-replication.nix | 8
-rw-r--r-- nixos/tests/gitdaemon.nix | 6
-rw-r--r-- nixos/tests/gitlab.nix | 2
-rw-r--r-- nixos/tests/gitolite-fcgiwrap.nix | 11
-rw-r--r-- nixos/tests/glance.nix | 36
-rw-r--r-- nixos/tests/gnome-extensions.nix | 7
-rw-r--r-- nixos/tests/gnome-xorg.nix | 2
-rw-r--r-- nixos/tests/gnome.nix | 2
-rw-r--r-- nixos/tests/gnupg.nix | 16
-rw-r--r-- nixos/tests/goatcounter.nix | 32
-rw-r--r-- nixos/tests/gotenberg.nix | 26
-rw-r--r-- nixos/tests/gotify-server.nix | 4
-rw-r--r-- nixos/tests/grafana/basic.nix | 20
-rw-r--r-- nixos/tests/grafana/provision/default.nix | 61
-rw-r--r-- nixos/tests/graylog.nix | 17
-rw-r--r-- nixos/tests/greetd-no-shadow.nix | 49
-rw-r--r-- nixos/tests/hadoop/hadoop.nix | 1
-rw-r--r-- nixos/tests/harmonia.nix | 2
-rw-r--r-- nixos/tests/hocker-fetchdocker/default.nix | 16
-rw-r--r-- nixos/tests/hocker-fetchdocker/hello-world-container.nix | 19
-rw-r--r-- nixos/tests/hocker-fetchdocker/machine.nix | 26
-rw-r--r-- nixos/tests/home-assistant.nix | 8
-rw-r--r-- nixos/tests/homebox.nix | 26
-rw-r--r-- nixos/tests/hydra/default.nix | 2
-rw-r--r-- nixos/tests/ifm.nix | 36
-rw-r--r-- nixos/tests/incus/container.nix | 114
-rw-r--r-- nixos/tests/incus/default.nix | 21
-rw-r--r-- nixos/tests/incus/incusd-options.nix | 114
-rw-r--r-- nixos/tests/incus/lxd-to-incus.nix | 12
-rw-r--r-- nixos/tests/incus/openvswitch.nix | 8
-rw-r--r-- nixos/tests/incus/preseed.nix | 63
-rw-r--r-- nixos/tests/incus/socket-activated.nix | 9
-rw-r--r-- nixos/tests/incus/storage.nix | 12
-rw-r--r-- nixos/tests/incus/ui.nix | 7
-rw-r--r-- nixos/tests/incus/virtual-machine.nix | 23
-rw-r--r-- nixos/tests/initrd-network.nix | 2
-rw-r--r-- nixos/tests/initrd-secrets.nix | 2
-rw-r--r-- nixos/tests/installed-tests/flatpak-builder.nix | 7
-rw-r--r-- nixos/tests/installed-tests/gnome-photos.nix | 2
-rw-r--r-- nixos/tests/installed-tests/ibus.nix | 5
-rw-r--r-- nixos/tests/installed-tests/ostree.nix | 2
-rw-r--r-- nixos/tests/installer-systemd-stage-1.nix | 4
-rw-r--r-- nixos/tests/installer.nix | 228
-rw-r--r-- nixos/tests/installer/byAttrNoChannel.nix | 18
-rw-r--r-- nixos/tests/installer/byAttrWithChannel.nix | 10
-rw-r--r-- nixos/tests/invidious.nix | 29
-rw-r--r-- nixos/tests/ipv6.nix | 4
-rw-r--r-- nixos/tests/iscsi-root.nix | 6
-rw-r--r-- nixos/tests/jackett.nix | 14
-rw-r--r-- nixos/tests/jenkins.nix | 2
-rw-r--r-- nixos/tests/jool.nix | 18
-rw-r--r-- nixos/tests/jotta-cli.nix | 2
-rw-r--r-- nixos/tests/k3s/airgap-images.nix | 42
-rw-r--r-- nixos/tests/k3s/auto-deploy.nix | 125
-rw-r--r-- nixos/tests/k3s/containerd-config.nix | 58
-rw-r--r-- nixos/tests/k3s/default.nix | 33
-rw-r--r-- nixos/tests/k3s/etcd.nix | 195
-rw-r--r-- nixos/tests/k3s/kubelet-config.nix | 80
-rw-r--r-- nixos/tests/k3s/multi-node.nix | 228
-rw-r--r-- nixos/tests/k3s/single-node.nix | 95
-rw-r--r-- nixos/tests/kafka.nix | 10
-rw-r--r-- nixos/tests/kanidm-provisioning.nix | 518
-rw-r--r-- nixos/tests/kanidm.nix | 24
-rw-r--r-- nixos/tests/kea.nix | 1
-rw-r--r-- nixos/tests/keepalived.nix | 4
-rw-r--r-- nixos/tests/kerberos/heimdal.nix | 2
-rw-r--r-- nixos/tests/kerberos/mit.nix | 2
-rw-r--r-- nixos/tests/kernel-generic.nix | 7
-rw-r--r-- nixos/tests/knot.nix | 4
-rw-r--r-- nixos/tests/kubernetes/base.nix | 6
-rw-r--r-- nixos/tests/kubo/default.nix | 4
-rw-r--r-- nixos/tests/kubo/kubo-fuse.nix | 2
-rw-r--r-- nixos/tests/ladybird.nix | 4
-rw-r--r-- nixos/tests/lemmy.nix | 6
-rw-r--r-- nixos/tests/libreddit.nix | 19
-rw-r--r-- nixos/tests/librenms.nix | 46
-rw-r--r-- nixos/tests/libreswan-nat.nix | 238
-rw-r--r-- nixos/tests/libreswan.nix | 6
-rw-r--r-- nixos/tests/libvirtd.nix | 5
-rw-r--r-- nixos/tests/limesurvey.nix | 6
-rw-r--r-- nixos/tests/livebook-service.nix | 5
-rw-r--r-- nixos/tests/localsend.nix | 21
-rw-r--r-- nixos/tests/login.nix | 3
-rw-r--r-- nixos/tests/logrotate.nix | 92
-rw-r--r-- nixos/tests/lomiri-calculator-app.nix | 59
-rw-r--r-- nixos/tests/lomiri-camera-app.nix | 135
-rw-r--r-- nixos/tests/lomiri-clock-app.nix | 48
-rw-r--r-- nixos/tests/lomiri-docviewer-app.nix | 84
-rw-r--r-- nixos/tests/lomiri-filemanager-app.nix | 48
-rw-r--r-- nixos/tests/lomiri-gallery-app.nix | 156
-rw-r--r-- nixos/tests/lomiri-system-settings.nix | 2
-rw-r--r-- nixos/tests/lomiri.nix | 971
-rw-r--r-- nixos/tests/lorri/default.nix | 6
-rw-r--r-- nixos/tests/lvm2/default.nix | 4
-rw-r--r-- nixos/tests/lvm2/systemd-stage-1.nix | 14
-rw-r--r-- nixos/tests/lxc/default.nix | 124
-rw-r--r-- nixos/tests/lxd/container.nix | 6
-rw-r--r-- nixos/tests/lxd/nftables.nix | 4
-rw-r--r-- nixos/tests/lxd/preseed.nix | 4
-rw-r--r-- nixos/tests/lxd/ui.nix | 6
-rw-r--r-- nixos/tests/lxd/virtual-machine.nix | 4
-rw-r--r-- nixos/tests/ly.nix | 44
-rw-r--r-- nixos/tests/mailpit.nix | 35
-rw-r--r-- nixos/tests/mate-wayland.nix | 3
-rw-r--r-- nixos/tests/mate.nix | 4
-rw-r--r-- nixos/tests/matomo.nix | 6
-rw-r--r-- nixos/tests/matrix/appservice-irc.nix | 28
-rw-r--r-- nixos/tests/matrix/mjolnir.nix | 4
-rw-r--r-- nixos/tests/mealie.nix | 2
-rw-r--r-- nixos/tests/mediamtx.nix | 95
-rw-r--r-- nixos/tests/mediatomb.nix | 25
-rw-r--r-- nixos/tests/miracle-wm.nix | 131
-rw-r--r-- nixos/tests/misc.nix | 28
-rw-r--r-- nixos/tests/misskey.nix | 29
-rw-r--r-- nixos/tests/monado.nix | 2
-rw-r--r-- nixos/tests/mongodb.nix | 1
-rw-r--r-- nixos/tests/morph-browser.nix | 2
-rw-r--r-- nixos/tests/mosquitto.nix | 2
-rw-r--r-- nixos/tests/mpd.nix | 1
-rw-r--r-- nixos/tests/mpv.nix | 2
-rw-r--r-- nixos/tests/mumble.nix | 2
-rw-r--r-- nixos/tests/munin.nix | 2
-rw-r--r-- nixos/tests/musescore.nix | 34
-rw-r--r-- nixos/tests/music-assistant.nix | 21
-rw-r--r-- nixos/tests/mutable-users.nix | 26
-rw-r--r-- nixos/tests/mxisd.nix | 21
-rw-r--r-- nixos/tests/mycelium/default.nix | 3
-rw-r--r-- nixos/tests/mysql/common.nix | 2
-rw-r--r-- nixos/tests/mysql/mariadb-galera.nix | 475
-rw-r--r-- nixos/tests/mysql/mysql-autobackup.nix | 78
-rw-r--r-- nixos/tests/mysql/mysql.nix | 2
-rw-r--r-- nixos/tests/nat.nix | 79
-rw-r--r-- nixos/tests/netbird.nix | 4
-rw-r--r-- nixos/tests/netdata.nix | 5
-rw-r--r-- nixos/tests/networking-proxy.nix | 2
-rw-r--r-- nixos/tests/networking/networkmanager.nix | 2
-rw-r--r-- nixos/tests/nextcloud/basic.nix | 103
-rw-r--r-- nixos/tests/nextcloud/default.nix | 121
-rw-r--r-- nixos/tests/nextcloud/with-mysql-and-memcached.nix | 58
-rw-r--r-- nixos/tests/nextcloud/with-objectstore.nix | 96
-rw-r--r-- nixos/tests/nextcloud/with-postgresql-and-redis.nix | 84
-rw-r--r-- nixos/tests/nfs/simple.nix | 2
-rw-r--r-- nixos/tests/nix-required-mounts/default.nix | 58
-rw-r--r-- nixos/tests/nix-required-mounts/ensure-path-not-present.nix | 13
-rw-r--r-- nixos/tests/nix-required-mounts/test-require-feature.nix | 26
-rw-r--r-- nixos/tests/nix-required-mounts/test-structured-attrs-empty.nix | 8
-rw-r--r-- nixos/tests/nix-required-mounts/test-structured-attrs.nix | 18
-rw-r--r-- nixos/tests/nix-serve.nix | 6
-rw-r--r-- nixos/tests/nix/misc.nix | 64
-rw-r--r-- nixos/tests/nix/upgrade.nix | 108
-rw-r--r-- nixos/tests/nixos-rebuild-specialisations.nix | 2
-rw-r--r-- nixos/tests/nvidia-container-toolkit.nix | 149
-rw-r--r-- nixos/tests/nvmetcfg.nix | 2
-rw-r--r-- nixos/tests/nzbhydra2.nix | 2
-rw-r--r-- nixos/tests/oci-containers.nix | 2
-rw-r--r-- nixos/tests/odoo.nix | 2
-rw-r--r-- nixos/tests/ollama-cuda.nix | 17
-rw-r--r-- nixos/tests/ollama-rocm.nix | 17
-rw-r--r-- nixos/tests/ollama.nix | 83
-rw-r--r-- nixos/tests/open-webui.nix | 49
-rw-r--r-- nixos/tests/openarena.nix | 2
-rw-r--r-- nixos/tests/openssh.nix | 28
-rw-r--r-- nixos/tests/opentelemetry-collector.nix | 4
-rw-r--r-- nixos/tests/outline.nix | 2
-rw-r--r-- nixos/tests/pam/pam-u2f.nix | 16
-rw-r--r-- nixos/tests/pantheon.nix | 10
-rw-r--r-- nixos/tests/paperless.nix | 2
-rw-r--r-- nixos/tests/patroni.nix | 2
-rw-r--r-- nixos/tests/pgbouncer.nix | 40
-rw-r--r-- nixos/tests/pghero.nix | 63
-rw-r--r-- nixos/tests/pgvecto-rs.nix | 2
-rw-r--r-- nixos/tests/phosh.nix | 2
-rw-r--r-- nixos/tests/pingvin-share.nix | 26
-rw-r--r-- nixos/tests/plasma5.nix | 1
-rw-r--r-- nixos/tests/playwright-python.nix | 58
-rw-r--r-- nixos/tests/pleroma.nix | 9
-rw-r--r-- nixos/tests/plotinus.nix | 16
-rw-r--r-- nixos/tests/podman/default.nix | 29
-rw-r--r-- nixos/tests/postgresql-jit.nix | 15
-rw-r--r-- nixos/tests/postgresql-tls-client-cert.nix | 141
-rw-r--r-- nixos/tests/postgresql-wal-receiver.nix | 202
-rw-r--r-- nixos/tests/postgresql-wal2json.nix | 60
-rw-r--r-- nixos/tests/postgresql/wal2json/LICENSE | 27
-rw-r--r-- nixos/tests/postgresql/wal2json/README.md | 11
-rw-r--r-- nixos/tests/postgresql/wal2json/example2.out | 74
-rw-r--r-- nixos/tests/postgresql/wal2json/example2.sql | 31
-rw-r--r-- nixos/tests/postgresql/wal2json/example3.out | 12
-rw-r--r-- nixos/tests/postgresql/wal2json/example3.sql | 26
-rw-r--r-- nixos/tests/printing.nix | 2
-rw-r--r-- nixos/tests/private-gpt.nix | 27
-rw-r--r-- nixos/tests/prometheus-exporters.nix | 144
-rw-r--r-- nixos/tests/prometheus/alertmanager.nix | 152
-rw-r--r-- nixos/tests/prometheus/config-reload.nix | 116
-rw-r--r-- nixos/tests/prometheus/default.nix | 13
-rw-r--r-- nixos/tests/prometheus/federation.nix | 213
-rw-r--r-- nixos/tests/prometheus/prometheus-pair.nix | 87
-rw-r--r-- nixos/tests/prometheus/pushgateway.nix | 96
-rw-r--r-- nixos/tests/prometheus/remote-write.nix | 73
-rw-r--r-- nixos/tests/proxy.nix | 2
-rw-r--r-- nixos/tests/pt2-clone.nix | 1
-rw-r--r-- nixos/tests/qemu-vm-store.nix | 71
-rw-r--r-- nixos/tests/qtile/add-widget.patch | 19
-rw-r--r-- nixos/tests/qtile/config.nix | 24
-rw-r--r-- nixos/tests/qtile/default.nix (renamed from nixos/tests/qtile.nix) | 21
-rw-r--r-- nixos/tests/quake3.nix | 8
-rw-r--r-- nixos/tests/quickwit.nix | 103
-rw-r--r-- nixos/tests/rabbitmq.nix | 2
-rw-r--r-- nixos/tests/radicle.nix | 207
-rw-r--r-- nixos/tests/rathole.nix | 89
-rw-r--r-- nixos/tests/realm.nix | 39
-rw-r--r-- nixos/tests/redlib.nix | 4
-rw-r--r-- nixos/tests/renovate.nix | 69
-rw-r--r-- nixos/tests/restart-by-activation-script.nix | 2
-rw-r--r-- nixos/tests/restic.nix | 29
-rw-r--r-- nixos/tests/rke2/default.nix | 13
-rw-r--r-- nixos/tests/rke2/multi-node.nix | 176
-rw-r--r-- nixos/tests/rke2/single-node.nix | 75
-rw-r--r-- nixos/tests/rosenpass.nix | 17
-rw-r--r-- nixos/tests/rtorrent.nix | 25
-rw-r--r-- nixos/tests/samba.nix | 69
-rw-r--r-- nixos/tests/scion/freestanding-deployment/default.nix | 52
-rw-r--r-- nixos/tests/searx.nix | 182
-rw-r--r-- nixos/tests/seatd.nix | 2
-rw-r--r-- nixos/tests/sfxr-qt.nix | 1
-rw-r--r-- nixos/tests/shattered-pixel-dungeon.nix | 1
-rw-r--r-- nixos/tests/shiori.nix | 135
-rw-r--r-- nixos/tests/simple.nix | 2
-rw-r--r-- nixos/tests/slimserver.nix | 3
-rw-r--r-- nixos/tests/smokeping.nix | 14
-rw-r--r-- nixos/tests/snapper.nix | 5
-rw-r--r-- nixos/tests/sogo.nix | 2
-rw-r--r-- nixos/tests/soju.nix | 2
-rw-r--r-- nixos/tests/sourcehut/nodes/common.nix | 46
-rw-r--r-- nixos/tests/stalwart-mail.nix | 20
-rw-r--r-- nixos/tests/step-ca.nix | 25
-rw-r--r-- nixos/tests/stub-ld.nix | 4
-rw-r--r-- nixos/tests/swayfx.nix | 2
-rw-r--r-- nixos/tests/switch-test.nix | 5
-rw-r--r-- nixos/tests/sx.nix | 63
-rw-r--r-- nixos/tests/systemd-analyze.nix | 1
-rw-r--r-- nixos/tests/systemd-boot-ovmf-broken-fat-driver.patch | 25
-rw-r--r-- nixos/tests/systemd-boot.nix | 95
-rw-r--r-- nixos/tests/systemd-confinement.nix | 184
-rw-r--r-- nixos/tests/systemd-confinement/checkperms.py | 187
-rw-r--r-- nixos/tests/systemd-confinement/default.nix | 274
-rw-r--r-- nixos/tests/systemd-homed.nix | 4
-rw-r--r-- nixos/tests/systemd-initrd-luks-fido2.nix | 3
-rw-r--r-- nixos/tests/systemd-initrd-luks-unl0kr.nix | 4
-rw-r--r-- nixos/tests/systemd-initrd-modprobe.nix | 12
-rw-r--r-- nixos/tests/systemd-machinectl.nix | 26
-rw-r--r-- nixos/tests/systemd-networkd-dhcpserver-static-leases.nix | 36
-rw-r--r-- nixos/tests/systemd-networkd-dhcpserver.nix | 6
-rw-r--r-- nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix | 8
-rw-r--r-- nixos/tests/systemd-networkd-vrf.nix | 19
-rw-r--r-- nixos/tests/systemd-networkd.nix | 24
-rw-r--r-- nixos/tests/systemd-resolved.nix | 75
-rw-r--r-- nixos/tests/systemd-sysusers-immutable.nix | 57
-rw-r--r-- nixos/tests/systemd-sysusers-mutable.nix | 39
-rw-r--r-- nixos/tests/systemd.nix | 3
-rw-r--r-- nixos/tests/tandoor-recipes-script-name.nix | 95
-rw-r--r-- nixos/tests/taskchampion-sync-server.nix | 48
-rw-r--r-- nixos/tests/taskserver.nix | 6
-rw-r--r-- nixos/tests/tayga.nix | 23
-rw-r--r-- nixos/tests/telegraf.nix | 7
-rw-r--r-- nixos/tests/teleport.nix | 2
-rw-r--r-- nixos/tests/teleports.nix | 48
-rw-r--r-- nixos/tests/terminal-emulators.nix | 7
-rw-r--r-- nixos/tests/thanos.nix (renamed from nixos/tests/prometheus.nix) | 58
-rw-r--r-- nixos/tests/tigervnc.nix | 10
-rw-r--r-- nixos/tests/tika.nix | 21
-rw-r--r-- nixos/tests/timezone.nix | 2
-rw-r--r-- nixos/tests/tinywl.nix | 2
-rw-r--r-- nixos/tests/tomcat.nix | 9
-rw-r--r-- nixos/tests/traefik.nix | 2
-rw-r--r-- nixos/tests/turbovnc-headless-server.nix | 13
-rw-r--r-- nixos/tests/turn-rs.nix | 65
-rw-r--r-- nixos/tests/udisks2.nix | 3
-rw-r--r-- nixos/tests/user-activation-scripts.nix | 1
-rw-r--r-- nixos/tests/user-home-mode.nix | 8
-rw-r--r-- nixos/tests/userborn-immutable-etc.nix | 70
-rw-r--r-- nixos/tests/userborn-immutable-users.nix | 75
-rw-r--r-- nixos/tests/userborn-mutable-etc.nix | 70
-rw-r--r-- nixos/tests/userborn-mutable-users.nix | 76
-rw-r--r-- nixos/tests/userborn.nix | 135
-rw-r--r-- nixos/tests/vaultwarden.nix | 274
-rw-r--r-- nixos/tests/vector.nix | 37
-rw-r--r-- nixos/tests/vector/api.nix | 39
-rw-r--r-- nixos/tests/vector/default.nix | 12
-rw-r--r-- nixos/tests/vector/dnstap.nix | 118
-rw-r--r-- nixos/tests/vector/file-sink.nix | 49
-rw-r--r-- nixos/tests/vector/nginx-clickhouse.nix | 168
-rw-r--r-- nixos/tests/vector/syslog-quickwit.nix | 156
-rw-r--r-- nixos/tests/virtualbox.nix | 1
-rw-r--r-- nixos/tests/vscode-remote-ssh.nix | 12
-rw-r--r-- nixos/tests/web-apps/mastodon/default.nix | 6
-rw-r--r-- nixos/tests/web-apps/mastodon/remote-databases.nix | 34
-rw-r--r-- nixos/tests/web-apps/mastodon/standard.nix | 3
-rw-r--r-- nixos/tests/web-apps/nextjs-ollama-llm-ui.nix | 22
-rw-r--r-- nixos/tests/web-apps/pixelfed/standard.nix | 7
-rw-r--r-- nixos/tests/web-apps/pretalx.nix | 9
-rw-r--r-- nixos/tests/web-apps/pretix.nix | 1
-rw-r--r-- nixos/tests/web-apps/weblate.nix | 104
-rw-r--r-- nixos/tests/web-servers/stargazer.nix | 42
-rw-r--r-- nixos/tests/wg-access-server.nix | 28
-rw-r--r-- nixos/tests/wireguard/default.nix | 3
-rw-r--r-- nixos/tests/wordpress.nix | 4
-rw-r--r-- nixos/tests/wpa_supplicant.nix | 262
-rw-r--r-- nixos/tests/wstunnel.nix | 93
-rw-r--r-- nixos/tests/xfce.nix | 3
-rw-r--r-- nixos/tests/ydotool.nix | 184
-rw-r--r-- nixos/tests/your_spotify.nix | 33
-rw-r--r-- nixos/tests/zfs.nix | 18
-rw-r--r-- nixos/tests/zigbee2mqtt.nix | 2
393 files changed, 13689 insertions, 3781 deletions
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index d63a77fcdd23c..a4f00be887be2 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -99,7 +99,14 @@
               serverAliases = [ "${server}-wildcard-alias.example.test" ];
               useACMEHost = "example.test";
             };
-          };
+          } // (lib.optionalAttrs (server == "nginx") {
+            # The nginx module supports using a different key than the hostname
+            different-key = vhostBaseData // {
+              serverName = "${server}-different-key.example.test";
+              serverAliases = [ "${server}-different-key-alias.example.test" ];
+              enableACME = true;
+            };
+          });
         };
 
         # Used to determine if service reload was triggered
@@ -117,7 +124,7 @@
     };
 
     # Test that server reloads when an alias is removed (and subsequently test removal works in acme)
-    "${server}-remove-alias".configuration = { nodes, config, ... }: baseConfig {
+    "${server}_remove_alias".configuration = { nodes, config, ... }: baseConfig {
       inherit nodes config;
       specialConfig = {
         # Remove an alias, but create a standalone vhost in its place for testing.
@@ -133,7 +140,7 @@
     };
 
     # Test that the server reloads when only the acme configuration is changed.
-    "${server}-change-acme-conf".configuration = { nodes, config, ... }: baseConfig {
+    "${server}_change_acme_conf".configuration = { nodes, config, ... }: baseConfig {
       inherit nodes config;
       specialConfig = {
         security.acme.certs."${server}-http.example.test" = {
@@ -193,6 +200,14 @@ in {
         # Tests HTTP-01 verification using Lego's built-in web server
         http01lego.configuration = simpleConfig;
 
+        # account hash generation with default server from <= 23.11
+        http01lego_legacyAccountHash.configuration = lib.mkMerge [
+          simpleConfig
+          {
+            security.acme.defaults.server = lib.mkForce null;
+          }
+        ];
+
         renew.configuration = lib.mkMerge [
           simpleConfig
           {
@@ -236,7 +251,7 @@ in {
         ];
 
         # Test OCSP Stapling
-        ocsp-stapling.configuration = { ... }: lib.mkMerge [
+        ocsp_stapling.configuration = { ... }: lib.mkMerge [
           webserverBasicConfig
           {
             security.acme.certs."a.example.test".ocspMustStaple = true;
@@ -251,7 +266,7 @@ in {
 
         # Validate service relationships by adding a slow start service to nginx' wants.
         # Reproducer for https://github.com/NixOS/nixpkgs/issues/81842
-        slow-startup.configuration = { ... }: lib.mkMerge [
+        slow_startup.configuration = { ... }: lib.mkMerge [
           webserverBasicConfig
           {
             systemd.services.my-slow-service = {
@@ -269,7 +284,7 @@ in {
           }
         ];
 
-        concurrency-limit.configuration = {pkgs, ...}: lib.mkMerge [
+        concurrency_limit.configuration = {pkgs, ...}: lib.mkMerge [
           webserverBasicConfig {
             security.acme.maxConcurrentRenewals = 1;
 
@@ -302,7 +317,7 @@ in {
 
         # Test lego internal server (listenHTTP option)
         # Also tests useRoot option
-        lego-server.configuration = { ... }: {
+        lego_server.configuration = { ... }: {
           security.acme.useRoot = true;
           security.acme.certs."lego.example.test" = {
             listenHTTP = ":80";
@@ -343,7 +358,7 @@ in {
         caddy.configuration = baseCaddyConfig;
 
         # Test that the server reloads when only the acme configuration is changed.
-        "caddy-change-acme-conf".configuration = { nodes, config, ... }: lib.mkMerge [
+        "caddy_change_acme_conf".configuration = { nodes, config, ... }: lib.mkMerge [
           (baseCaddyConfig {
             inherit nodes config;
           })
@@ -385,8 +400,6 @@ in {
   testScript = { nodes, ... }:
     let
       caDomain = nodes.acme.test-support.acme.caDomain;
-      newServerSystem = nodes.webserver.config.system.build.toplevel;
-      switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test";
     in
     # Note, wait_for_unit does not work for oneshot services that do not have RemainAfterExit=true,
     # this is because a oneshot goes from inactive => activating => inactive, and never
@@ -419,7 +432,7 @@ in {
       backoff = BackoffTracker()
 
 
-      def switch_to(node, name):
+      def switch_to(node, name, allow_fail=False):
           # On first switch, this will create a symlink to the current system so that we can
           # quickly switch between derivations
           root_specs = "/tmp/specialisation"
@@ -433,9 +446,14 @@ in {
           if rc > 0:
               switcher_path = f"/tmp/specialisation/{name}/bin/switch-to-configuration"
 
-          node.succeed(
-              f"{switcher_path} test"
-          )
+          if not allow_fail:
+            node.succeed(
+                f"{switcher_path} test"
+            )
+          else:
+            node.execute(
+                f"{switcher_path} test"
+            )
 
 
       # Ensures the issuer of our cert matches the chain
@@ -538,6 +556,12 @@ in {
           check_fullchain(webserver, "http.example.test")
           check_issuer(webserver, "http.example.test", "pebble")
 
+      # Perform account hash test
+      with subtest("Assert that account hash didn't unexpectedly change"):
+          hash = webserver.succeed("ls /var/lib/acme/.lego/accounts/")
+          print("Account hash: " + hash)
+          assert hash.strip() == "d590213ed52603e9128d"
+
       # Perform renewal test
       with subtest("Can renew certificates when they expire"):
           hash = webserver.succeed("sha256sum /var/lib/acme/http.example.test/cert.pem")
@@ -605,12 +629,12 @@ in {
           webserver.succeed("systemctl start nginx-config-reload.service")
 
       with subtest("Correctly implements OCSP stapling"):
-          switch_to(webserver, "ocsp-stapling")
+          switch_to(webserver, "ocsp_stapling")
           webserver.wait_for_unit("acme-finished-a.example.test.target")
           check_stapling(client, "a.example.test")
 
       with subtest("Can request certificate with HTTP-01 using lego's internal web server"):
-          switch_to(webserver, "lego-server")
+          switch_to(webserver, "lego_server")
           webserver.wait_for_unit("acme-finished-lego.example.test.target")
           webserver.wait_for_unit("nginx.service")
           webserver.succeed("echo HENLO && systemctl cat nginx.service")
@@ -620,14 +644,14 @@ in {
 
       with subtest("Can request certificate with HTTP-01 when nginx startup is delayed"):
           webserver.execute("systemctl stop nginx")
-          switch_to(webserver, "slow-startup")
+          switch_to(webserver, "slow_startup")
           webserver.wait_for_unit("acme-finished-slow.example.test.target")
           check_issuer(webserver, "slow.example.test", "pebble")
           webserver.wait_for_unit("nginx.service")
           check_connection(client, "slow.example.test")
 
       with subtest("Can limit concurrency of running renewals"):
-          switch_to(webserver, "concurrency-limit")
+          switch_to(webserver, "concurrency_limit")
           webserver.wait_for_unit("acme-finished-f.example.test.target")
           webserver.wait_for_unit("acme-finished-g.example.test.target")
           webserver.wait_for_unit("acme-finished-h.example.test.target")
@@ -645,7 +669,7 @@ in {
           check_connection(client, "a.example.test")
 
       with subtest("security.acme changes reflect on caddy"):
-          switch_to(webserver, "caddy-change-acme-conf")
+          switch_to(webserver, "caddy_change_acme_conf")
           webserver.wait_for_unit("acme-finished-example.test.target")
           webserver.wait_for_unit("caddy.service")
           # FIXME reloading caddy is not sufficient to load new certs.
@@ -653,20 +677,20 @@ in {
           webserver.succeed("systemctl restart caddy.service")
           check_connection_key_bits(client, "a.example.test", "384")
 
-      domains = ["http", "dns", "wildcard"]
-      for server, logsrc in [
-          ("nginx", "journalctl -n 30 -u nginx.service"),
-          ("httpd", "tail -n 30 /var/log/httpd/*.log"),
+      common_domains = ["http", "dns", "wildcard"]
+      for server, logsrc, domains in [
+          ("nginx", "journalctl -n 30 -u nginx.service", common_domains + ["different-key"]),
+          ("httpd", "tail -n 30 /var/log/httpd/*.log", common_domains),
       ]:
           wait_for_server = lambda: webserver.wait_for_unit(f"{server}.service")
           with subtest(f"Works with {server}"):
               try:
                   switch_to(webserver, server)
-                  # Skip wildcard domain for this check ([:-1])
-                  for domain in domains[:-1]:
-                      webserver.wait_for_unit(
-                          f"acme-finished-{server}-{domain}.example.test.target"
-                      )
+                  for domain in domains:
+                      if domain != "wildcard":
+                          webserver.wait_for_unit(
+                              f"acme-finished-{server}-{domain}.example.test.target"
+                          )
               except Exception as err:
                   _, output = webserver.execute(
                       f"{logsrc} && ls -al /var/lib/acme/acme-challenge"
@@ -676,8 +700,9 @@ in {
 
               wait_for_server()
 
-              for domain in domains[:-1]:
-                  check_issuer(webserver, f"{server}-{domain}.example.test", "pebble")
+              for domain in domains:
+                  if domain != "wildcard":
+                      check_issuer(webserver, f"{server}-{domain}.example.test", "pebble")
               for domain in domains:
                   check_connection(client, f"{server}-{domain}.example.test")
                   check_connection(client, f"{server}-{domain}-alias.example.test")
@@ -696,7 +721,7 @@ in {
 
           with subtest("Can remove an alias from a domain + cert is updated"):
               test_alias = f"{server}-{domains[0]}-alias.example.test"
-              switch_to(webserver, f"{server}-remove-alias")
+              switch_to(webserver, f"{server}_remove_alias")
               webserver.wait_for_unit(f"acme-finished-{test_domain}.target")
               wait_for_server()
               check_connection(client, test_domain)
@@ -711,9 +736,27 @@ in {
               # Switch back to normal server config first, reset everything.
               switch_to(webserver, server)
               wait_for_server()
-              switch_to(webserver, f"{server}-change-acme-conf")
+              switch_to(webserver, f"{server}_change_acme_conf")
               webserver.wait_for_unit(f"acme-finished-{test_domain}.target")
               wait_for_server()
               check_connection_key_bits(client, test_domain, "384")
+
+      # Perform http-01 w/ lego test again, but using the pre-24.05 account hashing
+      # (see https://github.com/NixOS/nixpkgs/pull/317257)
+      with subtest("Check account hashing compatibility with pre-24.05 settings"):
+          webserver.succeed("rm -rf /var/lib/acme/.lego/accounts/*")
+          switch_to(webserver, "http01lego_legacyAccountHash", allow_fail=True)
+          # unit is failed, but in a way that this throws no exception:
+          try:
+            webserver.wait_for_unit("acme-finished-http.example.test.target")
+          except Exception:
+            # The unit is allowed – or even expected – to fail due to not being able to
+            # reach the actual letsencrypt server. We only use it for serialising the
+            # test execution, such that the account check is done after the service run
+            # involving the account creation has been executed at least once.
+            pass
+          hash = webserver.succeed("ls /var/lib/acme/.lego/accounts/")
+          print("Account hash: " + hash)
+          assert hash.strip() == "1ccf607d9aa280e9af00"
     '';
 }
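
The specialisation switching exercised throughout the acme.nix test script reduces to running switch-to-configuration from a specialisation's toplevel. A minimal sketch of that pattern, assuming the standard NixOS test driver API (node.succeed / node.execute); the helper name switch_to_specialisation is illustrative and is not the test's actual switch_to helper:

    # Illustrative sketch, not the test's real helper. It relies only on the
    # /run/current-system/specialisation/<name> path used elsewhere in this diff.
    def switch_to_specialisation(node, name, allow_fail=False):
        cmd = f"/run/current-system/specialisation/{name}/bin/switch-to-configuration test"
        if allow_fail:
            node.execute(cmd)  # execute() tolerates a non-zero exit status
        else:
            node.succeed(cmd)  # succeed() raises if switching generations fails

    # Usage mirroring the subtests above, e.g.:
    #   switch_to_specialisation(webserver, "ocsp_stapling")
    #   switch_to_specialisation(webserver, "http01lego_legacyAccountHash", allow_fail=True)

The allow_fail path matters for the legacy account-hash subtest, where the ACME unit is expected to fail (it cannot reach a real CA) and the switch is only used to serialise the account-creation run before the hash is checked.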
diff --git a/nixos/tests/activation/etc-overlay-immutable.nix b/nixos/tests/activation/etc-overlay-immutable.nix
index f347f9cf8efe2..6d56db43f0b25 100644
--- a/nixos/tests/activation/etc-overlay-immutable.nix
+++ b/nixos/tests/activation/etc-overlay-immutable.nix
@@ -13,6 +13,12 @@
     users.mutableUsers = false;
     boot.initrd.systemd.enable = true;
     boot.kernelPackages = pkgs.linuxPackages_latest;
+    time.timeZone = "Utc";
+
+    environment.etc = {
+      "mountpoint/.keep".text = "keep";
+      "filemount".text = "keep";
+    };
 
     specialisation.new-generation.configuration = {
       environment.etc."newgen".text = "newgen";
@@ -23,14 +29,41 @@
     with subtest("/etc is mounted as an overlay"):
       machine.succeed("findmnt --kernel --type overlay /etc")
 
+    with subtest("direct symlinks point to the target without indirection"):
+      assert machine.succeed("readlink -n /etc/localtime") == "/etc/zoneinfo/Utc"
+
+    with subtest("/etc/mtab points to the right file"):
+      assert "/proc/mounts" == machine.succeed("readlink --no-newline /etc/mtab")
+
+    with subtest("Correct mode on the source password files"):
+      assert machine.succeed("stat -c '%a' /var/lib/nixos/etc/passwd") == "644\n"
+      assert machine.succeed("stat -c '%a' /var/lib/nixos/etc/group") == "644\n"
+      assert machine.succeed("stat -c '%a' /var/lib/nixos/etc/shadow") == "0\n"
+      assert machine.succeed("stat -c '%a' /var/lib/nixos/etc/gshadow") == "0\n"
+
+    with subtest("Password files are symlinks to /var/lib/nixos/etc"):
+      assert machine.succeed("readlink -f /etc/passwd") == "/var/lib/nixos/etc/passwd\n"
+      assert machine.succeed("readlink -f /etc/group") == "/var/lib/nixos/etc/group\n"
+      assert machine.succeed("readlink -f /etc/shadow") == "/var/lib/nixos/etc/shadow\n"
+      assert machine.succeed("readlink -f /etc/gshadow") == "/var/lib/nixos/etc/gshadow\n"
+
     with subtest("switching to the same generation"):
       machine.succeed("/run/current-system/bin/switch-to-configuration test")
 
     with subtest("switching to a new generation"):
       machine.fail("stat /etc/newgen")
 
+      machine.succeed("mount -t tmpfs tmpfs /etc/mountpoint")
+      machine.succeed("touch /etc/mountpoint/extra-file")
+      machine.succeed("mount --bind /dev/null /etc/filemount")
+
       machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
 
       assert machine.succeed("cat /etc/newgen") == "newgen"
+
+      print(machine.succeed("findmnt /etc/mountpoint"))
+      print(machine.succeed("ls /etc/mountpoint"))
+      print(machine.succeed("stat /etc/mountpoint/extra-file"))
+      print(machine.succeed("findmnt /etc/filemount"))
   '';
 }
diff --git a/nixos/tests/activation/etc-overlay-mutable.nix b/nixos/tests/activation/etc-overlay-mutable.nix
index 087c06408a715..8561ff7fd230d 100644
--- a/nixos/tests/activation/etc-overlay-mutable.nix
+++ b/nixos/tests/activation/etc-overlay-mutable.nix
@@ -28,9 +28,22 @@
       machine.fail("stat /etc/newgen")
       machine.succeed("echo -n 'mutable' > /etc/mutable")
 
+      # Directory
+      machine.succeed("mkdir /etc/mountpoint")
+      machine.succeed("mount -t tmpfs tmpfs /etc/mountpoint")
+      machine.succeed("touch /etc/mountpoint/extra-file")
+
+      # File
+      machine.succeed("touch /etc/filemount")
+      machine.succeed("mount --bind /dev/null /etc/filemount")
+
       machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
 
       assert machine.succeed("cat /etc/newgen") == "newgen"
       assert machine.succeed("cat /etc/mutable") == "mutable"
+
+      print(machine.succeed("findmnt /etc/mountpoint"))
+      print(machine.succeed("stat /etc/mountpoint/extra-file"))
+      print(machine.succeed("findmnt /etc/filemount"))
   '';
 }
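
Both etc-overlay activation tests assert the same property: mounts placed below /etc must survive switching to the new generation while /etc is an overlay. A condensed sketch of that check, assuming the test driver's machine object and the new-generation specialisation defined in these tests:

    # Condensed restatement of the mount-preservation check from both tests
    # (the mutable variant first creates /etc/mountpoint with mkdir).
    machine.succeed("mount -t tmpfs tmpfs /etc/mountpoint")   # directory mount under /etc
    machine.succeed("mount --bind /dev/null /etc/filemount")  # file bind mount under /etc
    machine.succeed(
        "/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch"
    )
    machine.succeed("findmnt /etc/mountpoint")  # raises if the switch unmounted it
    machine.succeed("findmnt /etc/filemount")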
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index d4da32c44990f..a625cd92e236d 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -115,6 +115,7 @@ in {
   akkoma = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./akkoma.nix {};
   akkoma-confined = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./akkoma.nix { confined = true; };
   alice-lg = handleTest ./alice-lg.nix {};
+  alloy = handleTest ./alloy.nix {};
   allTerminfo = handleTest ./all-terminfo.nix {};
   alps = handleTest ./alps.nix {};
   amazon-init-shell = handleTest ./amazon-init-shell.nix {};
@@ -127,9 +128,12 @@ in {
   apcupsd = handleTest ./apcupsd.nix {};
   apfs = runTest ./apfs.nix;
   appliance-repart-image = runTest ./appliance-repart-image.nix;
+  appliance-repart-image-verity-store = runTest ./appliance-repart-image-verity-store.nix;
   apparmor = handleTest ./apparmor.nix {};
   archi = handleTest ./archi.nix {};
+  aria2 = handleTest ./aria2.nix {};
   armagetronad = handleTest ./armagetronad.nix {};
+  artalk = handleTest ./artalk.nix {};
   atd = handleTest ./atd.nix {};
   atop = handleTest ./atop.nix {};
   atuin = handleTest ./atuin.nix {};
@@ -138,12 +142,13 @@ in {
   authelia = handleTest ./authelia.nix {};
   avahi = handleTest ./avahi.nix {};
   avahi-with-resolved = handleTest ./avahi.nix { networkd = true; };
-  ayatana-indicators = handleTest ./ayatana-indicators.nix {};
+  ayatana-indicators = runTest ./ayatana-indicators.nix;
   babeld = handleTest ./babeld.nix {};
   bazarr = handleTest ./bazarr.nix {};
   bcachefs = handleTestOn ["x86_64-linux" "aarch64-linux"] ./bcachefs.nix {};
   beanstalkd = handleTest ./beanstalkd.nix {};
   bees = handleTest ./bees.nix {};
+  benchexec = handleTest ./benchexec.nix {};
   binary-cache = handleTest ./binary-cache.nix {};
   bind = handleTest ./bind.nix {};
   bird = handleTest ./bird.nix {};
@@ -156,6 +161,7 @@ in {
   bootspec = handleTestOn ["x86_64-linux"] ./bootspec.nix {};
   boot-stage1 = handleTest ./boot-stage1.nix {};
   borgbackup = handleTest ./borgbackup.nix {};
+  borgmatic = handleTest ./borgmatic.nix {};
   botamusique = handleTest ./botamusique.nix {};
   bpf = handleTestOn ["x86_64-linux" "aarch64-linux"] ./bpf.nix {};
   bpftune = handleTest ./bpftune.nix {};
@@ -183,10 +189,12 @@ in {
   ceph-multi-node = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-multi-node.nix {};
   ceph-single-node = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node.nix {};
   ceph-single-node-bluestore = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node-bluestore.nix {};
+  ceph-single-node-bluestore-dmcrypt = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node-bluestore-dmcrypt.nix {};
   certmgr = handleTest ./certmgr.nix {};
   cfssl = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cfssl.nix {};
   cgit = handleTest ./cgit.nix {};
   charliecloud = handleTest ./charliecloud.nix {};
+  chromadb = runTest ./chromadb.nix;
   chromium = (handleTestOn ["aarch64-linux" "x86_64-linux"] ./chromium.nix {}).stable or {};
   chrony = handleTestOn ["aarch64-linux" "x86_64-linux"] ./chrony.nix {};
   chrony-ptp = handleTestOn ["aarch64-linux" "x86_64-linux"] ./chrony-ptp.nix {};
@@ -204,6 +212,7 @@ in {
   code-server = handleTest ./code-server.nix {};
   coder = handleTest ./coder.nix {};
   collectd = handleTest ./collectd.nix {};
+  commafeed = handleTest ./commafeed.nix {};
   connman = handleTest ./connman.nix {};
   consul = handleTest ./consul.nix {};
   consul-template = handleTest ./consul-template.nix {};
@@ -230,31 +239,36 @@ in {
   couchdb = handleTest ./couchdb.nix {};
   crabfit = handleTest ./crabfit.nix {};
   cri-o = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cri-o.nix {};
+  cryptpad = runTest ./cryptpad.nix;
   cups-pdf = handleTest ./cups-pdf.nix {};
   curl-impersonate = handleTest ./curl-impersonate.nix {};
   custom-ca = handleTest ./custom-ca.nix {};
   croc = handleTest ./croc.nix {};
   darling = handleTest ./darling.nix {};
+  darling-dmg = runTest ./darling-dmg.nix;
   dae = handleTest ./dae.nix {};
   davis = handleTest ./davis.nix {};
   db-rest = handleTest ./db-rest.nix {};
   dconf = handleTest ./dconf.nix {};
+  ddns-updater = handleTest ./ddns-updater.nix {};
   deconz = handleTest ./deconz.nix {};
   deepin = handleTest ./deepin.nix {};
   deluge = handleTest ./deluge.nix {};
   dendrite = handleTest ./matrix/dendrite.nix {};
+  dependency-track = handleTest ./dependency-track.nix {};
+  devpi-server = handleTest ./devpi-server.nix {};
   dex-oidc = handleTest ./dex-oidc.nix {};
   dhparams = handleTest ./dhparams.nix {};
   disable-installer-tools = handleTest ./disable-installer-tools.nix {};
   discourse = handleTest ./discourse.nix {};
   dnscrypt-proxy2 = handleTestOn ["x86_64-linux"] ./dnscrypt-proxy2.nix {};
-  dnscrypt-wrapper = runTestOn ["x86_64-linux"] ./dnscrypt-wrapper;
   dnsdist = import ./dnsdist.nix { inherit pkgs runTest; };
   doas = handleTest ./doas.nix {};
   docker = handleTestOn ["aarch64-linux" "x86_64-linux"] ./docker.nix {};
   docker-rootless = handleTestOn ["aarch64-linux" "x86_64-linux"] ./docker-rootless.nix {};
   docker-registry = handleTest ./docker-registry.nix {};
   docker-tools = handleTestOn ["x86_64-linux"] ./docker-tools.nix {};
+  docker-tools-nix-shell = runTest ./docker-tools-nix-shell.nix;
   docker-tools-cross = handleTestOn ["x86_64-linux" "aarch64-linux"] ./docker-tools-cross.nix {};
   docker-tools-overlay = handleTestOn ["x86_64-linux"] ./docker-tools-overlay.nix {};
   documize = handleTest ./documize.nix {};
@@ -266,6 +280,7 @@ in {
   dovecot = handleTest ./dovecot.nix {};
   drawterm = discoverTests (import ./drawterm.nix);
   drbd = handleTest ./drbd.nix {};
+  druid = handleTestOn [ "x86_64-linux" ] ./druid {};
   dublin-traceroute = handleTest ./dublin-traceroute.nix {};
   earlyoom = handleTestOn ["x86_64-linux"] ./earlyoom.nix {};
   early-mount-options = handleTest ./early-mount-options.nix {};
@@ -274,6 +289,7 @@ in {
   ecryptfs = handleTest ./ecryptfs.nix {};
   fscrypt = handleTest ./fscrypt.nix {};
   fastnetmon-advanced = runTest ./fastnetmon-advanced.nix;
+  eintopf = handleTest ./eintopf.nix {};
   ejabberd = handleTest ./xmpp/ejabberd.nix {};
   elk = handleTestOn ["x86_64-linux"] ./elk.nix {};
   emacs-daemon = handleTest ./emacs-daemon.nix {};
@@ -290,6 +306,7 @@ in {
   esphome = handleTest ./esphome.nix {};
   etc = pkgs.callPackage ../modules/system/etc/test.nix { inherit evalMinimalConfig; };
   activation = pkgs.callPackage ../modules/system/activation/test.nix { };
+  activation-lib = pkgs.callPackage ../modules/system/activation/lib/test.nix { };
   activation-var = runTest ./activation/var.nix;
   activation-nix-channel = runTest ./activation/nix-channel.nix;
   activation-etc-overlay-mutable = runTest ./activation/etc-overlay-mutable.nix;
@@ -308,27 +325,34 @@ in {
   fenics = handleTest ./fenics.nix {};
   ferm = handleTest ./ferm.nix {};
   ferretdb = handleTest ./ferretdb.nix {};
+  filesender = handleTest ./filesender.nix {};
   filesystems-overlayfs = runTest ./filesystems-overlayfs.nix;
   firefly-iii = handleTest ./firefly-iii.nix {};
+  firefly-iii-data-importer = handleTest ./firefly-iii-data-importer.nix {};
   firefox = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox; };
   firefox-beta = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-beta; };
   firefox-devedition = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-devedition; };
   firefox-esr    = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr; }; # used in `tested` job
   firefox-esr-115 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-115; };
+  firefox-esr-128 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-128; };
   firefoxpwa = handleTest ./firefoxpwa.nix {};
   firejail = handleTest ./firejail.nix {};
   firewall = handleTest ./firewall.nix { nftables = false; };
   firewall-nftables = handleTest ./firewall.nix { nftables = true; };
   fish = handleTest ./fish.nix {};
   flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
+  flaresolverr = handleTest ./flaresolverr.nix {};
+  flood = handleTest ./flood.nix {};
   floorp = handleTest ./firefox.nix { firefoxPackage = pkgs.floorp; };
   fluentd = handleTest ./fluentd.nix {};
   fluidd = handleTest ./fluidd.nix {};
   fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {};
-  forgejo = handleTest ./forgejo.nix { };
+  forgejo = handleTest ./forgejo.nix { forgejoPackage = pkgs.forgejo; };
+  forgejo-lts = handleTest ./forgejo.nix { forgejoPackage = pkgs.forgejo-lts; };
   freenet = handleTest ./freenet.nix {};
   freeswitch = handleTest ./freeswitch.nix {};
   freetube = discoverTests (import ./freetube.nix);
+  freshrss-extensions = handleTest ./freshrss-extensions.nix {};
   freshrss-sqlite = handleTest ./freshrss-sqlite.nix {};
   freshrss-pgsql = handleTest ./freshrss-pgsql.nix {};
   freshrss-http-auth = handleTest ./freshrss-http-auth.nix {};
@@ -341,6 +365,7 @@ in {
   ft2-clone = handleTest ./ft2-clone.nix {};
   legit = handleTest ./legit.nix {};
   mimir = handleTest ./mimir.nix {};
+  gancio = handleTest ./gancio.nix {};
   garage = handleTest ./garage {};
   gemstash = handleTest ./gemstash.nix {};
   geoserver = runTest ./geoserver.nix;
@@ -353,6 +378,7 @@ in {
   gitlab = runTest ./gitlab.nix;
   gitolite = handleTest ./gitolite.nix {};
   gitolite-fcgiwrap = handleTest ./gitolite-fcgiwrap.nix {};
+  glance = runTest ./glance.nix;
   glusterfs = handleTest ./glusterfs.nix {};
   gnome = handleTest ./gnome.nix {};
   gnome-extensions = handleTest ./gnome-extensions.nix {};
@@ -360,6 +386,7 @@ in {
   gnome-xorg = handleTest ./gnome-xorg.nix {};
   gns3-server = handleTest ./gns3-server.nix {};
   gnupg = handleTest ./gnupg.nix {};
+  goatcounter = handleTest ./goatcounter.nix {};
   go-neb = handleTest ./go-neb.nix {};
   gobgpd = handleTest ./gobgpd.nix {};
   gocd-agent = handleTest ./gocd-agent.nix {};
@@ -368,12 +395,14 @@ in {
   gonic = handleTest ./gonic.nix {};
   google-oslogin = handleTest ./google-oslogin {};
   goss = handleTest ./goss.nix {};
+  gotenberg = handleTest ./gotenberg.nix {};
   gotify-server = handleTest ./gotify-server.nix {};
   gotosocial = runTest ./web-apps/gotosocial.nix;
   grafana = handleTest ./grafana {};
   grafana-agent = handleTest ./grafana-agent.nix {};
   graphite = handleTest ./graphite.nix {};
   graylog = handleTest ./graylog.nix {};
+  greetd-no-shadow = handleTest ./greetd-no-shadow.nix {};
   grocy = handleTest ./grocy.nix {};
   grow-partition = runTest ./grow-partition.nix;
   grub = handleTest ./grub.nix {};
@@ -381,7 +410,7 @@ in {
   guix = handleTest ./guix {};
   gvisor = handleTest ./gvisor.nix {};
   hadoop = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop; };
-  hadoop_3_2 = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop_3_2; };
+  hadoop_3_3 = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop_3_3; };
   hadoop2 = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop2; };
   haka = handleTest ./haka.nix {};
   haste-server = handleTest ./haste-server.nix {};
@@ -391,11 +420,13 @@ in {
   headscale = handleTest ./headscale.nix {};
   healthchecks = handleTest ./web-apps/healthchecks.nix {};
   hbase2 = handleTest ./hbase.nix { package=pkgs.hbase2; };
+  hbase_2_5 = handleTest ./hbase.nix { package=pkgs.hbase_2_5; };
   hbase_2_4 = handleTest ./hbase.nix { package=pkgs.hbase_2_4; };
   hbase3 = handleTest ./hbase.nix { package=pkgs.hbase3; };
   hddfancontrol = handleTest ./hddfancontrol.nix {};
   hedgedoc = handleTest ./hedgedoc.nix {};
   herbstluftwm = handleTest ./herbstluftwm.nix {};
+  homebox = handleTest ./homebox.nix {};
   homepage-dashboard = handleTest ./homepage-dashboard.nix {};
   honk = runTest ./honk.nix;
   installed-tests = pkgs.recurseIntoAttrs (handleTest ./installed-tests {});
@@ -405,6 +436,7 @@ in {
   pyload = handleTest ./pyload.nix {};
   oci-containers = handleTestOn ["aarch64-linux" "x86_64-linux"] ./oci-containers.nix {};
   odoo = handleTest ./odoo.nix {};
+  odoo16 = handleTest ./odoo.nix { package = pkgs.odoo16; };
   odoo15 = handleTest ./odoo.nix { package = pkgs.odoo15; };
   # 9pnet_virtio used to mount /nix partition doesn't support
   # hibernation. This test happens to work on x86_64-linux but
@@ -413,7 +445,6 @@ in {
   hibernate-systemd-stage-1 = handleTestOn ["x86_64-linux"] ./hibernate.nix { systemdStage1 = true; };
   hitch = handleTest ./hitch {};
   hledger-web = handleTest ./hledger-web.nix {};
-  hocker-fetchdocker = handleTest ./hocker-fetchdocker {};
   hockeypuck = handleTest ./hockeypuck.nix { };
   home-assistant = handleTest ./home-assistant.nix {};
   hostname = handleTest ./hostname.nix {};
@@ -422,9 +453,11 @@ in {
   hydra = handleTest ./hydra {};
   i3wm = handleTest ./i3wm.nix {};
   icingaweb2 = handleTest ./icingaweb2.nix {};
+  ifm = handleTest ./ifm.nix {};
   iftop = handleTest ./iftop.nix {};
   incron = handleTest ./incron.nix {};
-  incus = pkgs.recurseIntoAttrs (handleTest ./incus { inherit handleTestOn; });
+  incus = pkgs.recurseIntoAttrs (handleTest ./incus { inherit handleTestOn; inherit (pkgs) incus; });
+  incus-lts = pkgs.recurseIntoAttrs (handleTest ./incus { inherit handleTestOn; });
   influxdb = handleTest ./influxdb.nix {};
   influxdb2 = handleTest ./influxdb2.nix {};
   initrd-network-openvpn = handleTestOn [ "x86_64-linux" "i686-linux" ] ./initrd-network-openvpn {};
@@ -456,6 +489,7 @@ in {
   k3s = handleTest ./k3s {};
   kafka = handleTest ./kafka.nix {};
   kanidm = handleTest ./kanidm.nix {};
+  kanidm-provisioning = handleTest ./kanidm-provisioning.nix {};
   karma = handleTest ./karma.nix {};
   kavita = handleTest ./kavita.nix {};
   kbd-setfont-decompress = handleTest ./kbd-setfont-decompress.nix {};
@@ -486,10 +520,10 @@ in {
   leaps = handleTest ./leaps.nix {};
   lemmy = handleTest ./lemmy.nix {};
   libinput = handleTest ./libinput.nix {};
-  libreddit = handleTest ./libreddit.nix {};
   librenms = handleTest ./librenms.nix {};
   libresprite = handleTest ./libresprite.nix {};
-  libreswan = handleTest ./libreswan.nix {};
+  libreswan = runTest ./libreswan.nix;
+  libreswan-nat = runTest ./libreswan-nat.nix;
   librewolf = handleTest ./firefox.nix { firefoxPackage = pkgs.librewolf; };
   libuiohook = handleTest ./libuiohook.nix {};
   libvirtd = handleTest ./libvirtd.nix {};
@@ -500,34 +534,44 @@ in {
   listmonk = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./listmonk.nix {};
   litestream = handleTest ./litestream.nix {};
   lldap = handleTest ./lldap.nix {};
+  localsend = handleTest ./localsend.nix {};
   locate = handleTest ./locate.nix {};
   login = handleTest ./login.nix {};
   logrotate = handleTest ./logrotate.nix {};
   loki = handleTest ./loki.nix {};
   luks = handleTest ./luks.nix {};
   lvm2 = handleTest ./lvm2 {};
+  lxc = handleTest ./lxc {};
   lxd = pkgs.recurseIntoAttrs (handleTest ./lxd { inherit handleTestOn; });
   lxd-image-server = handleTest ./lxd-image-server.nix {};
   #logstash = handleTest ./logstash.nix {};
-  lomiri = handleTest ./lomiri.nix {};
+  lomiri = discoverTests (import ./lomiri.nix);
+  lomiri-calculator-app = runTest ./lomiri-calculator-app.nix;
+  lomiri-camera-app = runTest ./lomiri-camera-app.nix;
+  lomiri-clock-app = runTest ./lomiri-clock-app.nix;
+  lomiri-docviewer-app = runTest ./lomiri-docviewer-app.nix;
+  lomiri-filemanager-app = runTest ./lomiri-filemanager-app.nix;
+  lomiri-gallery-app = runTest ./lomiri-gallery-app.nix;
   lomiri-system-settings = handleTest ./lomiri-system-settings.nix {};
   lorri = handleTest ./lorri/default.nix {};
+  ly = handleTest ./ly.nix {};
   maddy = discoverTests (import ./maddy { inherit handleTest; });
   maestral = handleTest ./maestral.nix {};
   magic-wormhole-mailbox-server = handleTest ./magic-wormhole-mailbox-server.nix {};
   magnetico = handleTest ./magnetico.nix {};
   mailcatcher = handleTest ./mailcatcher.nix {};
   mailhog = handleTest ./mailhog.nix {};
+  mailpit = handleTest ./mailpit.nix {};
   mailman = handleTest ./mailman.nix {};
   man = handleTest ./man.nix {};
   mariadb-galera = handleTest ./mysql/mariadb-galera.nix {};
-  mastodon = discoverTests (import ./web-apps/mastodon { inherit handleTestOn; });
+  mastodon = pkgs.recurseIntoAttrs (handleTest ./web-apps/mastodon { inherit handleTestOn; });
   pixelfed = discoverTests (import ./web-apps/pixelfed { inherit handleTestOn; });
   mate = handleTest ./mate.nix {};
   mate-wayland = handleTest ./mate-wayland.nix {};
   matter-server = handleTest ./matter-server.nix {};
   matomo = handleTest ./matomo.nix {};
-  matrix-appservice-irc = handleTest ./matrix/appservice-irc.nix {};
+  matrix-appservice-irc = runTest ./matrix/appservice-irc.nix;
   matrix-conduit = handleTest ./matrix/conduit.nix {};
   matrix-synapse = handleTest ./matrix/synapse.nix {};
   matrix-synapse-workers = handleTest ./matrix/synapse-workers.nix {};
@@ -549,8 +593,10 @@ in {
   minidlna = handleTest ./minidlna.nix {};
   miniflux = handleTest ./miniflux.nix {};
   minio = handleTest ./minio.nix {};
+  miracle-wm = runTest ./miracle-wm.nix;
   miriway = handleTest ./miriway.nix {};
   misc = handleTest ./misc.nix {};
+  misskey = handleTest ./misskey.nix {};
   mjolnir = handleTest ./matrix/mjolnir.nix {};
   mobilizon = handleTest ./mobilizon.nix {};
   mod_perl = handleTest ./mod_perl.nix {};
@@ -575,9 +621,9 @@ in {
   # Fails on aarch64-linux at the PDF creation step - need to debug this on an
   # aarch64 machine..
   musescore = handleTestOn ["x86_64-linux"] ./musescore.nix {};
+  music-assistant = runTest ./music-assistant.nix;
   munin = handleTest ./munin.nix {};
   mutableUsers = handleTest ./mutable-users.nix {};
-  mxisd = handleTest ./mxisd.nix {};
   mycelium = handleTest ./mycelium {};
   mympd = handleTest ./mympd.nix {};
   mysql = handleTest ./mysql/mysql.nix {};
@@ -585,7 +631,7 @@ in {
   mysql-backup = handleTest ./mysql/mysql-backup.nix {};
   mysql-replication = handleTest ./mysql/mysql-replication.nix {};
   n8n = handleTest ./n8n.nix {};
-  nagios = handleTest ./nagios.nix {};
+  nagios = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./nagios.nix {};
   nar-serve = handleTest ./nar-serve.nix {};
   nat.firewall = handleTest ./nat.nix { withFirewall = true; };
   nat.standalone = handleTest ./nat.nix { withFirewall = false; };
@@ -596,6 +642,7 @@ in {
   nbd = handleTest ./nbd.nix {};
   ncdns = handleTest ./ncdns.nix {};
   ndppd = handleTest ./ndppd.nix {};
+  nix-channel = pkgs.callPackage ../modules/config/nix-channel/test.nix { };
   nebula = handleTest ./nebula.nix {};
   netbird = handleTest ./netbird.nix {};
   nimdow = handleTest ./nimdow.nix {};
@@ -610,6 +657,7 @@ in {
   # TODO: put in networking.nix after the test becomes more complete
   networkingProxy = handleTest ./networking-proxy.nix {};
   nextcloud = handleTest ./nextcloud {};
+  nextjs-ollama-llm-ui = runTest ./web-apps/nextjs-ollama-llm-ui.nix;
   nexus = handleTest ./nexus.nix {};
   # TODO: Test nfsv3 + Kerberos
   nfs3 = handleTest ./nfs { version = 3; };
@@ -636,7 +684,10 @@ in {
   nitter = handleTest ./nitter.nix {};
   nix-config = handleTest ./nix-config.nix {};
   nix-ld = handleTest ./nix-ld.nix {};
-  nix-serve = handleTest ./nix-serve.nix {};
+  nix-misc = handleTest ./nix/misc.nix {};
+  nix-upgrade = handleTest ./nix/upgrade.nix {inherit (pkgs) nixVersions;};
+  nix-required-mounts = runTest ./nix-required-mounts;
+  nix-serve = runTest ./nix-serve.nix;
   nix-serve-ssh = handleTest ./nix-serve-ssh.nix {};
   nixops = handleTest ./nixops/default.nix {};
   nixos-generate-config = handleTest ./nixos-generate-config.nix {};
@@ -658,13 +709,16 @@ in {
   ntfy-sh = handleTest ./ntfy-sh.nix {};
   ntfy-sh-migration = handleTest ./ntfy-sh-migration.nix {};
   ntpd-rs = handleTest ./ntpd-rs.nix {};
+  nvidia-container-toolkit = runTest ./nvidia-container-toolkit.nix;
   nvmetcfg = handleTest ./nvmetcfg.nix {};
   nzbget = handleTest ./nzbget.nix {};
   nzbhydra2 = handleTest ./nzbhydra2.nix {};
   ocis = handleTest ./ocis.nix {};
   oddjobd = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./oddjobd.nix {};
   oh-my-zsh = handleTest ./oh-my-zsh.nix {};
-  ollama = handleTest ./ollama.nix {};
+  ollama = runTest ./ollama.nix;
+  ollama-cuda = runTestOn ["x86_64-linux" "aarch64-linux"] ./ollama-cuda.nix;
+  ollama-rocm = runTestOn ["x86_64-linux" "aarch64-linux"] ./ollama-rocm.nix;
   ombi = handleTest ./ombi.nix {};
   openarena = handleTest ./openarena.nix {};
   openldap = handleTest ./openldap.nix {};
@@ -684,6 +738,7 @@ in {
   outline = handleTest ./outline.nix {};
   image-contents = handleTest ./image-contents.nix {};
   openvscode-server = handleTest ./openvscode-server.nix {};
+  open-webui = runTest ./open-webui.nix;
   orangefs = handleTest ./orangefs.nix {};
   os-prober = handleTestOn ["x86_64-linux"] ./os-prober.nix {};
   osquery = handleTestOn ["x86_64-linux"] ./osquery.nix {};
@@ -710,6 +765,7 @@ in {
   pg_anonymizer = handleTest ./pg_anonymizer.nix {};
   pgadmin4 = handleTest ./pgadmin4.nix {};
   pgbouncer = handleTest ./pgbouncer.nix {};
+  pghero = runTest ./pghero.nix;
   pgjwt = handleTest ./pgjwt.nix {};
   pgmanage = handleTest ./pgmanage.nix {};
   pgvecto-rs = handleTest ./pgvecto-rs.nix {};
@@ -720,8 +776,10 @@ in {
   php81 = handleTest ./php { php = pkgs.php81; };
   php82 = handleTest ./php { php = pkgs.php82; };
   php83 = handleTest ./php { php = pkgs.php83; };
+  php84 = handleTest ./php { php = pkgs.php84; };
   phylactery = handleTest ./web-apps/phylactery.nix {};
   pict-rs = handleTest ./pict-rs.nix {};
+  pingvin-share = handleTest ./pingvin-share.nix {} ;
   pinnwand = handleTest ./pinnwand.nix {};
   plantuml-server = handleTest ./plantuml-server.nix {};
   plasma-bigscreen = handleTest ./plasma-bigscreen.nix {};
@@ -729,6 +787,7 @@ in {
   plasma6 = handleTest ./plasma6.nix {};
   plasma5-systemd-start = handleTest ./plasma5-systemd-start.nix {};
   plausible = handleTest ./plausible.nix {};
+  playwright-python = handleTest ./playwright-python.nix {};
   please = handleTest ./please.nix {};
   pleroma = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./pleroma.nix {};
   plikd = handleTest ./plikd.nix {};
@@ -747,6 +806,8 @@ in {
   postgresql = handleTest ./postgresql.nix {};
   postgresql-jit = handleTest ./postgresql-jit.nix {};
   postgresql-wal-receiver = handleTest ./postgresql-wal-receiver.nix {};
+  postgresql-tls-client-cert = handleTest ./postgresql-tls-client-cert.nix {};
+  postgresql-wal2json = handleTest ./postgresql-wal2json.nix {};
   powerdns = handleTest ./powerdns.nix {};
   powerdns-admin = handleTest ./powerdns-admin.nix {};
   power-profiles-daemon = handleTest ./power-profiles-daemon.nix {};
@@ -756,8 +817,9 @@ in {
   pretix = runTest ./web-apps/pretix.nix;
   printing-socket = handleTest ./printing.nix { socket = true; };
   printing-service = handleTest ./printing.nix { socket = false; };
+  private-gpt = handleTest ./private-gpt.nix {};
   privoxy = handleTest ./privoxy.nix {};
-  prometheus = handleTest ./prometheus.nix {};
+  prometheus = handleTest ./prometheus {};
   prometheus-exporters = handleTest ./prometheus-exporters.nix {};
   prosody = handleTest ./xmpp/prosody.nix {};
   prosody-mysql = handleTest ./xmpp/prosody-mysql.nix {};
@@ -772,26 +834,33 @@ in {
   qemu-vm-restrictnetwork = handleTest ./qemu-vm-restrictnetwork.nix {};
   qemu-vm-volatile-root = runTest ./qemu-vm-volatile-root.nix;
   qemu-vm-external-disk-image = runTest ./qemu-vm-external-disk-image.nix;
+  qemu-vm-store = runTest ./qemu-vm-store.nix;
   qgis = handleTest ./qgis.nix { qgisPackage = pkgs.qgis; };
   qgis-ltr = handleTest ./qgis.nix { qgisPackage = pkgs.qgis-ltr; };
   qownnotes = handleTest ./qownnotes.nix {};
-  qtile = handleTest ./qtile.nix {};
+  qtile = handleTestOn ["x86_64-linux" "aarch64-linux"] ./qtile/default.nix {};
   quake3 = handleTest ./quake3.nix {};
   quicktun = handleTest ./quicktun.nix {};
+  quickwit = handleTest ./quickwit.nix {};
   quorum = handleTest ./quorum.nix {};
   rabbitmq = handleTest ./rabbitmq.nix {};
   radarr = handleTest ./radarr.nix {};
   radicale = handleTest ./radicale.nix {};
+  radicle = runTest ./radicle.nix;
   ragnarwm = handleTest ./ragnarwm.nix {};
   rasdaemon = handleTest ./rasdaemon.nix {};
+  rathole = handleTest ./rathole.nix {};
   readarr = handleTest ./readarr.nix {};
+  realm = handleTest ./realm.nix {};
   redis = handleTest ./redis.nix {};
   redlib = handleTest ./redlib.nix {};
   redmine = handleTest ./redmine.nix {};
+  renovate = handleTest ./renovate.nix {};
   restartByActivationScript = handleTest ./restart-by-activation-script.nix {};
   restic-rest-server = handleTest ./restic-rest-server.nix {};
   restic = handleTest ./restic.nix {};
   retroarch = handleTest ./retroarch.nix {};
+  rke2 = handleTestOn ["aarch64-linux" "x86_64-linux"] ./rke2 {};
   rkvm = handleTest ./rkvm {};
   robustirc-bridge = handleTest ./robustirc-bridge.nix {};
   roundcube = handleTest ./roundcube.nix {};
@@ -803,6 +872,7 @@ in {
   rstudio-server = handleTest ./rstudio-server.nix {};
   rsyncd = handleTest ./rsyncd.nix {};
   rsyslogd = handleTest ./rsyslogd.nix {};
+  rtorrent = handleTest ./rtorrent.nix {};
   rxe = handleTest ./rxe.nix {};
   sabnzbd = handleTest ./sabnzbd.nix {};
   samba = handleTest ./samba.nix {};
@@ -815,7 +885,7 @@ in {
   scrutiny = handleTest ./scrutiny.nix {};
   sddm = handleTest ./sddm.nix {};
   seafile = handleTest ./seafile.nix {};
-  searx = handleTest ./searx.nix {};
+  searx = runTest ./searx.nix;
   seatd = handleTest ./seatd.nix {};
   service-runner = handleTest ./service-runner.nix {};
   sftpgo = runTest ./sftpgo.nix;
@@ -870,7 +940,9 @@ in {
   swap-random-encryption = handleTest ./swap-random-encryption.nix {};
   sway = handleTest ./sway.nix {};
   swayfx = handleTest ./swayfx.nix {};
-  switchTest = handleTest ./switch-test.nix {};
+  switchTest = handleTest ./switch-test.nix { ng = false; };
+  switchTestNg = handleTest ./switch-test.nix { ng = true; };
+  sx = handleTest ./sx.nix {};
   sympa = handleTest ./sympa.nix {};
   syncthing = handleTest ./syncthing.nix {};
   syncthing-no-settings = handleTest ./syncthing-no-settings.nix {};
@@ -883,7 +955,7 @@ in {
   systemd-binfmt = handleTestOn ["x86_64-linux"] ./systemd-binfmt.nix {};
   systemd-boot = handleTest ./systemd-boot.nix {};
   systemd-bpf = handleTest ./systemd-bpf.nix {};
-  systemd-confinement = handleTest ./systemd-confinement.nix {};
+  systemd-confinement = handleTest ./systemd-confinement {};
   systemd-coredump = handleTest ./systemd-coredump.nix {};
   systemd-cryptenroll = handleTest ./systemd-cryptenroll.nix {};
   systemd-credentials-tpm2 = handleTest ./systemd-credentials-tpm2.nix {};
@@ -922,6 +994,7 @@ in {
   systemd-oomd = handleTest ./systemd-oomd.nix {};
   systemd-portabled = handleTest ./systemd-portabled.nix {};
   systemd-repart = handleTest ./systemd-repart.nix {};
+  systemd-resolved = handleTest ./systemd-resolved.nix {};
   systemd-shutdown = handleTest ./systemd-shutdown.nix {};
   systemd-sysupdate = runTest ./systemd-sysupdate.nix;
   systemd-sysusers-mutable = runTest ./systemd-sysusers-mutable.nix;
@@ -935,17 +1008,22 @@ in {
   systemd-homed = handleTest ./systemd-homed.nix {};
   systemtap = handleTest ./systemtap.nix {};
   tandoor-recipes = handleTest ./tandoor-recipes.nix {};
+  tandoor-recipes-script-name = handleTest ./tandoor-recipes-script-name.nix {};
   tang = handleTest ./tang.nix {};
   taskserver = handleTest ./taskserver.nix {};
+  taskchampion-sync-server = handleTest ./taskchampion-sync-server.nix {};
   tayga = handleTest ./tayga.nix {};
   technitium-dns-server = handleTest ./technitium-dns-server.nix {};
   teeworlds = handleTest ./teeworlds.nix {};
   telegraf = handleTest ./telegraf.nix {};
   teleport = handleTest ./teleport.nix {};
+  teleports = runTest ./teleports.nix;
   thelounge = handleTest ./thelounge.nix {};
   terminal-emulators = handleTest ./terminal-emulators.nix {};
+  thanos = handleTest ./thanos.nix {};
   tiddlywiki = handleTest ./tiddlywiki.nix {};
   tigervnc = handleTest ./tigervnc.nix {};
+  tika = runTest ./tika.nix;
   timescaledb = handleTest ./timescaledb.nix {};
   timezone = handleTest ./timezone.nix {};
   tinc = handleTest ./tinc {};
@@ -958,7 +1036,7 @@ in {
   traefik = handleTestOn ["aarch64-linux" "x86_64-linux"] ./traefik.nix {};
   trafficserver = handleTest ./trafficserver.nix {};
   transfer-sh = handleTest ./transfer-sh.nix {};
-  transmission = handleTest ./transmission.nix { transmission = pkgs.transmission; };
+  transmission_3 = handleTest ./transmission.nix { transmission = pkgs.transmission_3; };
   transmission_4 = handleTest ./transmission.nix { transmission = pkgs.transmission_4; };
   # tracee requires bpf
   tracee = handleTestOn ["x86_64-linux"] ./tracee.nix {};
@@ -971,6 +1049,7 @@ in {
   txredisapi = handleTest ./txredisapi.nix {};
   tuptime = handleTest ./tuptime.nix {};
   turbovnc-headless-server = handleTest ./turbovnc-headless-server.nix {};
+  turn-rs = handleTest ./turn-rs.nix {};
   tuxguitar = handleTest ./tuxguitar.nix {};
   twingate = runTest ./twingate.nix;
   typesense = handleTest ./typesense.nix {};
@@ -986,6 +1065,11 @@ in {
   uptime-kuma = handleTest ./uptime-kuma.nix {};
   urn-timer = handleTest ./urn-timer.nix {};
   usbguard = handleTest ./usbguard.nix {};
+  userborn = runTest ./userborn.nix;
+  userborn-mutable-users = runTest ./userborn-mutable-users.nix;
+  userborn-immutable-users = runTest ./userborn-immutable-users.nix;
+  userborn-mutable-etc = runTest ./userborn-mutable-etc.nix;
+  userborn-immutable-etc = runTest ./userborn-immutable-etc.nix;
   user-activation-scripts = handleTest ./user-activation-scripts.nix {};
   user-expiry = runTest ./user-expiry.nix;
   user-home-mode = handleTest ./user-home-mode.nix {};
@@ -994,12 +1078,13 @@ in {
   v2ray = handleTest ./v2ray.nix {};
   varnish60 = handleTest ./varnish.nix { package = pkgs.varnish60; };
   varnish74 = handleTest ./varnish.nix { package = pkgs.varnish74; };
+  varnish75 = handleTest ./varnish.nix { package = pkgs.varnish75; };
   vault = handleTest ./vault.nix {};
   vault-agent = handleTest ./vault-agent.nix {};
   vault-dev = handleTest ./vault-dev.nix {};
   vault-postgresql = handleTest ./vault-postgresql.nix {};
-  vaultwarden = handleTest ./vaultwarden.nix {};
-  vector = handleTest ./vector.nix {};
+  vaultwarden = discoverTests (import ./vaultwarden.nix);
+  vector = handleTest ./vector {};
   vengi-tools = handleTest ./vengi-tools.nix {};
   victoriametrics = handleTest ./victoriametrics.nix {};
   vikunja = handleTest ./vikunja.nix {};
@@ -1012,16 +1097,19 @@ in {
   wastebin = handleTest ./wastebin.nix {};
   watchdogd = handleTest ./watchdogd.nix {};
   webhook = runTest ./webhook.nix;
+  weblate = handleTest ./web-apps/weblate.nix {};
   wiki-js = handleTest ./wiki-js.nix {};
   wine = handleTest ./wine.nix {};
   wireguard = handleTest ./wireguard {};
+  wg-access-server = handleTest ./wg-access-server.nix {};
   without-nix = handleTest ./without-nix.nix {};
   wmderland = handleTest ./wmderland.nix {};
   workout-tracker = handleTest ./workout-tracker.nix {};
-  wpa_supplicant = handleTest ./wpa_supplicant.nix {};
+  wpa_supplicant = import ./wpa_supplicant.nix { inherit pkgs runTest; };
   wordpress = handleTest ./wordpress.nix {};
   wrappers = handleTest ./wrappers.nix {};
   writefreely = handleTest ./web-apps/writefreely.nix {};
+  wstunnel = runTest ./wstunnel.nix;
   xandikos = handleTest ./xandikos.nix {};
   xautolock = handleTest ./xautolock.nix {};
   xfce = handleTest ./xfce.nix {};
@@ -1035,7 +1123,9 @@ in {
   xterm = handleTest ./xterm.nix {};
   xxh = handleTest ./xxh.nix {};
   yabar = handleTest ./yabar.nix {};
+  ydotool = handleTest ./ydotool.nix {};
   yggdrasil = handleTest ./yggdrasil.nix {};
+  your_spotify = handleTest ./your_spotify.nix {};
   zammad = handleTest ./zammad.nix {};
   zeronet-conservancy = handleTest ./zeronet-conservancy.nix {};
   zfs = handleTest ./zfs.nix {};
diff --git a/nixos/tests/alloy.nix b/nixos/tests/alloy.nix
new file mode 100644
index 0000000000000..d87492127d5bb
--- /dev/null
+++ b/nixos/tests/alloy.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+  let
+    nodes = {
+      machine = {
+        services.alloy = {
+          enable = true;
+        };
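+        # an empty configuration file is enough for Alloy to start and serve its health endpoint on the default HTTP port (12345)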
+        environment.etc."alloy/config.alloy".text = "";
+      };
+    };
+  in
+  {
+    name = "alloy";
+
+    meta = with lib.maintainers; {
+      maintainers = [ flokli hbjydev ];
+    };
+
+    inherit nodes;
+
+    testScript = ''
+      start_all()
+
+      machine.wait_for_unit("alloy.service")
+      machine.wait_for_open_port(12345)
+      machine.succeed(
+          "curl -sSfN http://127.0.0.1:12345/-/healthy"
+      )
+      machine.shutdown()
+    '';
+  })
diff --git a/nixos/tests/appliance-repart-image-verity-store.nix b/nixos/tests/appliance-repart-image-verity-store.nix
new file mode 100644
index 0000000000000..3834d0a468ab3
--- /dev/null
+++ b/nixos/tests/appliance-repart-image-verity-store.nix
@@ -0,0 +1,130 @@
+# similar to the appliance-repart-image test but with a dm-verity
+# protected nix store and tmpfs as rootfs
+{ lib, ... }:
+
+{
+  name = "appliance-repart-image-verity-store";
+
+  meta.maintainers = with lib.maintainers; [
+    nikstur
+    willibutz
+  ];
+
+  nodes.machine =
+    {
+      config,
+      lib,
+      pkgs,
+      ...
+    }:
+    let
+      inherit (config.image.repart.verityStore) partitionIds;
+    in
+    {
+      imports = [ ../modules/image/repart.nix ];
+
+      virtualisation.fileSystems = lib.mkVMOverride {
+        "/" = {
+          fsType = "tmpfs";
+          options = [ "mode=0755" ];
+        };
+
+        "/usr" = {
+          device = "/dev/mapper/usr";
+          # explicitly mount it read-only, otherwise systemd-remount-fs will fail
+          options = [ "ro" ];
+          fsType = config.image.repart.partitions.${partitionIds.store}.repartConfig.Format;
+        };
+
+        # bind-mount the store
+        "/nix/store" = {
+          device = "/usr/nix/store";
+          options = [ "bind" ];
+        };
+      };
+
+      image.repart = {
+        verityStore = {
+          enable = true;
+          # by default the module works with systemd-boot; for simplicity, this test boots the UKI directly
+          ukiPath = "/EFI/BOOT/BOOT${lib.toUpper config.nixpkgs.hostPlatform.efiArch}.EFI";
+        };
+
+        name = "appliance-verity-store-image";
+
+        partitions = {
+          ${partitionIds.esp} = {
+            # the UKI is injected into this partition by the verityStore module
+            repartConfig = {
+              Type = "esp";
+              Format = "vfat";
+              SizeMinBytes = if config.nixpkgs.hostPlatform.isx86_64 then "64M" else "96M";
+            };
+          };
+          ${partitionIds.store-verity}.repartConfig = {
+            Minimize = "best";
+          };
+          ${partitionIds.store}.repartConfig = {
+            Minimize = "best";
+          };
+        };
+      };
+
+      virtualisation = {
+        directBoot.enable = false;
+        mountHostNixStore = false;
+        useEFIBoot = true;
+      };
+
+      boot = {
+        loader.grub.enable = false;
+        initrd.systemd.enable = true;
+      };
+
+      system.image = {
+        id = "nixos-appliance";
+        version = "1";
+      };
+
+      # don't create /usr/bin/env
+      # this would require some extra work on read-only /usr
+      # and it is not a strict necessity
+      system.activationScripts.usrbinenv = lib.mkForce "";
+    };
+
+  testScript =
+    { nodes, ... }: # python
+    ''
+      import os
+      import subprocess
+      import tempfile
+
+      tmp_disk_image = tempfile.NamedTemporaryFile()
+
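+      # create a qcow2 overlay backed by the read-only appliance image so the VM gets a writable disk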
+      subprocess.run([
+        "${nodes.machine.virtualisation.qemu.package}/bin/qemu-img",
+        "create",
+        "-f",
+        "qcow2",
+        "-b",
+        "${nodes.machine.system.build.finalImage}/${nodes.machine.image.repart.imageFile}",
+        "-F",
+        "raw",
+        tmp_disk_image.name,
+      ])
+
+      os.environ['NIX_DISK_IMAGE'] = tmp_disk_image.name
+
+      machine.wait_for_unit("default.target")
+
+      with subtest("Running with volatile root"):
+        machine.succeed("findmnt --kernel --type tmpfs /")
+
+      with subtest("/nix/store is backed by dm-verity protected fs"):
+        verity_info = machine.succeed("dmsetup info --target verity usr")
+        assert "ACTIVE" in verity_info,f"unexpected verity info: {verity_info}"
+
+        backing_device = machine.succeed("df --output=source /nix/store | tail -n1").strip()
+        assert "/dev/mapper/usr" == backing_device,"unexpected backing device: {backing_device}"
+    '';
+}
diff --git a/nixos/tests/archi.nix b/nixos/tests/archi.nix
index 59f2e940c0050..a8cb1c503d4f7 100644
--- a/nixos/tests/archi.nix
+++ b/nixos/tests/archi.nix
@@ -24,7 +24,9 @@ import ./make-test-python.nix ({ lib, ... }: {
          machine.wait_for_window("Archi")
 
          # wait till main UI is open
-         machine.wait_for_text("Welcome to Archi")
+         # since OCR seems to be buggy, wait_for_text was replaced by sleep; see issue #302965
+         # machine.wait_for_text("Welcome to Archi")
+         machine.sleep(20)
 
          machine.screenshot("welcome-screen")
   '';
diff --git a/nixos/tests/aria2.nix b/nixos/tests/aria2.nix
new file mode 100644
index 0000000000000..48fe2094b5dcf
--- /dev/null
+++ b/nixos/tests/aria2.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  rpcSecret = "supersecret";
+  rpc-listen-port = 6800;
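+  # JSON-RPC payload for aria2.getVersion; aria2 authenticates RPC calls with a "token:<secret>" parameter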
+  curlBody = {
+    jsonrpc = 2.0;
+    id = 1;
+    method = "aria2.getVersion";
+    params = [ "token:${rpcSecret}" ];
+  };
+in
+rec {
+  name = "aria2";
+
+  nodes.machine = {
+    environment.etc."aria2Rpc".text = rpcSecret;
+    services.aria2 = {
+      enable = true;
+      rpcSecretFile = "/etc/aria2Rpc";
+      settings = {
+        inherit rpc-listen-port;
+        allow-overwrite = false;
+        check-integrity = true;
+        console-log-level = "warn";
+        listen-port = [{ from = 20000; to = 20010; } { from = 22222; to = 22222; }];
+        max-concurrent-downloads = 50;
+        seed-ratio = 1.2;
+        summary-interval = 0;
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("aria2.service")
+    curl_cmd = 'curl --fail-with-body -X POST -H "Content-Type: application/json" \
+                -d \'${builtins.toJSON curlBody}\' http://localhost:${toString rpc-listen-port}/jsonrpc'
+    print(machine.wait_until_succeeds(curl_cmd, timeout=10))
+    machine.shutdown()
+  '';
+
+  meta.maintainers = [ pkgs.lib.maintainers.timhae ];
+})
diff --git a/nixos/tests/armagetronad.nix b/nixos/tests/armagetronad.nix
index d59827354b771..b657893fc9eef 100644
--- a/nixos/tests/armagetronad.nix
+++ b/nixos/tests/armagetronad.nix
@@ -12,8 +12,8 @@ let
     { pkgs, ... }:
 
     { imports = [ ./common/user-account.nix ./common/x11.nix ];
-      hardware.opengl.driSupport = true;
-      virtualisation.memorySize = 256;
+      hardware.graphics.enable = true;
+      virtualisation.memorySize = 384;
       environment = {
         systemPackages = [ pkgs.armagetronad ];
         variables.XAUTHORITY = "/home/${user}/.Xauthority";
@@ -208,7 +208,7 @@ makeTest {
         barrier.wait()
 
       # Get to the Server Bookmarks screen on both clients. This takes a while so do it asynchronously.
-      barrier = threading.Barrier(3, timeout=120)
+      barrier = threading.Barrier(len(clients) + 1, timeout=240)
       for client in clients:
         threading.Thread(target=client_setup, args=(client, servers, barrier)).start()
       barrier.wait()
diff --git a/nixos/tests/artalk.nix b/nixos/tests/artalk.nix
new file mode 100644
index 0000000000000..1338e5cd380c6
--- /dev/null
+++ b/nixos/tests/artalk.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix (
+  { lib, pkgs, ... }:
+  {
+
+    name = "artalk";
+
+    meta = {
+      maintainers = with lib.maintainers; [ moraxyc ];
+    };
+
+    nodes.machine =
+      { pkgs, ... }:
+      {
+        environment.systemPackages = [ pkgs.curl ];
+        services.artalk = {
+          enable = true;
+        };
+      };
+
+    testScript = ''
+      machine.wait_for_unit("artalk.service")
+
+      machine.wait_for_open_port(23366)
+
+      machine.succeed("curl --fail --max-time 10 http://127.0.0.1:23366/")
+    '';
+  }
+)
diff --git a/nixos/tests/atop.nix b/nixos/tests/atop.nix
index f9335eecc20e5..f2c488c591703 100644
--- a/nixos/tests/atop.nix
+++ b/nixos/tests/atop.nix
@@ -24,7 +24,7 @@ let assertions = rec {
 
     with subtest("binary should report the correct version"):
         pkgver = "${pkgs.atop.version}"
-        ver = re.sub(r'(?s)^Version: (\d\.\d\.\d).*', r'\1', machine.succeed("atop -V"))
+        ver = re.sub(r'(?s)^Version: (\d+\.\d+\.\d+).*', r'\1', machine.succeed("atop -V"))
         assert ver == pkgver, f"Version is `{ver}`, expected `{pkgver}`"
   '';
   atoprc = contents:
@@ -103,6 +103,9 @@ let assertions = rec {
           machine.fail("type -p atopgpud")
     '';
 };
+meta = {
+  timeout = 600;
+};
 in
 {
   justThePackage = makeTest {
@@ -120,6 +123,7 @@ in
       (netatop false)
       (atopgpu false)
     ];
+    inherit meta;
   };
   defaults = makeTest {
     name = "atop-defaults";
@@ -138,6 +142,7 @@ in
       (netatop false)
       (atopgpu false)
     ];
+    inherit meta;
   };
   minimal = makeTest {
     name = "atop-minimal";
@@ -159,6 +164,7 @@ in
       (netatop false)
       (atopgpu false)
     ];
+    inherit meta;
   };
   netatop = makeTest {
     name = "atop-netatop";
@@ -178,6 +184,7 @@ in
       (netatop true)
       (atopgpu false)
     ];
+    inherit meta;
   };
   atopgpu = makeTest {
     name = "atop-atopgpu";
@@ -197,6 +204,7 @@ in
       (netatop false)
       (atopgpu true)
     ];
+    inherit meta;
   };
   everything = makeTest {
     name = "atop-everything";
@@ -222,5 +230,6 @@ in
       (netatop true)
       (atopgpu true)
     ];
+    inherit meta;
   };
 }
diff --git a/nixos/tests/audiobookshelf.nix b/nixos/tests/audiobookshelf.nix
index 64bd415160ee0..ccd830eb28daa 100644
--- a/nixos/tests/audiobookshelf.nix
+++ b/nixos/tests/audiobookshelf.nix
@@ -1,10 +1,7 @@
 import ./make-test-python.nix ({ lib, ... }:
-
-with lib;
-
 {
   name = "audiobookshelf";
-  meta.maintainers = with maintainers; [ wietsedv ];
+  meta.maintainers = with lib.maintainers; [ wietsedv ];
 
   nodes.machine =
     { pkgs, ... }:
diff --git a/nixos/tests/avahi.nix b/nixos/tests/avahi.nix
index d8f4d13340fbc..4ae2f919f2f7d 100644
--- a/nixos/tests/avahi.nix
+++ b/nixos/tests/avahi.nix
@@ -9,7 +9,7 @@
 import ./make-test-python.nix {
   name = "avahi";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco ];
+    maintainers = [ ];
   };
 
   nodes = let
diff --git a/nixos/tests/ayatana-indicators.nix b/nixos/tests/ayatana-indicators.nix
index 5709ad2a1af69..8d134e1af2eec 100644
--- a/nixos/tests/ayatana-indicators.nix
+++ b/nixos/tests/ayatana-indicators.nix
@@ -1,111 +1,147 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }: let
+{ pkgs, lib, ... }:
+let
   user = "alice";
-in {
+in
+{
   name = "ayatana-indicators";
 
   meta = {
     maintainers = lib.teams.lomiri.members;
   };
 
-  nodes.machine = { config, ... }: {
-    imports = [
-      ./common/auto.nix
-      ./common/user-account.nix
-    ];
-
-    test-support.displayManager.auto = {
-      enable = true;
-      inherit user;
-    };
-
-    services.xserver = {
-      enable = true;
-      desktopManager.mate.enable = true;
-    };
-    services.displayManager.defaultSession = lib.mkForce "mate";
-
-    services.ayatana-indicators = {
-      enable = true;
-      packages = with pkgs; [
-        ayatana-indicator-datetime
-        ayatana-indicator-messages
-        ayatana-indicator-session
-      ] ++ (with pkgs.lomiri; [
-        lomiri-indicator-network
-        telephony-service
-      ]);
+  nodes.machine =
+    { config, ... }:
+    {
+      imports = [
+        ./common/auto.nix
+        ./common/user-account.nix
+      ];
+
+      test-support.displayManager.auto = {
+        enable = true;
+        inherit user;
+      };
+
+      services.xserver = {
+        enable = true;
+        desktopManager.mate.enable = true;
+      };
+      services.displayManager.defaultSession = lib.mkForce "mate";
+
+      services.ayatana-indicators = {
+        enable = true;
+        packages =
+          with pkgs;
+          [
+            ayatana-indicator-bluetooth
+            ayatana-indicator-datetime
+            ayatana-indicator-display
+            ayatana-indicator-messages
+            ayatana-indicator-power
+            ayatana-indicator-session
+            ayatana-indicator-sound
+          ]
+          ++ (with pkgs.lomiri; [
+            lomiri-indicator-network
+            telephony-service
+          ]);
+      };
+
+      # Setup needed by some indicators
+
+      services.accounts-daemon.enable = true; # messages
+
+      # Lomiri-ish setup for Lomiri indicators
+      # TODO move into a Lomiri module, once the package set is far enough for the DE to start
+
+      networking.networkmanager.enable = true; # lomiri-network-indicator
+      # TODO potentially urfkill for lomiri-network-indicator?
+
+      services.dbus.packages = with pkgs.lomiri; [ libusermetrics ];
+
+      environment.systemPackages = with pkgs.lomiri; [ lomiri-schemas ];
+
+      services.telepathy.enable = true;
+
+      users.users.usermetrics = {
+        group = "usermetrics";
+        home = "/var/lib/usermetrics";
+        createHome = true;
+        isSystemUser = true;
+      };
+
+      users.groups.usermetrics = { };
     };
 
-    # Setup needed by some indicators
-
-    services.accounts-daemon.enable = true; # messages
-
-    # Lomiri-ish setup for Lomiri indicators
-    # TODO move into a Lomiri module, once the package set is far enough for the DE to start
-
-    networking.networkmanager.enable = true; # lomiri-network-indicator
-    # TODO potentially urfkill for lomiri-network-indicator?
-
-    services.dbus.packages = with pkgs.lomiri; [
-      libusermetrics
-    ];
-
-    environment.systemPackages = with pkgs.lomiri; [
-      lomiri-schemas
-    ];
-
-    services.telepathy.enable = true;
-
-    users.users.usermetrics = {
-      group = "usermetrics";
-      home = "/var/lib/usermetrics";
-      createHome = true;
-      isSystemUser = true;
-    };
-
-    users.groups.usermetrics = { };
-  };
-
   # TODO session indicator starts up in a semi-broken state, but works fine after a restart. maybe being started before graphical session is truly up & ready?
-  testScript = { nodes, ... }: let
-    runCommandOverServiceList = list: command:
-      lib.strings.concatMapStringsSep "\n" command list;
-
-    runCommandOverAyatanaIndicators = runCommandOverServiceList
-      (builtins.filter
-        (service: !(lib.strings.hasPrefix "lomiri" service || lib.strings.hasPrefix "telephony-service" service))
-        nodes.machine.systemd.user.targets."ayatana-indicators".wants);
-
-    runCommandOverAllIndicators = runCommandOverServiceList
-      nodes.machine.systemd.user.targets."ayatana-indicators".wants;
-  in ''
-    start_all()
-    machine.wait_for_x()
-
-    # Desktop environment should reach graphical-session.target
-    machine.wait_for_unit("graphical-session.target", "${user}")
-
-    # MATE relies on XDG autostart to bring up the indicators.
-    # Not sure *when* XDG autostart fires them up, and awaiting pgrep success seems to misbehave?
-    machine.sleep(10)
-
-    # Now check if all indicators were brought up successfully, and kill them for later
-  '' + (runCommandOverAyatanaIndicators (service: let serviceExec = builtins.replaceStrings [ "." ] [ "-" ] service; in ''
-    machine.succeed("pgrep -u ${user} -f ${serviceExec}")
-    machine.succeed("pkill -f ${serviceExec}")
-  '')) + ''
-
-    # Ayatana target is the preferred way of starting up indicators on SystemD session, the graphical session is responsible for starting this if it supports them.
-    # Mate currently doesn't do this, so start it manually for checking (https://github.com/mate-desktop/mate-indicator-applet/issues/63)
-    machine.systemctl("start ayatana-indicators.target", "${user}")
-    machine.wait_for_unit("ayatana-indicators.target", "${user}")
-
-    # Let all indicator services do their startups, potential post-launch crash & restart cycles so we can properly check for failures
-    # Not sure if there's a better way of awaiting this without false-positive potential
-    machine.sleep(10)
-
-    # Now check if all indicator services were brought up successfully
-  '' + runCommandOverAllIndicators (service: ''
-    machine.wait_for_unit("${service}", "${user}")
-  '');
-})
+  testScript =
+    { nodes, ... }:
+    let
+      runCommandOverServiceList = list: command: lib.strings.concatMapStringsSep "\n" command list;
+
+      runCommandOverAyatanaIndicators = runCommandOverServiceList
+        nodes.machine.systemd.user.targets.ayatana-indicators.wants;
+
+      runCommandOverLomiriIndicators = runCommandOverServiceList nodes.machine.systemd.user.targets.lomiri-indicators.wants;
+    in
+    ''
+      start_all()
+      machine.wait_for_x()
+
+      # Desktop environment should reach graphical-session.target
+      machine.wait_for_unit("graphical-session.target", "${user}")
+
+      # MATE relies on XDG autostart to bring up the indicators.
+      # Not sure *when* XDG autostart fires them up, and awaiting pgrep success seems to misbehave?
+      machine.sleep(10)
+
+      # Now check if all indicators were brought up successfully, and kill them for later
+    ''
+    + (runCommandOverAyatanaIndicators (
+      service:
+      let
+        serviceExec = builtins.replaceStrings [ "." ] [ "-" ] service;
+      in
+      ''
+        machine.wait_until_succeeds("pgrep -u ${user} -f ${serviceExec}")
+        machine.succeed("pkill -f ${serviceExec}")
+      ''
+    ))
+    + ''
+
+      # The Ayatana target is the preferred way of starting up indicators in a systemd session; the graphical session is responsible for starting it if it supports them.
+      # Mate currently doesn't do this, so start it manually for checking (https://github.com/mate-desktop/mate-indicator-applet/issues/63)
+      machine.systemctl("start ayatana-indicators.target", "${user}")
+      machine.wait_for_unit("ayatana-indicators.target", "${user}")
+
+      # Let all indicator services finish their startups and any post-launch crash & restart cycles so we can properly check for failures
+      # Not sure if there's a better way of awaiting this without false-positive potential
+      machine.sleep(10)
+
+      # Now check if all indicator services were brought up successfully
+    ''
+    + runCommandOverAyatanaIndicators (service: ''
+      machine.wait_for_unit("${service}", "${user}")
+    '')
+    + ''
+      # Stop the target
+      machine.systemctl("stop ayatana-indicators.target", "${user}")
+
+      # Let all indicator services do their shutdowns
+      # Not sure if there's a better way of awaiting this without false-positive potential
+      machine.sleep(10)
+
+      # Lomiri uses a different target, which launches a slightly different set of indicators
+      machine.systemctl("start lomiri-indicators.target", "${user}")
+      machine.wait_for_unit("lomiri-indicators.target", "${user}")
+
+      # Let all indicator services finish their startups and any post-launch crash & restart cycles so we can properly check for failures
+      # Not sure if there's a better way of awaiting this without false-positive potential
+      machine.sleep(10)
+
+      # Now check if all indicator services were brought up successfully
+    ''
+    + runCommandOverLomiriIndicators (service: ''
+      machine.wait_for_unit("${service}", "${user}")
+    '');
+}
diff --git a/nixos/tests/benchexec.nix b/nixos/tests/benchexec.nix
new file mode 100644
index 0000000000000..3fc9ebc2c35f5
--- /dev/null
+++ b/nixos/tests/benchexec.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  user = "alice";
+in
+{
+  name = "benchexec";
+
+  nodes.benchexec = {
+    imports = [ ./common/user-account.nix ];
+
+    programs.benchexec = {
+      enable = true;
+      users = [ user ];
+    };
+  };
+
+  testScript = { ... }:
+    let
+      runexec = lib.getExe' pkgs.benchexec "runexec";
+      echo = builtins.toString pkgs.benchexec;
+      test = lib.getExe (pkgs.writeShellApplication rec {
+        name = "test";
+        meta.mainProgram = name;
+        text = "echo '${echo}'";
+      });
+      wd = "/tmp";
+      stdout = "${wd}/runexec.out";
+      stderr = "${wd}/runexec.err";
+    in
+    ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      benchexec.succeed(''''\
+          systemd-run \
+            --property='StandardOutput=file:${stdout}' \
+            --property='StandardError=file:${stderr}' \
+            --unit=runexec --wait --user --machine='${user}@' \
+            --working-directory ${wd} \
+          '${runexec}' \
+            --debug \
+            --read-only-dir / \
+            --hidden-dir /home \
+            '${test}' \
+      '''')
+      benchexec.succeed("grep -s '${echo}' ${wd}/output.log")
+      benchexec.succeed("test \"$(grep -Ec '((start|wall|cpu)time|memory)=' ${stdout})\" = 4")
+      benchexec.succeed("! grep -E '(WARNING|ERROR)' ${stderr}")
+    '';
+
+  interactive.nodes.benchexec.services.kmscon = {
+    enable = true;
+    fonts = [{ name = "Fira Code"; package = pkgs.fira-code; }];
+  };
+})
diff --git a/nixos/tests/bind.nix b/nixos/tests/bind.nix
index 15accbd49db43..95a9fc4e58bbf 100644
--- a/nixos/tests/bind.nix
+++ b/nixos/tests/bind.nix
@@ -22,7 +22,6 @@ import ./make-test-python.nix {
 
   testScript = ''
     machine.wait_for_unit("bind.service")
-    machine.wait_for_open_port(53)
     machine.succeed("host 192.168.0.1 127.0.0.1 | grep -qF ns.example.org")
   '';
 }
diff --git a/nixos/tests/bittorrent.nix b/nixos/tests/bittorrent.nix
index 473b05d4c98e8..83d9168a6fa59 100644
--- a/nixos/tests/bittorrent.nix
+++ b/nixos/tests/bittorrent.nix
@@ -6,163 +6,199 @@
 # which only works if the first client successfully uses the UPnP-IGD
 # protocol to poke a hole in the NAT.
 
-import ./make-test-python.nix ({ pkgs, ... }:
-
-let
-
-  # Some random file to serve.
-  file = pkgs.hello.src;
-
-  internalRouterAddress = "192.168.3.1";
-  internalClient1Address = "192.168.3.2";
-  externalRouterAddress = "80.100.100.1";
-  externalClient2Address = "80.100.100.2";
-  externalTrackerAddress = "80.100.100.3";
-
-  download-dir = "/var/lib/transmission/Downloads";
-  transmissionConfig = { ... }: {
-    environment.systemPackages = [ pkgs.transmission ];
-    services.transmission = {
-      enable = true;
-      settings = {
-        dht-enabled = false;
-        message-level = 2;
-        inherit download-dir;
-      };
-    };
-  };
-in
-
-{
-  name = "bittorrent";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ domenkozar eelco rob bobvanderlinden ];
-  };
-
-  nodes = {
-    tracker = { pkgs, ... }: {
-      imports = [ transmissionConfig ];
-
-      virtualisation.vlans = [ 1 ];
-      networking.firewall.enable = false;
-      networking.interfaces.eth1.ipv4.addresses = [
-        { address = externalTrackerAddress; prefixLength = 24; }
-      ];
-
-      # We need Apache on the tracker to serve the torrents.
-      services.httpd = {
-        enable = true;
-        virtualHosts = {
-          "torrentserver.org" = {
-            adminAddr = "foo@example.org";
-            documentRoot = "/tmp";
+import ./make-test-python.nix (
+  { pkgs, ... }:
+
+  let
+
+    # Some random file to serve.
+    file = pkgs.hello.src;
+
+    internalRouterAddress = "192.168.3.1";
+    internalClient1Address = "192.168.3.2";
+    externalRouterAddress = "80.100.100.1";
+    externalClient2Address = "80.100.100.2";
+    externalTrackerAddress = "80.100.100.3";
+
+    download-dir = "/var/lib/transmission/Downloads";
+    transmissionConfig =
+      { pkgs, ... }:
+      {
+        environment.systemPackages = [ pkgs.transmission_3 ];
+        services.transmission = {
+          enable = true;
+          settings = {
+            dht-enabled = false;
+            message-level = 2;
+            inherit download-dir;
           };
         };
       };
-      services.opentracker.enable = true;
-    };
-
-    router = { pkgs, nodes, ... }: {
-      virtualisation.vlans = [ 1 2 ];
-      networking.nat.enable = true;
-      networking.nat.internalInterfaces = [ "eth2" ];
-      networking.nat.externalInterface = "eth1";
-      networking.firewall.enable = true;
-      networking.firewall.trustedInterfaces = [ "eth2" ];
-      networking.interfaces.eth0.ipv4.addresses = [];
-      networking.interfaces.eth1.ipv4.addresses = [
-        { address = externalRouterAddress; prefixLength = 24; }
-      ];
-      networking.interfaces.eth2.ipv4.addresses = [
-        { address = internalRouterAddress; prefixLength = 24; }
+  in
+
+  {
+    name = "bittorrent";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [
+        domenkozar
+        rob
+        bobvanderlinden
       ];
-      services.miniupnpd = {
-        enable = true;
-        externalInterface = "eth1";
-        internalIPs = [ "eth2" ];
-        appendConfig = ''
-          ext_ip=${externalRouterAddress}
-        '';
-      };
     };
 
-    client1 = { pkgs, nodes, ... }: {
-      imports = [ transmissionConfig ];
-      environment.systemPackages = [ pkgs.miniupnpc ];
+    nodes = {
+      tracker =
+        { pkgs, ... }:
+        {
+          imports = [ transmissionConfig ];
+
+          virtualisation.vlans = [ 1 ];
+          networking.firewall.enable = false;
+          networking.interfaces.eth1.ipv4.addresses = [
+            {
+              address = externalTrackerAddress;
+              prefixLength = 24;
+            }
+          ];
+
+          # We need Apache on the tracker to serve the torrents.
+          services.httpd = {
+            enable = true;
+            virtualHosts = {
+              "torrentserver.org" = {
+                adminAddr = "foo@example.org";
+                documentRoot = "/tmp";
+              };
+            };
+          };
+          services.opentracker.enable = true;
+        };
 
-      virtualisation.vlans = [ 2 ];
-      networking.interfaces.eth0.ipv4.addresses = [];
-      networking.interfaces.eth1.ipv4.addresses = [
-        { address = internalClient1Address; prefixLength = 24; }
-      ];
-      networking.defaultGateway = internalRouterAddress;
-      networking.firewall.enable = false;
-    };
+      router =
+        { pkgs, nodes, ... }:
+        {
+          virtualisation.vlans = [
+            1
+            2
+          ];
+          networking.nat.enable = true;
+          networking.nat.internalInterfaces = [ "eth2" ];
+          networking.nat.externalInterface = "eth1";
+          networking.firewall.enable = true;
+          networking.firewall.trustedInterfaces = [ "eth2" ];
+          networking.interfaces.eth0.ipv4.addresses = [ ];
+          networking.interfaces.eth1.ipv4.addresses = [
+            {
+              address = externalRouterAddress;
+              prefixLength = 24;
+            }
+          ];
+          networking.interfaces.eth2.ipv4.addresses = [
+            {
+              address = internalRouterAddress;
+              prefixLength = 24;
+            }
+          ];
+          services.miniupnpd = {
+            enable = true;
+            externalInterface = "eth1";
+            internalIPs = [ "eth2" ];
+            appendConfig = ''
+              ext_ip=${externalRouterAddress}
+            '';
+          };
+        };
 
-    client2 = { pkgs, ... }: {
-      imports = [ transmissionConfig ];
+      client1 =
+        { pkgs, nodes, ... }:
+        {
+          imports = [ transmissionConfig ];
+          environment.systemPackages = [ pkgs.miniupnpc ];
+
+          virtualisation.vlans = [ 2 ];
+          networking.interfaces.eth0.ipv4.addresses = [ ];
+          networking.interfaces.eth1.ipv4.addresses = [
+            {
+              address = internalClient1Address;
+              prefixLength = 24;
+            }
+          ];
+          networking.defaultGateway = internalRouterAddress;
+          networking.firewall.enable = false;
+        };
 
-      virtualisation.vlans = [ 1 ];
-      networking.interfaces.eth0.ipv4.addresses = [];
-      networking.interfaces.eth1.ipv4.addresses = [
-        { address = externalClient2Address; prefixLength = 24; }
-      ];
-      networking.firewall.enable = false;
+      client2 =
+        { pkgs, ... }:
+        {
+          imports = [ transmissionConfig ];
+
+          virtualisation.vlans = [ 1 ];
+          networking.interfaces.eth0.ipv4.addresses = [ ];
+          networking.interfaces.eth1.ipv4.addresses = [
+            {
+              address = externalClient2Address;
+              prefixLength = 24;
+            }
+          ];
+          networking.firewall.enable = false;
+        };
     };
-  };
-
-  testScript = { nodes, ... }: ''
-      start_all()
-
-      # Wait for network and miniupnpd.
-      router.systemctl("start network-online.target")
-      router.wait_for_unit("network-online.target")
-      router.wait_for_unit("miniupnpd")
-
-      # Create the torrent.
-      tracker.succeed("mkdir ${download-dir}/data")
-      tracker.succeed(
-          "cp ${file} ${download-dir}/data/test.tar.bz2"
-      )
-      tracker.succeed(
-          "transmission-create ${download-dir}/data/test.tar.bz2 --private --tracker http://${externalTrackerAddress}:6969/announce --outfile /tmp/test.torrent"
-      )
-      tracker.succeed("chmod 644 /tmp/test.torrent")
-
-      # Start the tracker.  !!! use a less crappy tracker
-      tracker.systemctl("start network-online.target")
-      tracker.wait_for_unit("network-online.target")
-      tracker.wait_for_unit("opentracker.service")
-      tracker.wait_for_open_port(6969)
-
-      # Start the initial seeder.
-      tracker.succeed(
-          "transmission-remote --add /tmp/test.torrent --no-portmap --no-dht --download-dir ${download-dir}/data"
-      )
-
-      # Now we should be able to download from the client behind the NAT.
-      tracker.wait_for_unit("httpd")
-      client1.systemctl("start network-online.target")
-      client1.wait_for_unit("network-online.target")
-      client1.succeed("transmission-remote --add http://${externalTrackerAddress}/test.torrent >&2 &")
-      client1.wait_for_file("${download-dir}/test.tar.bz2")
-      client1.succeed(
-          "cmp ${download-dir}/test.tar.bz2 ${file}"
-      )
-
-      # Bring down the initial seeder.
-      tracker.stop_job("transmission")
-
-      # Now download from the second client.  This can only succeed if
-      # the first client created a NAT hole in the router.
-      client2.systemctl("start network-online.target")
-      client2.wait_for_unit("network-online.target")
-      client2.succeed(
-          "transmission-remote --add http://${externalTrackerAddress}/test.torrent --no-portmap --no-dht >&2 &"
-      )
-      client2.wait_for_file("${download-dir}/test.tar.bz2")
-      client2.succeed(
-          "cmp ${download-dir}/test.tar.bz2 ${file}"
-      )
-    '';
-})
+
+    testScript =
+      { nodes, ... }:
+      ''
+        start_all()
+
+        # Wait for network and miniupnpd.
+        router.systemctl("start network-online.target")
+        router.wait_for_unit("network-online.target")
+        router.wait_for_unit("miniupnpd")
+
+        # Create the torrent.
+        tracker.succeed("mkdir ${download-dir}/data")
+        tracker.succeed(
+            "cp ${file} ${download-dir}/data/test.tar.bz2"
+        )
+        tracker.succeed(
+            "transmission-create ${download-dir}/data/test.tar.bz2 --private --tracker http://${externalTrackerAddress}:6969/announce --outfile /tmp/test.torrent"
+        )
+        tracker.succeed("chmod 644 /tmp/test.torrent")
+
+        # Start the tracker.  !!! use a less crappy tracker
+        tracker.systemctl("start network-online.target")
+        tracker.wait_for_unit("network-online.target")
+        tracker.wait_for_unit("opentracker.service")
+        tracker.wait_for_open_port(6969)
+
+        # Start the initial seeder.
+        tracker.succeed(
+            "transmission-remote --add /tmp/test.torrent --no-portmap --no-dht --download-dir ${download-dir}/data"
+        )
+
+        # Now we should be able to download from the client behind the NAT.
+        tracker.wait_for_unit("httpd")
+        client1.systemctl("start network-online.target")
+        client1.wait_for_unit("network-online.target")
+        client1.succeed("transmission-remote --add http://${externalTrackerAddress}/test.torrent >&2 &")
+        client1.wait_for_file("${download-dir}/test.tar.bz2")
+        client1.succeed(
+            "cmp ${download-dir}/test.tar.bz2 ${file}"
+        )
+
+        # Bring down the initial seeder.
+        tracker.stop_job("transmission")
+
+        # Now download from the second client.  This can only succeed if
+        # the first client created a NAT hole in the router.
+        client2.systemctl("start network-online.target")
+        client2.wait_for_unit("network-online.target")
+        client2.succeed(
+            "transmission-remote --add http://${externalTrackerAddress}/test.torrent --no-portmap --no-dht >&2 &"
+        )
+        client2.wait_for_file("${download-dir}/test.tar.bz2")
+        client2.succeed(
+            "cmp ${download-dir}/test.tar.bz2 ${file}"
+        )
+      '';
+  }
+)
diff --git a/nixos/tests/borgbackup.nix b/nixos/tests/borgbackup.nix
index 4160e727f047b..af7c12009c363 100644
--- a/nixos/tests/borgbackup.nix
+++ b/nixos/tests/borgbackup.nix
@@ -7,6 +7,8 @@ let
   keepFile = "important_file";
   keepFileData = "important_data";
   localRepo = "/root/back:up";
+  # a repository on a file system which is not mounted automatically
+  localRepoMount = "/noAutoMount";
   archiveName = "my_archive";
   remoteRepo = "borg@server:."; # No need to specify path
   privateKey = pkgs.writeText "id_ed25519" ''
@@ -42,6 +44,12 @@ in {
 
   nodes = {
     client = { ... }: {
+      virtualisation.fileSystems.${localRepoMount} = {
+        device = "tmpfs";
+        fsType = "tmpfs";
+        options = [ "noauto" ];
+      };
+
       services.borgbackup.jobs = {
 
         local = {
@@ -65,6 +73,13 @@ in {
           startAt = [ ]; # Do not run automatically
         };
 
+        localMount = {
+          paths = dataDir;
+          repo = localRepoMount;
+          encryption.mode = "none";
+          startAt = [ ];
+        };
+
         remote = {
           paths = dataDir;
           repo = remoteRepo;
@@ -178,6 +193,17 @@ in {
             "cat /mnt/borg/${dataDir}/${keepFile}"
         )
 
+    with subtest("localMount"):
+        # the file system for the repo should not be already mounted
+        client.fail("mount | grep ${localRepoMount}")
+        # ensure trying to write to the mountpoint before the fs is mounted fails
+        client.succeed("chattr +i ${localRepoMount}")
+        borg = "borg"
+        client.systemctl("start --wait borgbackup-job-localMount")
+        client.fail("systemctl is-failed borgbackup-job-localMount")
+        # Make sure at least one archive has been created
+        assert int(client.succeed("{} list '${localRepoMount}' | wc -l".format(borg))) > 0
+
     with subtest("remote"):
         borg = "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519' borg"
         server.wait_for_unit("sshd.service")
diff --git a/nixos/tests/borgmatic.nix b/nixos/tests/borgmatic.nix
new file mode 100644
index 0000000000000..70ad43e8bd358
--- /dev/null
+++ b/nixos/tests/borgmatic.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "borgmatic";
+  nodes.machine = { ... }: {
+    services.borgmatic = {
+      enable = true;
+      settings = {
+        source_directories = [ "/home" ];
+        repositories = [
+          {
+            label = "local";
+            path = "/var/backup";
+          }
+        ];
+        keep_daily = 7;
+      };
+    };
+  };
+
+  testScript = ''
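+    # "rcreate -e none" creates the configured repository without encryption; the plain "borgmatic" run then performs the backup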
+    machine.succeed("borgmatic rcreate -e none")
+    machine.succeed("borgmatic")
+  '';
+})
diff --git a/nixos/tests/bpf.nix b/nixos/tests/bpf.nix
index 150ed0958862e..0020c7ee2d693 100644
--- a/nixos/tests/bpf.nix
+++ b/nixos/tests/bpf.nix
@@ -16,14 +16,14 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     # list probes
     machine.succeed("bpftrace -l")
     # simple BEGIN probe (user probe on bpftrace itself)
-    print(machine.succeed("bpftrace -e 'BEGIN { print(\"ok\"); exit(); }'"))
+    print(machine.succeed("bpftrace -e 'BEGIN { print(\"ok\\n\"); exit(); }'"))
     # tracepoint
     print(machine.succeed("bpftrace -e 'tracepoint:syscalls:sys_enter_* { print(probe); exit() }'"))
     # kprobe
     print(machine.succeed("bpftrace -e 'kprobe:schedule { print(probe); exit() }'"))
     # BTF
     print(machine.succeed("bpftrace -e 'kprobe:schedule { "
-        "    printf(\"tgid: %d\", ((struct task_struct*) curtask)->tgid); exit() "
+        "    printf(\"tgid: %d\\n\", ((struct task_struct*) curtask)->tgid); exit() "
         "}'"))
     # module BTF (bpftrace >= 0.17)
     # test is currently disabled on aarch64 as kfunc does not work there yet
@@ -32,5 +32,8 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         "bpftrace -e 'kfunc:nft_trans_alloc_gfp { "
         "    printf(\"portid: %d\\n\", args->ctx->portid); "
         "} BEGIN { exit() }'"))
+    # glibc includes
+    print(machine.succeed("bpftrace -e '#include <errno.h>\n"
+        "BEGIN { printf(\"ok %d\\n\", EINVAL); exit(); }'"))
   '';
 })
diff --git a/nixos/tests/budgie.nix b/nixos/tests/budgie.nix
index 203e718c8c6d9..9f24ea71de1d4 100644
--- a/nixos/tests/budgie.nix
+++ b/nixos/tests/budgie.nix
@@ -25,7 +25,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     services.xserver.desktopManager.budgie = {
       enable = true;
       extraPlugins = [
-        pkgs.budgiePlugins.budgie-analogue-clock-applet
+        pkgs.budgie-analogue-clock-applet
       ];
     };
   };
@@ -63,7 +63,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       with subtest("Check if various environment variables are set"):
           cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf /run/current-system/sw/bin/budgie-wm)/environ"
           machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Budgie:GNOME'")
-          machine.succeed(f"{cmd} | grep 'BUDGIE_PLUGIN_DATADIR' | grep '${pkgs.budgie.budgie-desktop-with-plugins.pname}'")
+          machine.succeed(f"{cmd} | grep 'BUDGIE_PLUGIN_DATADIR' | grep '${pkgs.budgie-desktop-with-plugins.pname}'")
 
       with subtest("Open run dialog"):
           machine.send_key("alt-f2")
diff --git a/nixos/tests/buildbot.nix b/nixos/tests/buildbot.nix
index 149d73bba09c5..0f65ac21c83d6 100644
--- a/nixos/tests/buildbot.nix
+++ b/nixos/tests/buildbot.nix
@@ -14,7 +14,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           "steps.ShellCommand(command=['bash', 'fakerepo.sh'])"
         ];
         changeSource = [
-          "changes.GitPoller('git://gitrepo/fakerepo.git', workdir='gitpoller-workdir', branch='master', pollinterval=300)"
+          "changes.GitPoller('git://gitrepo/fakerepo.git', workdir='gitpoller-workdir', branch='master', pollInterval=300)"
         ];
       };
       networking.firewall.allowedTCPPorts = [ 8010 8011 9989 ];
diff --git a/nixos/tests/cagebreak.nix b/nixos/tests/cagebreak.nix
index 1fef7cb57cfc5..4d7664c1505f3 100644
--- a/nixos/tests/cagebreak.nix
+++ b/nixos/tests/cagebreak.nix
@@ -14,9 +14,7 @@ in
   };
 
   nodes.machine = { config, ... }:
-  let
-    alice = config.users.users.alice;
-  in {
+  {
     # Automatically login on tty1 as a normal user:
     imports = [ ./common/user-account.nix ];
     services.getty.autologinUser = "alice";
@@ -31,7 +29,7 @@ in
       fi
     '';
 
-    hardware.opengl.enable = true;
+    hardware.graphics.enable = true;
     programs.xwayland.enable = true;
     security.polkit.enable = true;
     environment.systemPackages = [ pkgs.cagebreak pkgs.wayland-utils ];
diff --git a/nixos/tests/castopod.nix b/nixos/tests/castopod.nix
index 3257cd3d363c7..57e035354d23e 100644
--- a/nixos/tests/castopod.nix
+++ b/nixos/tests/castopod.nix
@@ -98,6 +98,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
             driver = Firefox(options=options, service=service)
             driver = Firefox(options=options)
             driver.implicitly_wait(30)
+            driver.set_page_load_timeout(60)
 
             # install ##########################################################
 
@@ -207,7 +208,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
             text = ''
               out=/tmp/podcast.mp3
               sox -n -r 48000 -t wav - synth ${targetPodcastDuration} sine 440 `
-              `| lame --noreplaygain -cbr -q 9 -b 320 - $out
+              `| lame --noreplaygain --cbr -q 9 -b 320 - $out
               FILESIZE="$(stat -c%s $out)"
               [ "$FILESIZE" -gt 0 ]
               [ "$FILESIZE" -le "${toString targetPodcastSize}" ]
diff --git a/nixos/tests/centrifugo.nix b/nixos/tests/centrifugo.nix
index 45c2904f5585f..8e940f74caa4b 100644
--- a/nixos/tests/centrifugo.nix
+++ b/nixos/tests/centrifugo.nix
@@ -24,12 +24,10 @@ in
             engine = "redis";
             # Connect to local Redis shard via Unix socket.
             redis_address =
-              let
-                otherNodes = lib.take index nodes ++ lib.drop (index + 1) nodes;
-              in
-              map (name: "${name}:${toString redisPort}") otherNodes ++ [
+              let toRedisAddresses = map (name: "${name}:${toString redisPort}"); in
+              toRedisAddresses (lib.take index nodes) ++ [
                 "unix://${config.services.redis.servers.centrifugo.unixSocket}"
-              ];
+              ] ++ toRedisAddresses (lib.drop (index + 1) nodes);
             usage_stats_disable = true;
             api_insecure = true;
           };
diff --git a/nixos/tests/ceph-single-node-bluestore-dmcrypt.nix b/nixos/tests/ceph-single-node-bluestore-dmcrypt.nix
new file mode 100644
index 0000000000000..13407f054931b
--- /dev/null
+++ b/nixos/tests/ceph-single-node-bluestore-dmcrypt.nix
@@ -0,0 +1,273 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+
+  let
+    # the single node ipv6 address
+    ip = "2001:db8:ffff::";
+    # the global ceph cluster id
+    cluster = "54465b37-b9d8-4539-a1f9-dd33c75ee45a";
+    # the fsids of OSDs
+    osd-fsid-map = {
+      "0" = "1c1b7ea9-06bf-4d30-9a01-37ac3a0254aa";
+      "1" = "bd5a6f49-69d5-428c-ac25-a99f0c44375c";
+      "2" = "c90de6c7-86c6-41da-9694-e794096dfc5c";
+    };
+
+  in
+  {
+    name = "basic-single-node-ceph-cluster-bluestore-dmcrypt";
+    meta = {
+      maintainers = with lib.maintainers; [
+        benaryorg
+        nh2
+      ];
+    };
+
+    nodes = {
+      ceph =
+        { pkgs, config, ... }:
+        {
+          # disks for bluestore
+          virtualisation.emptyDiskImages = [
+            20480
+            20480
+            20480
+          ];
+
+          # networking setup (no external connectivity required, only local IPv6)
+          networking.useDHCP = false;
+          systemd.network = {
+            enable = true;
+            wait-online.extraArgs = [
+              "-i"
+              "lo"
+            ];
+            networks = {
+              "40-loopback" = {
+                enable = true;
+                name = "lo";
+                DHCP = "no";
+                addresses = [ { Address = "${ip}/128"; } ];
+              };
+            };
+          };
+
+          # do not start the ceph target by default so we can format the disks first
+          systemd.targets.ceph.wantedBy = lib.mkForce [ ];
+
+          # add the packages to systemPackages so the test script doesn't run into any unexpected issues
+          # this shouldn't be required on production systems, which only have the required packages in the unit paths,
+          # but it helps in case one needs to actually run the tooling anyway
+          environment.systemPackages = with pkgs; [
+            ceph
+            cryptsetup
+            lvm2
+          ];
+
+          services.ceph = {
+            enable = true;
+            client.enable = true;
+            extraConfig = {
+              public_addr = ip;
+              cluster_addr = ip;
+              # ipv6
+              ms_bind_ipv4 = "false";
+              ms_bind_ipv6 = "true";
+              # msgr2 settings
+              ms_cluster_mode = "secure";
+              ms_service_mode = "secure";
+              ms_client_mode = "secure";
+              ms_mon_cluster_mode = "secure";
+              ms_mon_service_mode = "secure";
+              ms_mon_client_mode = "secure";
+              # fewer default modules; cuts down on memory and startup time in the tests
+              mgr_initial_modules = "";
+              # distribute by OSD, not by host, as per https://docs.ceph.com/en/reef/cephadm/install/#single-host
+              osd_crush_chooseleaf_type = "0";
+            };
+            client.extraConfig."mon.0" = {
+              host = "ceph";
+              mon_addr = "v2:[${ip}]:3300";
+              public_addr = "v2:[${ip}]:3300";
+            };
+            global = {
+              fsid = cluster;
+              clusterNetwork = "${ip}/64";
+              publicNetwork = "${ip}/64";
+              monInitialMembers = "0";
+            };
+
+            mon = {
+              enable = true;
+              daemons = [ "0" ];
+            };
+
+            osd = {
+              enable = true;
+              daemons = builtins.attrNames osd-fsid-map;
+            };
+
+            mgr = {
+              enable = true;
+              daemons = [ "ceph" ];
+            };
+          };
+
+          systemd.services =
+            let
+              osd-name = id: "ceph-osd-${id}";
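+              # activate the dm-crypt backed LVM volume for each OSD before start and deactivate it on stop;
+              # ceph-volume is told not to manage systemd units itself (--no-systemd)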
+              osd-pre-start = id: [
+                "!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm activate --bluestore ${id} ${osd-fsid-map.${id}} --no-systemd"
+                "${config.services.ceph.osd.package.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${id} --cluster ${config.services.ceph.global.clusterName}"
+              ];
+              osd-post-stop = id: [
+                "!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm deactivate ${id}"
+              ];
+              map-osd = id: {
+                name = osd-name id;
+                value = {
+                  serviceConfig.ExecStartPre = lib.mkForce (osd-pre-start id);
+                  serviceConfig.ExecStopPost = osd-post-stop id;
+                  unitConfig.ConditionPathExists = lib.mkForce [ ];
+                  unitConfig.StartLimitBurst = lib.mkForce 4;
+                  path = with pkgs; [
+                    util-linux
+                    lvm2
+                    cryptsetup
+                  ];
+                };
+              };
+            in
+            lib.pipe config.services.ceph.osd.daemons [
+              (builtins.map map-osd)
+              builtins.listToAttrs
+            ];
+        };
+    };
+
+    testScript =
+      { ... }:
+      ''
+        start_all()
+
+        ceph.wait_for_unit("default.target")
+
+        # Bootstrap ceph-mon daemon
+        ceph.succeed(
+            "mkdir -p /var/lib/ceph/bootstrap-osd",
+            "ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+            "ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+            "ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'",
+            "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+            "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring",
+            "monmaptool --create --fsid ${cluster} --addv 0 'v2:[${ip}]:3300/0' --clobber /tmp/ceph.initial-monmap",
+            "mkdir -p /var/lib/ceph/mon/ceph-0",
+            "ceph-mon --mkfs -i 0 --monmap /tmp/ceph.initial-monmap --keyring /tmp/ceph.mon.keyring",
+            "chown ceph:ceph -R /tmp/ceph.mon.keyring /var/lib/ceph",
+            "systemctl start ceph-mon-0.service",
+        )
+
+        ceph.wait_for_unit("ceph-mon-0.service")
+        # Should the mon fail to start or bind for some reason, this gives a better error message than the config commands below running into a timeout.
+        ceph.wait_for_open_port(3300, "${ip}")
+        ceph.succeed(
+            # required for HEALTH_OK
+            "ceph config set mon auth_allow_insecure_global_id_reclaim false",
+            # IPv6
+            "ceph config set global ms_bind_ipv4 false",
+            "ceph config set global ms_bind_ipv6 true",
+            # the new (secure) protocol
+            "ceph config set global ms_bind_msgr1 false",
+            "ceph config set global ms_bind_msgr2 true",
+            # compact the monitor store on startup to keep it small
+            "ceph config set mon mon_compact_on_start true",
+        )
+
+        # Can't check ceph status until a mon is up
+        ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
+
+        # Bootstrap OSDs (do this before starting the mgr because cryptsetup and the mgr both eat a lot of memory)
+        ceph.succeed(
+            # this automatically handles the required LVM and cryptsetup setup and records the new OSDs in Ceph's internal databases
+            "ceph-volume lvm prepare --bluestore --data /dev/vdb --dmcrypt --no-systemd --osd-id 0 --osd-fsid ${osd-fsid-map."0"}",
+            "ceph-volume lvm prepare --bluestore --data /dev/vdc --dmcrypt --no-systemd --osd-id 1 --osd-fsid ${osd-fsid-map."1"}",
+            "ceph-volume lvm prepare --bluestore --data /dev/vdd --dmcrypt --no-systemd --osd-id 2 --osd-fsid ${osd-fsid-map."2"}",
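+            # deactivate again so the ceph-osd units, whose ExecStartPre runs
+            # "ceph-volume lvm activate" (see the systemd.services override above),
+            # can bring the encrypted volumes up themselves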
+            "sudo ceph-volume lvm deactivate 0",
+            "sudo ceph-volume lvm deactivate 1",
+            "sudo ceph-volume lvm deactivate 2",
+            "chown -R ceph:ceph /var/lib/ceph",
+        )
+
+        # Start OSDs (again, argon2id eats memory, so this happens before starting the mgr)
+        ceph.succeed(
+            "systemctl start ceph-osd-0.service",
+            "systemctl start ceph-osd-1.service",
+            "systemctl start ceph-osd-2.service",
+        )
+        ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
+        ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+
+        # Start the ceph-mgr daemon, after copying in the keyring
+        ceph.succeed(
+            "mkdir -p /var/lib/ceph/mgr/ceph-ceph/",
+            "ceph auth get-or-create -o /var/lib/ceph/mgr/ceph-ceph/keyring mgr.ceph mon 'allow profile mgr' osd 'allow *' mds 'allow *'",
+            "chown -R ceph:ceph /var/lib/ceph/mgr/ceph-ceph/",
+            "systemctl start ceph-mgr-ceph.service",
+        )
+        ceph.wait_for_unit("ceph-mgr-ceph")
+        ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
+        ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
+        ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+        ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
+        # test the actual storage
+        ceph.succeed(
+            "ceph osd pool create single-node-test 32 32",
+            "ceph osd pool ls | grep 'single-node-test'",
+
+            # We need to enable an application on the pool, otherwise it will
+            # stay unhealthy in state POOL_APP_NOT_ENABLED.
+            # Creating a CephFS would do this automatically, but we haven't done that here.
+            # See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
+            # We use the custom application name "nixos-test" for this.
+            "ceph osd pool application enable single-node-test nixos-test",
+
+            "ceph osd pool rename single-node-test single-node-other-test",
+            "ceph osd pool ls | grep 'single-node-other-test'",
+        )
+        ceph.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
+        ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+        ceph.wait_until_succeeds("ceph -s | grep '33 active+clean'")
+        ceph.fail(
+            # the old pool name should be gone after the rename
+            "ceph osd pool ls | grep 'single-node-test'",
+            # deleting the pool should fail without setting mon_allow_pool_delete
+            "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
+        )
+
+        # rebooting gets rid of any potential tmpfs mounts or device-mapper devices
+        ceph.shutdown()
+        ceph.start()
+        ceph.wait_for_unit("default.target")
+
+        # Start everything up again (as before, the OSDs go before the mgr because cryptsetup and the mgr both use a lot of memory)
+        ceph.systemctl("start ceph-mon-0.service")
+        ceph.wait_for_unit("ceph-mon-0")
+        ceph.systemctl("start ceph-osd-0.service")
+        ceph.wait_for_unit("ceph-osd-0")
+        ceph.systemctl("start ceph-osd-1.service")
+        ceph.wait_for_unit("ceph-osd-1")
+        ceph.systemctl("start ceph-osd-2.service")
+        ceph.wait_for_unit("ceph-osd-2")
+        ceph.systemctl("start ceph-mgr-ceph.service")
+        ceph.wait_for_unit("ceph-mgr-ceph")
+
+        # Ensure the cluster comes back up again
+        ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
+        ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
+        ceph.wait_until_succeeds("ceph osd stat | grep -E '3 osds: 3 up[^,]*, 3 in'")
+        ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
+        ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+      '';
+  }
+)
diff --git a/nixos/tests/cgit.nix b/nixos/tests/cgit.nix
index 6aed06adefdff..073b141b14e73 100644
--- a/nixos/tests/cgit.nix
+++ b/nixos/tests/cgit.nix
@@ -23,10 +23,16 @@ in {
         nginx.location = "/(c)git/";
         repos = {
           some-repo = {
-            path = "/srv/git/some-repo";
+            path = "/tmp/git/some-repo";
             desc = "some-repo description";
           };
         };
+        settings = {
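+          # cgit readme entries use "ref:path" syntax; a bare ":" refers to the
+          # repository's default branch. The first entry that exists is rendered,
+          # so date.txt acts as a fallback until README.md is committed below.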
+          readme = [
+            ":README.md"
+            ":date.txt"
+          ];
+        };
       };
 
       environment.systemPackages = [ pkgs.git ];
@@ -50,24 +56,48 @@ in {
 
     server.fail("curl -fsS http://localhost/robots.txt")
 
-    server.succeed("${pkgs.writeShellScript "setup-cgit-test-repo" ''
+    server.succeed("sudo -u cgit ${pkgs.writeShellScript "setup-cgit-test-repo" ''
       set -e
-      git init --bare -b master /srv/git/some-repo
+      git init --bare -b master /tmp/git/some-repo
       git init -b master reference
       cd reference
-      git remote add origin /srv/git/some-repo
-      date > date.txt
+      git remote add origin /tmp/git/some-repo
+      { echo -n "cgit NixOS Test at "; date; } > date.txt
       git add date.txt
       git -c user.name=test -c user.email=test@localhost commit -m 'add date'
       git push -u origin master
     ''}")
 
+    # test web download
     server.succeed(
         "curl -fsS 'http://localhost/%28c%29git/some-repo/plain/date.txt?id=master' | diff -u reference/date.txt -"
     )
 
+    # test http clone
     server.succeed(
        "git clone http://localhost/%28c%29git/some-repo && diff -u reference/date.txt some-repo/date.txt"
     )
+
+    # test list-type settings by grepping for the fallback readme
+    server.succeed(
+        "curl -fsS 'http://localhost/%28c%29git/some-repo/about/' | grep -F 'cgit NixOS Test at'"
+    )
+
+    # add real readme
+    server.succeed("sudo -u cgit ${pkgs.writeShellScript "cgit-commit-readme" ''
+      set -e
+      echo '# cgit NixOS test README' > reference/README.md
+      git -C reference add README.md
+      git -C reference -c user.name=test -c user.email=test@localhost commit -m 'add readme'
+      git -C reference push
+    ''}")
+
+    # test list-type settings by grepping for the real readme
+    server.succeed(
+        "curl -fsS 'http://localhost/%28c%29git/some-repo/about/' | grep -F '# cgit NixOS test README'"
+    )
+    server.fail(
+        "curl -fsS 'http://localhost/%28c%29git/some-repo/about/' | grep -F 'cgit NixOS Test at'"
+    )
   '';
 })
diff --git a/nixos/tests/chromadb.nix b/nixos/tests/chromadb.nix
new file mode 100644
index 0000000000000..be04d10e74de0
--- /dev/null
+++ b/nixos/tests/chromadb.nix
@@ -0,0 +1,26 @@
+{ lib, pkgs, ... }:
+
+{
+  name = "chromadb";
+  meta.maintainers = [ lib.maintainers.drupol ];
+
+  nodes = {
+    machine =
+      { pkgs, ... }:
+      {
+        services.chromadb = {
+          enable = true;
+        };
+      };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("chromadb.service")
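+    # 8000 is chromadb's default HTTP port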
+    machine.wait_for_open_port(8000)
+  '';
+}
diff --git a/nixos/tests/chrony.nix b/nixos/tests/chrony.nix
index 578b1e32d50c9..2dcc363728beb 100644
--- a/nixos/tests/chrony.nix
+++ b/nixos/tests/chrony.nix
@@ -7,25 +7,24 @@ import ./make-test-python.nix ({ lib, ... }:
   };
 
   nodes = {
-    default = {
+    machine = {
       services.chrony.enable = true;
-    };
-    graphene-hardened = {
-      services.chrony.enable = true;
-      services.chrony.enableMemoryLocking = true;
-      environment.memoryAllocator.provider = "graphene-hardened";
-      # dhcpcd privsep is incompatible with graphene-hardened
-      networking.useNetworkd = true;
+
+      specialisation.hardened.configuration = {
+        services.chrony.enableMemoryLocking = true;
+        environment.memoryAllocator.provider = "graphene-hardened";
+        # dhcpcd privsep is incompatible with graphene-hardened
+        networking.useNetworkd = true;
+      };
     };
   };
 
-  testScript = {nodes, ...} : let
-    graphene-hardened = nodes.graphene-hardened.system.build.toplevel;
-  in ''
-    default.start()
-    default.wait_for_unit('multi-user.target')
-    default.succeed('systemctl is-active chronyd.service')
-    default.succeed('${graphene-hardened}/bin/switch-to-configuration test')
-    default.succeed('systemctl is-active chronyd.service')
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit('multi-user.target')
+    machine.succeed('systemctl is-active chronyd.service')
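+    # switch into the "hardened" specialisation built alongside the main system,
+    # then make sure chronyd still runs with memory locking and the
+    # graphene-hardened allocator enabled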
+    machine.succeed('/run/booted-system/specialisation/hardened/bin/switch-to-configuration test')
+    machine.succeed('systemctl restart chronyd.service')
+    machine.wait_for_unit('chronyd.service')
   '';
 })
diff --git a/nixos/tests/cinnamon-wayland.nix b/nixos/tests/cinnamon-wayland.nix
index 19529d820d9c1..cba0c9f60e8db 100644
--- a/nixos/tests/cinnamon-wayland.nix
+++ b/nixos/tests/cinnamon-wayland.nix
@@ -14,7 +14,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     };
 
     # For the sessionPath subtest.
-    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gnome.gpaste ];
+    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gpaste ];
   };
 
   enableOCR = true;
diff --git a/nixos/tests/cinnamon.nix b/nixos/tests/cinnamon.nix
index 694308152149b..57300c3e4b16b 100644
--- a/nixos/tests/cinnamon.nix
+++ b/nixos/tests/cinnamon.nix
@@ -13,7 +13,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     environment.cinnamon.excludePackages = [ pkgs.gnome-text-editor ];
 
     # For the sessionPath subtest.
-    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gnome.gpaste ];
+    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gpaste ];
   };
 
   enableOCR = true;
diff --git a/nixos/tests/clatd.nix b/nixos/tests/clatd.nix
index 00021d87ba5f4..d0d504851ce4e 100644
--- a/nixos/tests/clatd.nix
+++ b/nixos/tests/clatd.nix
@@ -6,8 +6,8 @@
 # Client | clat    Address: 192.0.0.1/32  (configured via clatd)
 #        |         Route:   default
 #        |
-#        | eth1    Address: 2001:db8::2/64
-#        |  |      Route:   default via 2001:db8::1
+#        | eth1    Address: Assigned via SLAAC within 2001:db8::/64
+#        |  |      Route:   default via IPv6LL address
 #        +--|---
 #           | VLAN 3
 #        +--|---
@@ -31,7 +31,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
 {
   name = "clatd";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ hax404 ];
+    maintainers = [ hax404 jmbaur ];
   };
 
   nodes = {
@@ -59,25 +59,26 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
             "100.64.0.2/24"
           ];
           routes = [
-            { routeConfig = { Destination = "192.0.2.0/24"; Gateway = "100.64.0.1"; }; }
+            { Destination = "192.0.2.0/24"; Gateway = "100.64.0.1"; }
           ];
         };
       };
     };
 
     # The router is configured with static IPv4 addresses towards the server
-    # and IPv6 addresses towards the client. For NAT64, the Well-Known prefix
-    # 64:ff9b::/96 is used. NAT64 is done with TAYGA which provides the
-    # tun-interface nat64 and does the translation over it. The IPv6 packets
-    # are sent to this interfaces and received as IPv4 packets and vice versa.
-    # As TAYGA only translates IPv6 addresses to dedicated IPv4 addresses, it
-    # needs a pool of IPv4 addresses which must be at least as big as the
-    # expected amount of clients. In this test, the packets from the pool are
-    # directly routed towards the client. In normal cases, there would be a
-    # second source NAT44 to map all clients behind one IPv4 address.
+    # and IPv6 addresses towards the client. DNS64 is exposed towards the
+    # client so clatd is able to auto-discover the PLAT prefix. For NAT64, the
+    # Well-Known prefix 64:ff9b::/96 is used. NAT64 is done with TAYGA which
+    # provides the tun-interface nat64 and does the translation over it. The
+    # IPv6 packets are sent to this interface and received as IPv4 packets and
+    # vice versa. As TAYGA only translates IPv6 addresses to dedicated IPv4
+    # addresses, it needs a pool of IPv4 addresses which must be at least as
+    # big as the expected number of clients. In this test, the packets from the
+    # pool are directly routed towards the client. In normal cases, there would
+    # be a second source NAT44 to map all clients behind one IPv4 address.
     router = {
       boot.kernel.sysctl = {
-        "net.ipv4.ip_forward" = 1;
+        "net.ipv4.conf.all.forwarding" = 1;
         "net.ipv6.conf.all.forwarding" = 1;
       };
 
@@ -102,6 +103,36 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
         };
       };
 
+      systemd.network.networks."40-eth2" = {
+        networkConfig.IPv6SendRA = true;
+        ipv6Prefixes = [ { Prefix = "2001:db8::/64"; } ];
+        ipv6PREF64Prefixes = [ { Prefix = "64:ff9b::/96"; } ];
+        ipv6SendRAConfig = {
+          EmitDNS = true;
+          DNS = "_link_local";
+        };
+      };
+
+      services.resolved.extraConfig = ''
+        DNSStubListener=no
+      '';
+
+      networking.extraHosts = ''
+        192.0.0.171 ipv4only.arpa
+        192.0.0.170 ipv4only.arpa
+      '';
+
+      services.coredns = {
+        enable = true;
+        config = ''
+          .:53 {
+            bind ::
+            hosts /etc/hosts
+            dns64 64:ff9b::/96
+          }
+        '';
+      };
+
       services.tayga = {
         enable = true;
         ipv4 = {
@@ -127,10 +158,10 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       };
     };
 
-    # The client is configured with static IPv6 addresses. It has also a static
-    # default route towards the router. To reach the IPv4-only server, the
-    # client starts the clat daemon which starts and configures the local
-    # IPv4 -> IPv6 translation via Tayga.
+    # The client uses SLAAC to assign IPv6 addresses. To reach the IPv4-only
+    # server, the client starts the clat daemon which starts and configures the
+    # local IPv4 -> IPv6 translation via Tayga after discovering the PLAT
+    # prefix via DNS64.
     client = {
       virtualisation.vlans = [
         3 # towards router
@@ -145,25 +176,36 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
         enable = true;
         networks."vlan1" = {
           matchConfig.Name = "eth1";
-          address = [
-            "2001:db8::2/64"
-          ];
-          routes = [
-            { routeConfig = { Destination = "::/0"; Gateway = "2001:db8::1"; }; }
-          ];
+
+          # NOTE: clatd does not actually use the PREF64 prefix discovered by
+          # systemd-networkd (nor does systemd-networkd do anything with it,
+          # yet), but we set this to confirm it works. See the test script
+          # below.
+          ipv6AcceptRAConfig.UsePREF64 = true;
         };
       };
 
       services.clatd = {
         enable = true;
-        settings.plat-prefix = "64:ff9b::/96";
+        # NOTE: Perl's Net::DNS resolver does not seem to work well querying
+        # for AAAA records to systemd-resolved's default IPv4 bind address
+        # (127.0.0.53), so we add an IPv6 listener address to systemd-resolved
+        # and tell clatd to use that instead.
+        settings.dns64-servers = "::1";
       };
 
+      # Allow clatd to find the DNS server. See comment above.
+      services.resolved.extraConfig = ''
+        DNSStubListenerExtra=::1
+      '';
+
       environment.systemPackages = [ pkgs.mtr ];
     };
   };
 
   testScript = ''
+    import json
+
     start_all()
 
     # wait for all machines to start up
@@ -178,6 +220,11 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
         'journalctl -u clatd -e | grep -q "Starting up TAYGA, using config file"'
       )
 
+    with subtest("networkd exports PREF64 prefix"):
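+      # networkctl reports the prefix as a byte array; 64:ff9b::/96 is
+      # 0x00 0x64 0xff 0x9b followed by twelve zero bytes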
+      assert json.loads(client.succeed("networkctl status eth1 --json=short"))[
+          "NDisc"
+      ]["PREF64"][0]["Prefix"] == [0x0, 0x64, 0xFF, 0x9B] + ([0] * 12)
+
     with subtest("Test ICMP"):
       client.wait_until_succeeds("ping -c 3 100.64.0.2 >&2")
 
diff --git a/nixos/tests/cntr.nix b/nixos/tests/cntr.nix
index 598143beb6c0f..2166fb8f9b092 100644
--- a/nixos/tests/cntr.nix
+++ b/nixos/tests/cntr.nix
@@ -18,7 +18,7 @@ let
             inherit backend;
             containers.nginx = {
               image = "nginx-container";
-              imageFile = pkgs.dockerTools.examples.nginx;
+              imageStream = pkgs.dockerTools.examples.nginxStream;
               ports = [ "8181:80" ];
             };
           };
diff --git a/nixos/tests/commafeed.nix b/nixos/tests/commafeed.nix
new file mode 100644
index 0000000000000..7b65720818a9b
--- /dev/null
+++ b/nixos/tests/commafeed.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+  {
+    name = "commafeed";
+
+    nodes.server = {
+      services.commafeed = {
+        enable = true;
+      };
+    };
+
+    testScript = ''
+      server.start()
+      server.wait_for_unit("commafeed.service")
+      server.wait_for_open_port(8082)
+      server.succeed("curl --fail --silent http://localhost:8082")
+    '';
+
+    meta.maintainers = [ lib.maintainers.raroh73 ];
+  }
+)
diff --git a/nixos/tests/common/acme/client/default.nix b/nixos/tests/common/acme/client/default.nix
index 503e610d1ac9e..f9b08b519dbeb 100644
--- a/nixos/tests/common/acme/client/default.nix
+++ b/nixos/tests/common/acme/client/default.nix
@@ -1,4 +1,4 @@
-{ lib, nodes, pkgs, ... }:
+{ nodes, ... }:
 let
   caCert = nodes.acme.test-support.acme.caCert;
   caDomain = nodes.acme.test-support.acme.caDomain;
diff --git a/nixos/tests/containers-bridge.nix b/nixos/tests/containers-bridge.nix
index d2e16299edaad..3001db33ba5a3 100644
--- a/nixos/tests/containers-bridge.nix
+++ b/nixos/tests/containers-bridge.nix
@@ -8,7 +8,7 @@ in
 import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "containers-bridge";
   meta = {
-    maintainers = with lib.maintainers; [ aristid aszlig eelco kampfschlaefer ];
+    maintainers = with lib.maintainers; [ aristid aszlig kampfschlaefer ];
   };
 
   nodes.machine =
diff --git a/nixos/tests/containers-imperative.nix b/nixos/tests/containers-imperative.nix
index fff00e4f73a85..c654c43788078 100644
--- a/nixos/tests/containers-imperative.nix
+++ b/nixos/tests/containers-imperative.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "containers-imperative";
   meta = {
-    maintainers = with lib.maintainers; [ aristid aszlig eelco kampfschlaefer ];
+    maintainers = with lib.maintainers; [ aristid aszlig kampfschlaefer ];
   };
 
   nodes.machine =
@@ -33,6 +33,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         stdenv stdenvNoCC emptyContainer.config.containers.foo.path
         libxslt desktop-file-utils texinfo docbook5 libxml2
         docbook_xsl_ns xorg.lndir documentation-highlighter
+        perlPackages.ConfigIniFiles
       ];
     };
 
diff --git a/nixos/tests/containers-ip.nix b/nixos/tests/containers-ip.nix
index ecff99a3f0c25..034e5d660415a 100644
--- a/nixos/tests/containers-ip.nix
+++ b/nixos/tests/containers-ip.nix
@@ -14,7 +14,7 @@ let
 in import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "containers-ipv4-ipv6";
   meta = {
-    maintainers = with lib.maintainers; [ aristid aszlig eelco kampfschlaefer ];
+    maintainers = with lib.maintainers; [ aristid aszlig kampfschlaefer ];
   };
 
   nodes.machine =
diff --git a/nixos/tests/containers-portforward.nix b/nixos/tests/containers-portforward.nix
index b8c7aabc5a50b..1a9880fe93133 100644
--- a/nixos/tests/containers-portforward.nix
+++ b/nixos/tests/containers-portforward.nix
@@ -8,7 +8,7 @@ in
 import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "containers-portforward";
   meta = {
-    maintainers = with lib.maintainers; [ aristid aszlig eelco kampfschlaefer ianwookim ];
+    maintainers = with lib.maintainers; [ aristid aszlig kampfschlaefer ianwookim ];
   };
 
   nodes.machine =
diff --git a/nixos/tests/containers-reloadable.nix b/nixos/tests/containers-reloadable.nix
index 876e62c1da9ea..00d850cae2a17 100644
--- a/nixos/tests/containers-reloadable.nix
+++ b/nixos/tests/containers-reloadable.nix
@@ -1,71 +1,57 @@
 import ./make-test-python.nix ({ pkgs, lib, ... }:
-let
-  client_base = {
-    containers.test1 = {
-      autoStart = true;
-      config = {
-        environment.etc.check.text = "client_base";
-      };
-    };
-
-    # prevent make-test-python.nix to change IP
-    networking.interfaces = {
-      eth1.ipv4.addresses = lib.mkOverride 0 [ ];
-    };
-  };
-in {
+{
   name = "containers-reloadable";
   meta = {
     maintainers = with lib.maintainers; [ danbst ];
   };
 
   nodes = {
-    client = { ... }: {
-      imports = [ client_base ];
-    };
+    machine = { lib, ... }: {
+      containers.test1 = {
+        autoStart = true;
+        config.environment.etc.check.text = "client_base";
+      };
 
-    client_c1 = { lib, ... }: {
-      imports = [ client_base ];
+      # prevent make-test-python.nix from changing the IP
+      networking.interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [ ];
 
-      containers.test1.config = {
-        environment.etc.check.text = lib.mkForce "client_c1";
-        services.httpd.enable = true;
-        services.httpd.adminAddr = "nixos@example.com";
+      specialisation.c1.configuration = {
+        containers.test1.config = {
+          environment.etc.check.text = lib.mkForce "client_c1";
+          services.httpd.enable = true;
+          services.httpd.adminAddr = "nixos@example.com";
+        };
       };
-    };
-    client_c2 = { lib, ... }: {
-      imports = [ client_base ];
 
-      containers.test1.config = {
-        environment.etc.check.text = lib.mkForce "client_c2";
-        services.nginx.enable = true;
+      specialisation.c2.configuration = {
+        containers.test1.config = {
+          environment.etc.check.text = lib.mkForce "client_c2";
+          services.nginx.enable = true;
+        };
       };
     };
   };
 
-  testScript = {nodes, ...}: let
-    c1System = nodes.client_c1.config.system.build.toplevel;
-    c2System = nodes.client_c2.config.system.build.toplevel;
-  in ''
-    client.start()
-    client.wait_for_unit("default.target")
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("default.target")
 
-    assert "client_base" in client.succeed("nixos-container run test1 cat /etc/check")
+    assert "client_base" in machine.succeed("nixos-container run test1 cat /etc/check")
 
     with subtest("httpd is available after activating config1"):
-        client.succeed(
-            "${c1System}/bin/switch-to-configuration test >&2",
+        machine.succeed(
+            "/run/booted-system/specialisation/c1/bin/switch-to-configuration test >&2",
             "[[ $(nixos-container run test1 cat /etc/check) == client_c1 ]] >&2",
             "systemctl status httpd -M test1 >&2",
         )
 
     with subtest("httpd is not available any longer after switching to config2"):
-        client.succeed(
-            "${c2System}/bin/switch-to-configuration test >&2",
+        machine.succeed(
+            "/run/booted-system/specialisation/c2/bin/switch-to-configuration test >&2",
             "[[ $(nixos-container run test1 cat /etc/check) == client_c2 ]] >&2",
             "systemctl status nginx -M test1 >&2",
         )
-        client.fail("systemctl status httpd -M test1 >&2")
+        machine.fail("systemctl status httpd -M test1 >&2")
   '';
 
 })
diff --git a/nixos/tests/containers-restart_networking.nix b/nixos/tests/containers-restart_networking.nix
index e1ad8157b2883..568ca5ee3fede 100644
--- a/nixos/tests/containers-restart_networking.nix
+++ b/nixos/tests/containers-restart_networking.nix
@@ -1,20 +1,4 @@
-let
-  client_base = {
-    networking.firewall.enable = false;
-
-    containers.webserver = {
-      autoStart = true;
-      privateNetwork = true;
-      hostBridge = "br0";
-      config = {
-        networking.firewall.enable = false;
-        networking.interfaces.eth0.ipv4.addresses = [
-          { address = "192.168.1.122"; prefixLength = 24; }
-        ];
-      };
-    };
-  };
-in import ./make-test-python.nix ({ pkgs, lib, ... }:
+import ./make-test-python.nix ({ pkgs, lib, ... }:
 {
   name = "containers-restart_networking";
   meta = {
@@ -22,46 +6,55 @@ in import ./make-test-python.nix ({ pkgs, lib, ... }:
   };
 
   nodes = {
-    client = { lib, ... }: client_base // {
+    client = {
       virtualisation.vlans = [ 1 ];
 
-      networking.bridges.br0 = {
-        interfaces = [];
-        rstp = false;
-      };
-      networking.interfaces = {
-        eth1.ipv4.addresses = lib.mkOverride 0 [ ];
-        br0.ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+      networking.firewall.enable = false;
+
+      containers.webserver = {
+        autoStart = true;
+        privateNetwork = true;
+        hostBridge = "br0";
+        config = {
+          networking.firewall.enable = false;
+          networking.interfaces.eth0.ipv4.addresses = [
+            { address = "192.168.1.122"; prefixLength = 24; }
+          ];
+        };
       };
 
-    };
-    client_eth1 = { lib, ... }: client_base // {
       networking.bridges.br0 = {
-        interfaces = [ "eth1" ];
+        interfaces = [];
         rstp = false;
       };
-      networking.interfaces = {
-        eth1.ipv4.addresses = lib.mkOverride 0 [ ];
-        br0.ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
-      };
-    };
-    client_eth1_rstp = { lib, ... }: client_base // {
-      networking.bridges.br0 = {
-        interfaces = [ "eth1" ];
-        rstp = true;
+
+      networking.interfaces.br0.ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+
+      specialisation.eth1.configuration = {
+        networking.bridges.br0.interfaces = [ "eth1" ];
+        networking.interfaces = {
+          eth1.ipv4.addresses = lib.mkForce [ ];
+          eth1.ipv6.addresses = lib.mkForce [ ];
+          br0.ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
+        };
       };
-      networking.interfaces = {
-        eth1.ipv4.addresses = lib.mkOverride 0 [ ];
-        br0.ipv4.addresses =  [ { address = "192.168.1.2"; prefixLength = 24; } ];
+
+      specialisation.eth1-rstp.configuration = {
+        networking.bridges.br0 = {
+          interfaces = [ "eth1" ];
+          rstp = lib.mkForce true;
+        };
+
+        networking.interfaces = {
+          eth1.ipv4.addresses = lib.mkForce [ ];
+          eth1.ipv6.addresses = lib.mkForce [ ];
+          br0.ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
+        };
       };
     };
   };
 
-  testScript = {nodes, ...}: let
-    originalSystem = nodes.client.config.system.build.toplevel;
-    eth1_bridged = nodes.client_eth1.config.system.build.toplevel;
-    eth1_rstp = nodes.client_eth1_rstp.config.system.build.toplevel;
-  in ''
+  testScript = ''
     client.start()
 
     client.wait_for_unit("default.target")
@@ -75,7 +68,7 @@ in import ./make-test-python.nix ({ pkgs, lib, ... }:
 
     with subtest("Bridged configuration without STP preserves connectivity"):
         client.succeed(
-            "${eth1_bridged}/bin/switch-to-configuration test >&2"
+            "/run/booted-system/specialisation/eth1/bin/switch-to-configuration test >&2"
         )
 
         client.succeed(
@@ -87,7 +80,7 @@ in import ./make-test-python.nix ({ pkgs, lib, ... }:
 
     #  activating rstp needs another service, therefore the bridge will restart and the container will lose its connectivity
     # with subtest("Bridged configuration with STP"):
-    #     client.succeed("${eth1_rstp}/bin/switch-to-configuration test >&2")
+    #     client.succeed("/run/booted-system/specialisation/eth1-rstp/bin/switch-to-configuration test >&2")
     #     client.execute("ip -4 a >&2")
     #     client.execute("ip l >&2")
     #
@@ -100,7 +93,7 @@ in import ./make-test-python.nix ({ pkgs, lib, ... }:
 
     with subtest("Reverting to initial configuration preserves connectivity"):
         client.succeed(
-            "${originalSystem}/bin/switch-to-configuration test >&2"
+            "/run/booted-system/bin/switch-to-configuration test >&2"
         )
 
         client.succeed("ping 192.168.1.122 -c 1 -n >&2")
diff --git a/nixos/tests/crabfit.nix b/nixos/tests/crabfit.nix
index 0cd0741f6fa4b..eb38a0ae0cfcd 100644
--- a/nixos/tests/crabfit.nix
+++ b/nixos/tests/crabfit.nix
@@ -4,7 +4,7 @@ import ./make-test-python.nix (
   {
     name = "crabfit";
 
-    meta.maintainers = with lib.maintainers; [ thubrecht ];
+    meta.maintainers = [ ];
 
     nodes = {
       machine =
diff --git a/nixos/tests/cryptpad.nix b/nixos/tests/cryptpad.nix
new file mode 100644
index 0000000000000..9d6af15f5f862
--- /dev/null
+++ b/nixos/tests/cryptpad.nix
@@ -0,0 +1,71 @@
+{ pkgs, ... }:
+let
+  certs = pkgs.runCommand "cryptpadSelfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    mkdir -p $out
+    cd $out
+    openssl req -x509 -newkey rsa:4096 \
+      -keyout key.pem -out cert.pem -nodes -days 3650 \
+      -subj '/CN=cryptpad.localhost' \
+      -addext 'subjectAltName = DNS.1:cryptpad.localhost, DNS.2:cryptpad-sandbox.localhost'
+  '';
+  # data sniffed from cryptpad's /checkup network trace; it appears to be reusable
+  test_write_data = pkgs.writeText "cryptpadTestData" ''
+    {"command":"WRITE_BLOCK","content":{"publicKey":"O2onvM62pC1io6jQKm8Nc2UyFXcd4kOmOsBIoYtZ2ik=","signature":"aXcM9SMO59lwA7q7HbYB+AnzymmxSyy/KhkG/cXIBVzl8v+kkPWXmFuWhcuKfRF8yt3Zc3ktIsHoFyuyDSAwAA==","ciphertext":"AFwCIfBHKdFzDKjMg4cu66qlJLpP+6Yxogbl3o9neiQou5P8h8yJB8qgnQ=="},"publicKey":"O2onvM62pC1io6jQKm8Nc2UyFXcd4kOmOsBIoYtZ2ik=","nonce":"bitSbJMNSzOsg98nEzN80a231PCkBQeH"}
+  '';
+in
+{
+  name = "cryptpad";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ martinetd ];
+  };
+
+  nodes.machine = {
+    services.cryptpad = {
+      enable = true;
+      configureNginx = true;
+      settings = {
+        httpUnsafeOrigin = "https://cryptpad.localhost";
+        httpSafeOrigin = "https://cryptpad-sandbox.localhost";
+      };
+    };
+    services.nginx = {
+      virtualHosts."cryptpad.localhost" = {
+        enableACME = false;
+        sslCertificate = "${certs}/cert.pem";
+        sslCertificateKey = "${certs}/key.pem";
+      };
+    };
+    security = {
+      pki.certificateFiles = [ "${certs}/cert.pem" ];
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("cryptpad.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_open_port(3000)
+
+    # test home page
+    machine.succeed("curl --fail https://cryptpad.localhost -o /tmp/cryptpad_home.html")
+    machine.succeed("grep -F 'CryptPad: Collaboration suite' /tmp/cryptpad_home.html")
+
+    # test scripts/build.js actually generated customize content from config
+    machine.succeed("grep -F 'meta property=\"og:url\" content=\"https://cryptpad.localhost/index.html' /tmp/cryptpad_home.html")
+
+    # make sure child pages are accessible (e.g. check nginx try_files paths)
+    machine.succeed(
+        "grep -oE '/(customize|components)[^\"]*' /tmp/cryptpad_home.html"
+        "  | while read -r page; do"
+        "        curl -O --fail https://cryptpad.localhost$page || exit;"
+        "    done")
+
+    # test some API (e.g. check cryptpad main process)
+    machine.succeed("curl --fail -d @${test_write_data} -H 'Content-Type: application/json' https://cryptpad.localhost/api/auth")
+
+    # test telemetry has been disabled
+    machine.fail("journalctl -u cryptpad | grep TELEMETRY")
+
+    # log the systemd-analyze security score as a pointer for future hardening improvements
+    machine.log(machine.execute("systemd-analyze security cryptpad.service")[1])
+  '';
+}
diff --git a/nixos/tests/curl-impersonate.nix b/nixos/tests/curl-impersonate.nix
index 33b10da1dfd0f..13eb54b5b2d0d 100644
--- a/nixos/tests/curl-impersonate.nix
+++ b/nixos/tests/curl-impersonate.nix
@@ -97,6 +97,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
       pyyaml
       pytest-asyncio
       dpkt
+      ts1-signatures
     ]}"
 
     # Prepare test root prefix
@@ -113,7 +114,7 @@ in {
   name = "curl-impersonate";
 
   meta = with lib.maintainers; {
-    maintainers = [ lilyinstarlight ];
+    maintainers = [ ];
   };
 
   nodes = {
diff --git a/nixos/tests/dae.nix b/nixos/tests/dae.nix
index 42a2eb5fe0be5..4b856450d9e1d 100644
--- a/nixos/tests/dae.nix
+++ b/nixos/tests/dae.nix
@@ -15,7 +15,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
     services.dae = {
       enable = true;
       config = ''
-        global{}
+        global { disable_waiting_network: true }
         routing{}
       '';
     };
diff --git a/nixos/tests/darling-dmg.nix b/nixos/tests/darling-dmg.nix
new file mode 100644
index 0000000000000..0a2ec82981228
--- /dev/null
+++ b/nixos/tests/darling-dmg.nix
@@ -0,0 +1,34 @@
+{ lib, pkgs, ... }:
+# This needs to be a VM test because the FUSE kernel module can't be used inside of a derivation in the Nix sandbox.
+# This test also exercises the LZFSE support in darling-dmg.
+let
+  # The last kitty release that is stored on an HFS+ filesystem inside the disk image
+  test-dmg-file = pkgs.fetchurl {
+    url = "https://github.com/kovidgoyal/kitty/releases/download/v0.17.4/kitty-0.17.4.dmg";
+    hash = "sha256-m+c5s8fFrgUc0xQNI196WplYBZq9+lNgems5haZUdvA=";
+  };
+in
+{
+  name = "darling-dmg";
+  meta.maintainers = with lib.maintainers; [ Luflosi ];
+
+  nodes.machine = {};
+
+  testScript = ''
+    start_all()
+
+    machine.succeed("mkdir mount-point")
+    machine.succeed("'${pkgs.darling-dmg}/bin/darling-dmg' '${test-dmg-file}' mount-point")
+
+    # Crude way to verify the contents
+    # Taken from https://stackoverflow.com/questions/545387/linux-compute-a-single-hash-for-a-given-folder-contents
+    # This could be improved; it does not check symlinks, for example.
+    hash = machine.succeed("""
+      (find mount-point -type f -print0  | sort -z | xargs -0 sha256sum; \
+       find mount-point \( -type f -o -type d \) -print0 | sort -z | \
+         xargs -0 stat -c '%n %a') \
+      | sha256sum
+    """).strip()
+    assert hash == "00e61c2ef171093fbf194e420c17bb84bcdb823238d70eb46e375bab2427cc21  -", f"The disk image contents differ from what was expected (was {hash})"
+  '';
+}
diff --git a/nixos/tests/ddns-updater.nix b/nixos/tests/ddns-updater.nix
new file mode 100644
index 0000000000000..caa763e09bba3
--- /dev/null
+++ b/nixos/tests/ddns-updater.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+  let
+    port = 6000;
+  in
+  {
+    name = "ddns-updater";
+
+    meta.maintainers = with lib.maintainers; [ delliott ];
+
+    nodes.machine =
+      { pkgs, ... }:
+      {
+        services.ddns-updater = {
+          enable = true;
+          environment = {
+            LISTENING_ADDRESS = ":" + (toString port);
+          };
+        };
+      };
+
+    testScript = ''
+      machine.wait_for_unit("ddns-updater.service")
+      machine.wait_for_open_port(${toString port})
+      machine.succeed("curl --fail http://localhost:${toString port}/")
+    '';
+  }
+)
diff --git a/nixos/tests/dependency-track.nix b/nixos/tests/dependency-track.nix
new file mode 100644
index 0000000000000..ab0d78827286d
--- /dev/null
+++ b/nixos/tests/dependency-track.nix
@@ -0,0 +1,65 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+  let
+    dependencyTrackPort = 8081;
+  in
+  {
+    name = "dependency-track";
+    meta = {
+      maintainers = pkgs.lib.teams.cyberus.members;
+    };
+
+    nodes = {
+      server =
+        { pkgs, ... }:
+        {
+          virtualisation = {
+            cores = 2;
+            diskSize = 4096;
+          };
+
+          environment.systemPackages = with pkgs; [ curl ];
+          systemd.services.dependency-track = {
+            # source: https://github.com/DependencyTrack/dependency-track/blob/37e0ba59e8057c18a87a7a76e247a8f75677a56c/dev/scripts/data-nist-generate-dummy.sh
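+            # Pre-seed dummy NVD feed files so Dependency-Track skips downloading
+            # the full vulnerability database during the test.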
+            preStart = ''
+              set -euo pipefail
+
+              NIST_DIR="$HOME/.dependency-track/nist"
+
+              rm -rf "$NIST_DIR"
+              mkdir -p "$NIST_DIR"
+
+              for feed in $(seq "2002" "2024"); do
+                touch "$NIST_DIR/nvdcve-1.1-$feed.json.gz"
+                echo "9999999999999" > "$NIST_DIR/nvdcve-1.1-$feed.json.gz.ts"
+              done
+            '';
+          };
+          services.dependency-track = {
+            enable = true;
+            port = dependencyTrackPort;
+            nginx.domain = "localhost";
+            database.passwordFile = "${pkgs.writeText "dbPassword" ''hunter2'THE'''H''''E''}";
+          };
+        };
+    };
+
+    testScript = ''
+      import json
+
+      start_all()
+
+      server.wait_for_unit("dependency-track.service")
+      server.wait_until_succeeds(
+        "journalctl -o cat -u dependency-track.service | grep 'Dependency-Track is ready'"
+      )
+      server.wait_for_open_port(${toString dependencyTrackPort})
+
+      with subtest("version api returns correct version"):
+        version = json.loads(
+          server.succeed("curl http://localhost/api/version")
+        )
+        assert version["version"] == "${pkgs.dependency-track.version}"
+    '';
+  }
+)
diff --git a/nixos/tests/devpi-server.nix b/nixos/tests/devpi-server.nix
new file mode 100644
index 0000000000000..2a16d49724dbc
--- /dev/null
+++ b/nixos/tests/devpi-server.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({pkgs, ...}: let
+  server-port = 3141;
+in {
+  name = "devpi-server";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [cafkafk];
+  };
+
+  nodes = {
+    devpi = {...}: {
+      services.devpi-server = {
+        enable = true;
+        host = "0.0.0.0";
+        port = server-port;
+        openFirewall = true;
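+        # throwaway secret for this test only; writeText stores it world-readable
+        # in the Nix store, so real deployments should point this at a proper secret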
+        secretFile = pkgs.writeText "devpi-secret" "v263P+V3YGDYUyfYL/RBURw+tCPMDw94R/iCuBNJrDhaYrZYjpA6XPFVDDH8ViN20j77y2PHoMM/U0opNkVQ2g==";
+      };
+    };
+
+    client1 = {...}: {
+      environment.systemPackages = with pkgs; [
+        devpi-client
+        jq
+      ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+    devpi.wait_for_unit("devpi-server.service")
+    devpi.wait_for_open_port(${builtins.toString server-port})
+
+    client1.succeed("devpi getjson http://devpi:${builtins.toString server-port}")
+  '';
+})
diff --git a/nixos/tests/dex-oidc.nix b/nixos/tests/dex-oidc.nix
index e54ae18ca9373..d3baa4fbf2455 100644
--- a/nixos/tests/dex-oidc.nix
+++ b/nixos/tests/dex-oidc.nix
@@ -57,15 +57,16 @@ import ./make-test-python.nix ({ lib, ... }: {
 
   testScript = ''
     with subtest("Web server gets ready"):
-        machine.wait_for_unit("dex.service")
+        machine.wait_for_unit("dex.service", timeout=120)
         # Wait until server accepts connections
-        machine.wait_until_succeeds("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid'")
+        machine.wait_until_succeeds("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid'", timeout=120)
 
     with subtest("Login"):
         state = machine.succeed("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid' | sed -n 's/.*state=\\(.*\\)\">.*/\\1/p'").strip()
         print(f"Got state {state}")
-        machine.succeed(f"curl -fs 'localhost:8080/dex/auth/mock/login?back=&state={state}' -d 'login=admin&password=password'")
-        code = machine.succeed(f"curl -fs localhost:8080/dex/approval?req={state} | sed -n 's/.*code=\\(.*\\)&amp;.*/\\1/p'").strip()
+        # Login request returns 303 with redirect_url that has code as query parameter:
+        # https://example.com/callback?code=kibsamwdupuy2iwqnlbqei3u6&state=
+        code = machine.succeed(f"curl -fs 'localhost:8080/dex/auth/mock/login?back=&state={state}' -d 'login=admin&password=password' -w '%{{redirect_url}}' | sed -n 's/.*code=\\(.*\\)&.*/\\1/p'")
         print(f"Got approval code {code}")
         bearer = machine.succeed(f"curl -fs localhost:8080/dex/token -u oidcclient:oidcclientsecret -d 'grant_type=authorization_code&redirect_uri=https://example.com/callback&code={code}' | jq .access_token -r").strip()
         print(f"Got access token {bearer}")
diff --git a/nixos/tests/dnscrypt-wrapper/default.nix b/nixos/tests/dnscrypt-wrapper/default.nix
deleted file mode 100644
index 1a794931dc500..0000000000000
--- a/nixos/tests/dnscrypt-wrapper/default.nix
+++ /dev/null
@@ -1,148 +0,0 @@
-
-{ lib, pkgs, ... }:
-
-let
-  snakeoil = import ../common/acme/server/snakeoil-certs.nix;
-
-  hosts = lib.mkForce
-   { "fd::a" = [ "server" snakeoil.domain ];
-     "fd::b" = [ "client" ];
-   };
-in
-
-{
-  name = "dnscrypt-wrapper";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ rnhmjoj ];
-  };
-
-  nodes = {
-    server = {
-      networking.hosts = hosts;
-      networking.interfaces.eth1.ipv6.addresses = lib.singleton
-        { address = "fd::a"; prefixLength = 64; };
-
-        services.dnscrypt-wrapper =
-          { enable = true;
-            address = "[::]";
-            port = 5353;
-            keys.expiration = 5; # days
-            keys.checkInterval = 2;  # min
-            # The keypair was generated by the command:
-            # dnscrypt-wrapper --gen-provider-keypair \
-            #  --provider-name=2.dnscrypt-cert.server \
-            providerKey.public = "${./public.key}";
-            providerKey.secret = "${./secret.key}";
-          };
-
-        # nameserver
-        services.bind.enable = true;
-        services.bind.zones = lib.singleton
-          { name = ".";
-            master = true;
-            file = pkgs.writeText "root.zone" ''
-              $TTL 3600
-              . IN SOA example.org. admin.example.org. ( 1 3h 1h 1w 1d )
-              . IN NS example.org.
-              example.org. IN AAAA 2001:db8::1
-            '';
-          };
-
-        # webserver
-        services.nginx.enable = true;
-        services.nginx.virtualHosts.${snakeoil.domain} =
-          { onlySSL = true;
-            listenAddresses = [ "localhost" ];
-            sslCertificate = snakeoil.${snakeoil.domain}.cert;
-            sslCertificateKey = snakeoil.${snakeoil.domain}.key;
-            locations."/ip".extraConfig = ''
-              default_type text/plain;
-              return 200 "Ciao $remote_addr!\n";
-            '';
-          };
-
-        # demultiplex HTTP and DNS from port 443
-        services.sslh =
-          { enable = true;
-            method = "ev";
-            settings.transparent = true;
-            settings.listen = lib.mkForce
-              [ { host = "server"; port = "443"; is_udp = false; }
-                { host = "server"; port = "443"; is_udp = true; }
-              ];
-            settings.protocols =
-              [ # Send TLS to webserver (TCP)
-                { name = "tls"; host= "localhost"; port= "443"; }
-                # Send DNSCrypt to dnscrypt-wrapper (TCP or UDP)
-                { name = "anyprot"; host = "localhost"; port = "5353"; }
-                { name = "anyprot"; host = "localhost"; port = "5353"; is_udp = true;}
-              ];
-          };
-
-        networking.firewall.allowedTCPPorts = [ 443 ];
-        networking.firewall.allowedUDPPorts = [ 443 ];
-      };
-
-    client = {
-      networking.hosts = hosts;
-      networking.interfaces.eth1.ipv6.addresses = lib.singleton
-        { address = "fd::b"; prefixLength = 64; };
-
-      services.dnscrypt-proxy2.enable = true;
-      services.dnscrypt-proxy2.upstreamDefaults = false;
-      services.dnscrypt-proxy2.settings =
-        { server_names = [ "server" ];
-          listen_addresses = [ "[::1]:53" ];
-          cache = false;
-          # Computed using https://dnscrypt.info/stamps/
-          static.server.stamp =
-            "sdns://AQAAAAAAAAAADzE5Mi4xNjguMS4yOjQ0MyAUQdg6"
-            +"_RIIpK6pHkINhrv7nxwIG5c7b_m5NJVT3A1AXRYyLmRuc2NyeXB0LWNlcnQuc2VydmVy";
-        };
-      networking.nameservers = [ "::1" ];
-      security.pki.certificateFiles = [ snakeoil.ca.cert ];
-    };
-
-  };
-
-  testScript = ''
-    with subtest("The server can generate the ephemeral keypair"):
-        server.wait_for_unit("dnscrypt-wrapper")
-        server.wait_for_file("/var/lib/dnscrypt-wrapper/2.dnscrypt-cert.server.key")
-        server.wait_for_file("/var/lib/dnscrypt-wrapper/2.dnscrypt-cert.server.crt")
-        almost_expiration = server.succeed("date --date '4days 23 hours 56min'").strip()
-
-    with subtest("The DNSCrypt client can connect to the server"):
-        server.wait_for_unit("sslh")
-        client.wait_until_succeeds("journalctl -u dnscrypt-proxy2 --grep '\[server\] OK'")
-
-    with subtest("HTTP client can connect to the server"):
-        server.wait_for_unit("nginx")
-        client.succeed("curl -s --fail https://${snakeoil.domain}/ip | grep -q fd::b")
-
-    with subtest("DNS queries over UDP are working"):
-        server.wait_for_unit("bind")
-        client.wait_for_open_port(53)
-        assert "2001:db8::1" in client.wait_until_succeeds(
-            "host -U example.org"
-        ), "The IP address of 'example.org' does not match 2001:db8::1"
-
-    with subtest("DNS queries over TCP are working"):
-        server.wait_for_unit("bind")
-        client.wait_for_open_port(53)
-        assert "2001:db8::1" in client.wait_until_succeeds(
-            "host -T example.org"
-        ), "The IP address of 'example.org' does not match 2001:db8::1"
-
-    with subtest("The server rotates the ephemeral keys"):
-        # advance time by a little less than 5 days
-        server.succeed(f"date -s '{almost_expiration}'")
-        client.succeed(f"date -s '{almost_expiration}'")
-        server.wait_for_file("/var/lib/dnscrypt-wrapper/oldkeys")
-
-    with subtest("The client can still connect to the server"):
-        client.systemctl("restart dnscrypt-proxy2")
-        client.wait_until_succeeds("host -T example.org")
-        client.wait_until_succeeds("host -U example.org")
-  '';
-}
diff --git a/nixos/tests/dnscrypt-wrapper/public.key b/nixos/tests/dnscrypt-wrapper/public.key
deleted file mode 100644
index 80232b97f529d..0000000000000
--- a/nixos/tests/dnscrypt-wrapper/public.key
+++ /dev/null
@@ -1 +0,0 @@
-AØ:ý¤®©B
†»ûŸ—;où¹4•SÜ
@]
\ No newline at end of file
diff --git a/nixos/tests/dnscrypt-wrapper/secret.key b/nixos/tests/dnscrypt-wrapper/secret.key
deleted file mode 100644
index 01fbf8e08b7a3..0000000000000
--- a/nixos/tests/dnscrypt-wrapper/secret.key
+++ /dev/null
@@ -1 +0,0 @@
-G½>Æ©» ì>Ðà¥(Ò²‡¼J•«º=Ÿ„ÝÁlìAØ:ý¤®©B
†»ûŸ—;où¹4•SÜ
@]
\ No newline at end of file
diff --git a/nixos/tests/docker-tools-nix-shell.nix b/nixos/tests/docker-tools-nix-shell.nix
new file mode 100644
index 0000000000000..c2ae2124e0a18
--- /dev/null
+++ b/nixos/tests/docker-tools-nix-shell.nix
@@ -0,0 +1,95 @@
+# nix-build -A nixosTests.docker-tools-nix-shell
+{ config, lib, ... }:
+let
+  inherit (config.node.pkgs.dockerTools) examples;
+in
+{
+  name = "docker-tools-nix-shell";
+  meta = with lib.maintainers; {
+    maintainers = [
+      infinisil
+      roberth
+    ];
+  };
+
+  nodes = {
+    docker =
+      { ... }:
+      {
+        virtualisation = {
+          diskSize = 3072;
+          docker.enable = true;
+        };
+      };
+  };
+
+  testScript = ''
+    docker.wait_for_unit("sockets.target")
+
+    with subtest("buildImageWithNixDB: Has a nix database"):
+        docker.succeed(
+            "docker load --input='${examples.nix}'",
+            "docker run --rm ${examples.nix.imageName} nix-store -q --references /bin/bash"
+        )
+
+    with subtest("buildNixShellImage: Can build a basic derivation"):
+        docker.succeed(
+            "${examples.nix-shell-basic} | docker load",
+            "docker run --rm nix-shell-basic bash -c 'buildDerivation && $out/bin/hello' | grep '^Hello, world!$'"
+        )
+
+    with subtest("buildNixShellImage: Runs the shell hook"):
+        docker.succeed(
+            "${examples.nix-shell-hook} | docker load",
+            "docker run --rm -it nix-shell-hook | grep 'This is the shell hook!'"
+        )
+
+    with subtest("buildNixShellImage: Sources stdenv, making build inputs available"):
+        docker.succeed(
+            "${examples.nix-shell-inputs} | docker load",
+            "docker run --rm -it nix-shell-inputs | grep 'Hello, world!'"
+        )
+
+    with subtest("buildNixShellImage: passAsFile works"):
+        docker.succeed(
+            "${examples.nix-shell-pass-as-file} | docker load",
+            "docker run --rm -it nix-shell-pass-as-file | grep 'this is a string'"
+        )
+
+    with subtest("buildNixShellImage: run argument works"):
+        docker.succeed(
+            "${examples.nix-shell-run} | docker load",
+            "docker run --rm -it nix-shell-run | grep 'This shell is not interactive'"
+        )
+
+    with subtest("buildNixShellImage: command argument works"):
+        docker.succeed(
+            "${examples.nix-shell-command} | docker load",
+            "docker run --rm -it nix-shell-command | grep 'This shell is interactive'"
+        )
+
+    with subtest("buildNixShellImage: home directory is writable by default"):
+        docker.succeed(
+            "${examples.nix-shell-writable-home} | docker load",
+            "docker run --rm -it nix-shell-writable-home"
+        )
+
+    with subtest("buildNixShellImage: home directory can be made non-existent"):
+        docker.succeed(
+            "${examples.nix-shell-nonexistent-home} | docker load",
+            "docker run --rm -it nix-shell-nonexistent-home"
+        )
+
+    with subtest("buildNixShellImage: can build derivations"):
+        docker.succeed(
+            "${examples.nix-shell-build-derivation} | docker load",
+            "docker run --rm -it nix-shell-build-derivation"
+        )
+
+    with subtest("streamLayeredImage: with nix db"):
+        docker.succeed(
+            "${examples.nix-layered} | docker load",
+            "docker run --rm ${examples.nix-layered.imageName} nix-store -q --references /bin/bash"
+        )
+  '';
+}
diff --git a/nixos/tests/docker-tools.nix b/nixos/tests/docker-tools.nix
index c8a227eb2cf7b..41bd4a621545f 100644
--- a/nixos/tests/docker-tools.nix
+++ b/nixos/tests/docker-tools.nix
@@ -60,7 +60,7 @@ let
     };
 
   nonRootTestImage =
-    pkgs.dockerTools.streamLayeredImage rec {
+    pkgs.dockerTools.streamLayeredImage {
       name = "non-root-test";
       tag = "latest";
       uid = 1000;
@@ -567,60 +567,6 @@ in {
         docker.succeed("docker run --rm image-with-certs:latest test -r /etc/pki/tls/certs/ca-bundle.crt")
         docker.succeed("docker image rm image-with-certs:latest")
 
-    with subtest("buildNixShellImage: Can build a basic derivation"):
-        docker.succeed(
-            "${examples.nix-shell-basic} | docker load",
-            "docker run --rm nix-shell-basic bash -c 'buildDerivation && $out/bin/hello' | grep '^Hello, world!$'"
-        )
-
-    with subtest("buildNixShellImage: Runs the shell hook"):
-        docker.succeed(
-            "${examples.nix-shell-hook} | docker load",
-            "docker run --rm -it nix-shell-hook | grep 'This is the shell hook!'"
-        )
-
-    with subtest("buildNixShellImage: Sources stdenv, making build inputs available"):
-        docker.succeed(
-            "${examples.nix-shell-inputs} | docker load",
-            "docker run --rm -it nix-shell-inputs | grep 'Hello, world!'"
-        )
-
-    with subtest("buildNixShellImage: passAsFile works"):
-        docker.succeed(
-            "${examples.nix-shell-pass-as-file} | docker load",
-            "docker run --rm -it nix-shell-pass-as-file | grep 'this is a string'"
-        )
-
-    with subtest("buildNixShellImage: run argument works"):
-        docker.succeed(
-            "${examples.nix-shell-run} | docker load",
-            "docker run --rm -it nix-shell-run | grep 'This shell is not interactive'"
-        )
-
-    with subtest("buildNixShellImage: command argument works"):
-        docker.succeed(
-            "${examples.nix-shell-command} | docker load",
-            "docker run --rm -it nix-shell-command | grep 'This shell is interactive'"
-        )
-
-    with subtest("buildNixShellImage: home directory is writable by default"):
-        docker.succeed(
-            "${examples.nix-shell-writable-home} | docker load",
-            "docker run --rm -it nix-shell-writable-home"
-        )
-
-    with subtest("buildNixShellImage: home directory can be made non-existent"):
-        docker.succeed(
-            "${examples.nix-shell-nonexistent-home} | docker load",
-            "docker run --rm -it nix-shell-nonexistent-home"
-        )
-
-    with subtest("buildNixShellImage: can build derivations"):
-        docker.succeed(
-            "${examples.nix-shell-build-derivation} | docker load",
-            "docker run --rm -it nix-shell-build-derivation"
-        )
-
     with subtest("streamLayeredImage: chown is persistent in fakeRootCommands"):
         docker.succeed(
             "${chownTestImage} | docker load",
diff --git a/nixos/tests/domination.nix b/nixos/tests/domination.nix
index 409a7f3029c42..04899c5065311 100644
--- a/nixos/tests/domination.nix
+++ b/nixos/tests/domination.nix
@@ -18,6 +18,9 @@ import ./make-test-python.nix ({ pkgs, ... }: {
   testScript =
     ''
       machine.wait_for_x()
+      # Add a dummy sound card, or an error reporting popup will appear,
+      # covering the main window and preventing OCR
+      machine.execute("modprobe snd-dummy")
       machine.execute("domination >&2 &")
       machine.wait_for_window("Menu")
       machine.wait_for_text(r"(New Game|Start Server|Load Game|Help Manual|Join Game|About|Play Online)")
diff --git a/nixos/tests/druid/default.nix b/nixos/tests/druid/default.nix
new file mode 100644
index 0000000000000..d4b7c9bffa772
--- /dev/null
+++ b/nixos/tests/druid/default.nix
@@ -0,0 +1,289 @@
+{ pkgs, ... }:
+let
+  inherit (pkgs) lib;
+  commonConfig = {
+    "druid.zk.service.host" = "zk1:2181";
+    "druid.extensions.loadList" = ''[ "druid-histogram", "druid-datasketches",  "mysql-metadata-storage", "druid-avro-extensions", "druid-parquet-extensions", "druid-lookups-cached-global", "druid-hdfs-storage","druid-kafka-indexing-service","druid-basic-security","druid-kinesis-indexing-service"]'';
+    "druid.startup.logging.logProperties" = "true";
+    "druid.metadata.storage.connector.connectURI" = "jdbc:mysql://mysql:3306/druid";
+    "druid.metadata.storage.connector.user" = "druid";
+    "druid.metadata.storage.connector.password" = "druid";
+    "druid.request.logging.type" = "file";
+    "druid.request.logging.dir" = "/var/log/druid/requests";
+    "druid.javascript.enabled" = "true";
+    "druid.sql.enable" = "true";
+    "druid.metadata.storage.type" = "mysql";
+    "druid.storage.type" = "hdfs";
+    "druid.storage.storageDirectory" = "/druid-deepstore";
+  };
+  log4jConfig = ''
+    <?xml version="1.0" encoding="UTF-8" ?>
+    <Configuration status="WARN">
+     <Appenders>
+        <Console name="Console" target="SYSTEM_OUT">
+          <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
+        </Console>
+      </Appenders>
+      <Loggers>
+        <Root level="error">
+          <AppenderRef ref="Console"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+  '';
+  log4j = pkgs.writeText "log4j2.xml" log4jConfig;
+  coreSite = {
+    "fs.defaultFS" = "hdfs://namenode:8020";
+  };
+  tests = {
+    default = testsForPackage {
+      druidPackage = pkgs.druid;
+      hadoopPackage = pkgs.hadoop_3_2;
+    };
+  };
+  testsForPackage =
+    args:
+    lib.recurseIntoAttrs {
+      druidCluster = testDruidCluster args;
+      passthru.override = args': testsForPackage (args // args');
+    };
+  testDruidCluster =
+    { druidPackage, hadoopPackage, ... }:
+    pkgs.testers.nixosTest {
+      name = "druid-hdfs";
+      nodes = {
+        zk1 =
+          { ... }:
+          {
+            services.zookeeper.enable = true;
+            networking.firewall.allowedTCPPorts = [ 2181 ];
+          };
+        namenode =
+          { ... }:
+          {
+            services.hadoop = {
+              package = hadoopPackage;
+              hdfs = {
+                namenode = {
+                  enable = true;
+                  openFirewall = true;
+                  formatOnInit = true;
+                };
+              };
+              inherit coreSite;
+            };
+          };
+        datanode =
+          { ... }:
+          {
+            services.hadoop = {
+              package = hadoopPackage;
+              hdfs.datanode = {
+                enable = true;
+                openFirewall = true;
+              };
+              inherit coreSite;
+            };
+          };
+        mm =
+          { ... }:
+          {
+            virtualisation.memorySize = 1024;
+            services.druid = {
+              inherit commonConfig log4j;
+              package = druidPackage;
+              extraClassPaths = [ "/etc/hadoop-conf" ];
+              middleManager = {
+                config = {
+                  "druid.indexer.task.baseTaskDir" = "/tmp/druid/persistent/task";
+                  "druid.worker.capacity" = 1;
+                  "druid.indexer.logs.type" = "file";
+                  "druid.indexer.logs.directory" = "/var/log/druid/indexer";
+                  "druid.indexer.runner.startPort" = 8100;
+                  "druid.indexer.runner.endPort" = 8101;
+                };
+                enable = true;
+                openFirewall = true;
+              };
+            };
+            services.hadoop = {
+              gatewayRole.enable = true;
+              package = hadoopPackage;
+              inherit coreSite;
+            };
+          };
+        overlord =
+          { ... }:
+          {
+            services.druid = {
+              inherit commonConfig log4j;
+              package = druidPackage;
+              extraClassPaths = [ "/etc/hadoop-conf" ];
+              overlord = {
+                config = {
+                  "druid.indexer.runner.type" = "remote";
+                  "druid.indexer.storage.type" = "metadata";
+                };
+                enable = true;
+                openFirewall = true;
+              };
+            };
+            services.hadoop = {
+              gatewayRole.enable = true;
+              package = hadoopPackage;
+              inherit coreSite;
+            };
+          };
+        broker =
+          { ... }:
+          {
+            services.druid = {
+              package = druidPackage;
+              inherit commonConfig log4j;
+              extraClassPaths = [ "/etc/hadoop-conf" ];
+              broker = {
+                config = {
+                  "druid.plaintextPort" = 8082;
+                  "druid.broker.http.numConnections" = "2";
+                  "druid.server.http.numThreads" = "2";
+                  "druid.processing.buffer.sizeBytes" = "100";
+                  "druid.processing.numThreads" = "1";
+                  "druid.processing.numMergeBuffers" = "1";
+                  "druid.broker.cache.unCacheable" = ''["groupBy"]'';
+                  "druid.lookup.snapshotWorkingDir" = "/opt/broker/lookups";
+                };
+                enable = true;
+                openFirewall = true;
+              };
+            };
+            services.hadoop = {
+              gatewayRole.enable = true;
+              package = hadoopPackage;
+              inherit coreSite;
+            };
+
+          };
+        historical =
+          { ... }:
+          {
+            services.druid = {
+              package = druidPackage;
+              inherit commonConfig log4j;
+              extraClassPaths = [ "/etc/hadoop-conf" ];
+              historical = {
+                config = {
+                  "maxSize" = 200000000;
+                  "druid.lookup.snapshotWorkingDir" = "/opt/historical/lookups";
+                };
+                segmentLocations = [
+                  {
+                    "path" = "/tmp/1";
+                    "maxSize" = "100000000";
+                  }
+                  {
+                    "path" = "/tmp/2";
+                    "maxSize" = "100000000";
+                  }
+                ];
+                enable = true;
+                openFirewall = true;
+              };
+            };
+            services.hadoop = {
+              gatewayRole.enable = true;
+              package = hadoopPackage;
+              inherit coreSite;
+            };
+
+          };
+        coordinator =
+          { ... }:
+          {
+            services.druid = {
+              package = druidPackage;
+              inherit commonConfig log4j;
+              extraClassPaths = [ "/etc/hadoop-conf" ];
+              coordinator = {
+                config = {
+                  "druid.plaintextPort" = 9091;
+                  "druid.service" = "coordinator";
+                  "druid.coordinator.startDelay" = "PT10S";
+                  "druid.coordinator.period" = "PT10S";
+                  "druid.manager.config.pollDuration" = "PT10S";
+                  "druid.manager.segments.pollDuration" = "PT10S";
+                  "druid.manager.rules.pollDuration" = "PT10S";
+                };
+                enable = true;
+                openFirewall = true;
+              };
+            };
+            services.hadoop = {
+              gatewayRole.enable = true;
+              package = hadoopPackage;
+              inherit coreSite;
+            };
+
+          };
+
+        mysql =
+          { ... }:
+          {
+            services.mysql = {
+              enable = true;
+              package = pkgs.mariadb;
+              initialDatabases = [ { name = "druid"; } ];
+              initialScript = pkgs.writeText "mysql-init.sql" ''
+                CREATE USER 'druid'@'%' IDENTIFIED BY 'druid';
+                GRANT ALL PRIVILEGES ON druid.* TO 'druid'@'%';
+              '';
+            };
+            networking.firewall.allowedTCPPorts = [ 3306 ];
+          };
+
+      };
+      testScript = ''
+        start_all()
+        namenode.wait_for_unit("hdfs-namenode")
+        namenode.wait_for_unit("network.target")
+        namenode.wait_for_open_port(8020)
+        namenode.succeed("ss -tulpne | systemd-cat")
+        namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
+        namenode.wait_for_open_port(9870)
+        datanode.wait_for_unit("hdfs-datanode")
+        datanode.wait_for_unit("network.target")
+
+        mm.succeed("mkdir -p /quickstart/")
+        mm.succeed("cp -r ${pkgs.druid}/quickstart/* /quickstart/")
+        mm.succeed("touch /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")
+        mm.succeed("zcat /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz | head -n 10 > /quickstart/tutorial/wikiticker-2015-09-12-sampled.json || true")
+        mm.succeed("rm /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz && gzip /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")
+
+        namenode.succeed("sudo -u hdfs hdfs dfs -mkdir /druid-deepstore")
+        namenode.succeed("HADOOP_USER_NAME=druid sudo -u hdfs hdfs dfs -chown druid:hadoop /druid-deepstore")
+
+
+        ### Druid tests
+        coordinator.wait_for_unit("druid-coordinator")
+        overlord.wait_for_unit("druid-overlord")
+        historical.wait_for_unit("druid-historical")
+        mm.wait_for_unit("druid-middleManager")
+
+        coordinator.wait_for_open_port(9091)
+        overlord.wait_for_open_port(8090)
+        historical.wait_for_open_port(8083)
+        mm.wait_for_open_port(8091)
+
+        broker.wait_for_unit("network.target")
+        broker.wait_for_open_port(8082)
+
+        broker.succeed("curl -X 'POST' -H 'Content-Type:application/json' -d @${pkgs.druid}/quickstart/tutorial/wikipedia-index.json http://coordinator:9091/druid/indexer/v1/task")
+        broker.wait_until_succeeds("curl http://coordinator:9091/druid/coordinator/v1/metadata/datasources | grep  'wikipedia'")
+
+        broker.wait_until_succeeds("curl http://localhost:8082/druid/v2/datasources/ | grep wikipedia")
+        broker.succeed("curl -X 'POST' -H 'Content-Type:application/json' -d @${pkgs.druid}/quickstart/tutorial/wikipedia-top-pages.json http://localhost:8082/druid/v2/")
+
+      '';
+
+    };
+in
+tests
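For reference, a usage sketch of the passthru.override hook defined in this new file. The attribute path follows the code above; the package arguments are placeholders and the surrounding let is assumed to be evaluated from nixos/tests:

    # re-run the Druid cluster test against a different Druid/Hadoop combination
    let
      druidTests = import ./druid { inherit pkgs; };
    in
    (druidTests.default.passthru.override {
      druidPackage = pkgs.druid;       # swap in another Druid build here
      hadoopPackage = pkgs.hadoop_3_2; # or another Hadoop release
    }).druidCluster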
diff --git a/nixos/tests/ec2.nix b/nixos/tests/ec2.nix
index e649761d029df..4511a37854eae 100644
--- a/nixos/tests/ec2.nix
+++ b/nixos/tests/ec2.nix
@@ -22,6 +22,8 @@ let
           ln -s vda1 /dev/xvda1
         '';
 
+        amazonImage.format = "qcow2";
+
         # In a NixOS test the serial console is occupied by the "backdoor"
         # (see testing/test-instrumentation.nix) and is incompatible with
         # the configuration in virtualisation/amazon-image.nix.
@@ -53,7 +55,7 @@ let
       }
     ];
   }).config;
-  image = "${imageCfg.system.build.amazonImage}/${imageCfg.amazonImage.name}.vhd";
+  image = "${imageCfg.system.build.amazonImage}/${imageCfg.amazonImage.name}.qcow2";
 
   sshKeys = import ./ssh-keys.nix pkgs;
   snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
@@ -63,6 +65,7 @@ let
 in {
   boot-ec2-nixops = makeEc2Test {
     name         = "nixops-userdata";
+    meta.timeout = 600;
     inherit image;
     sshPublicKey = snakeOilPublicKey; # That's right folks! My user's key is also the host key!
 
@@ -95,7 +98,7 @@ in {
       machine.succeed(
           "echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts"
       )
-      machine.succeed("ssh -o BatchMode=yes localhost exit")
+      machine.succeed("ssh -o BatchMode=yes localhost exit", timeout=120)
 
       # Test whether the root disk was resized.
       blocks, block_size = map(int, machine.succeed("stat -c %b:%S -f /").split(":"))
diff --git a/nixos/tests/eintopf.nix b/nixos/tests/eintopf.nix
new file mode 100644
index 0000000000000..a1c05d6513041
--- /dev/null
+++ b/nixos/tests/eintopf.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "eintopf";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ onny ];
+  };
+
+  nodes = {
+    eintopf = { config, pkgs, ... }: {
+      services.eintopf = {
+        enable = true;
+      };
+    };
+  };
+
+  testScript = ''
+    eintopf.start()
+    eintopf.wait_for_unit("eintopf.service")
+    eintopf.wait_for_open_port(3333)
+    eintopf.succeed("curl -sSfL http://eintopf:3333 | grep 'Es sind keine Veranstaltungen eingetragen'")
+  '';
+})
diff --git a/nixos/tests/elk.nix b/nixos/tests/elk.nix
index b5a8cb532ae0a..87c82877fe109 100644
--- a/nixos/tests/elk.nix
+++ b/nixos/tests/elk.nix
@@ -16,7 +16,7 @@ let
     import ./make-test-python.nix ({
     inherit name;
     meta = with pkgs.lib.maintainers; {
-      maintainers = [ eelco offline basvandijk ];
+      maintainers = [ offline basvandijk ];
     };
     nodes = {
       one =
diff --git a/nixos/tests/enlightenment.nix b/nixos/tests/enlightenment.nix
index bce14c1ddd5c3..ba917e18c09f4 100644
--- a/nixos/tests/enlightenment.nix
+++ b/nixos/tests/enlightenment.nix
@@ -4,6 +4,9 @@ import ./make-test-python.nix ({ pkgs, ...} :
 
   meta = with pkgs.lib.maintainers; {
     maintainers = [ romildo ];
+    timeout = 600;
+    # OCR tests are flaky
+    broken = true;
   };
 
   nodes.machine = { ... }:
@@ -18,7 +21,6 @@ import ./make-test-python.nix ({ pkgs, ...} :
         user = "alice";
       };
     };
-    hardware.pulseaudio.enable = true; # needed for the factl test, /dev/snd/* exists without them but udev doesn't care then
     environment.systemPackages = [ pkgs.xdotool ];
     services.acpid.enable = true;
     services.connman.enable = true;
diff --git a/nixos/tests/fcitx5/default.nix b/nixos/tests/fcitx5/default.nix
index c113f2e2c052c..379615bd44133 100644
--- a/nixos/tests/fcitx5/default.nix
+++ b/nixos/tests/fcitx5/default.nix
@@ -14,22 +14,20 @@ rec {
       pkgs.alacritty
     ];
 
-    services.xserver = {
+    services.displayManager.autoLogin = {
       enable = true;
+      user = "alice";
+    };
 
-      displayManager = {
-        lightdm.enable = true;
-        autoLogin = {
-          enable = true;
-          user = "alice";
-        };
-      };
-
+    services.xserver = {
+      enable = true;
+      displayManager.lightdm.enable = true;
       desktopManager.xfce.enable = true;
     };
 
     i18n.inputMethod = {
-      enabled = "fcitx5";
+      enable = true;
+      type = "fcitx5";
       fcitx5.addons = [
         pkgs.fcitx5-chinese-addons
         pkgs.fcitx5-hangul
@@ -89,10 +87,13 @@ rec {
             machine.succeed("xauth merge ${xauth}")
             machine.sleep(5)
 
+            machine.wait_until_succeeds("pgrep fcitx5")
             machine.succeed("su - ${user.name} -c 'kill $(pgrep fcitx5)'")
             machine.sleep(1)
 
             machine.succeed("su - ${user.name} -c 'alacritty >&2 &'")
+            machine.wait_for_window("alice@machine")
+
             machine.succeed("su - ${user.name} -c 'fcitx5 >&2 &'")
             machine.sleep(10)
 
diff --git a/nixos/tests/filesender.nix b/nixos/tests/filesender.nix
new file mode 100644
index 0000000000000..9274ddbf7e90e
--- /dev/null
+++ b/nixos/tests/filesender.nix
@@ -0,0 +1,137 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "filesender";
+  meta = {
+    maintainers = with lib.maintainers; [ nhnn ];
+    broken = pkgs.stdenv.isAarch64; # selenium.common.exceptions.WebDriverException: Message: Unsupported platform/architecture combination: linux/aarch64
+  };
+
+  nodes.filesender = { ... }: let
+    format = pkgs.formats.php { };
+  in {
+    networking.firewall.allowedTCPPorts = [ 80 ];
+
+    services.filesender.enable = true;
+    services.filesender.localDomain = "filesender";
+    services.filesender.settings = {
+      auth_sp_saml_authentication_source = "default";
+      auth_sp_saml_uid_attribute = "uid";
+      storage_filesystem_path = "/tmp";
+      site_url = "http://filesender";
+      force_ssl = false;
+      admin = "";
+      admin_email = "admin@localhost";
+      email_reply_to = "noreply@localhost";
+    };
+    services.simplesamlphp.filesender = {
+      settings = {
+        baseurlpath = "http://filesender/saml";
+        "module.enable".exampleauth = true;
+      };
+      authSources = {
+        admin = [ "core:AdminPassword" ];
+        default = format.lib.mkMixedArray [ "exampleauth:UserPass" ] {
+          "user:password" = {
+            uid = [ "user" ];
+            cn = [ "user" ];
+            mail = [ "user@nixos.org" ];
+          };
+        };
+      };
+    };
+  };
+
+  nodes.client =
+    { pkgs
+    , nodes
+    , ...
+    }:
+    let
+      filesenderIP = (builtins.head (nodes.filesender.networking.interfaces.eth1.ipv4.addresses)).address;
+    in
+    {
+      networking.hosts.${filesenderIP} = [ "filesender" ];
+
+      environment.systemPackages =
+        let
+          username = "user";
+          password = "password";
+          browser-test =
+            pkgs.writers.writePython3Bin "browser-test"
+              {
+                libraries = [ pkgs.python3Packages.selenium ];
+                flakeIgnore = [ "E124" "E501" ];
+              } ''
+              from selenium.webdriver.common.by import By
+              from selenium.webdriver import Firefox
+              from selenium.webdriver.firefox.options import Options
+              from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
+              from selenium.webdriver.firefox.service import Service
+              from selenium.webdriver.support.ui import WebDriverWait
+              from selenium.webdriver.support import expected_conditions as EC
+              from subprocess import STDOUT
+              import string
+              import random
+              import logging
+              import time
+              selenium_logger = logging.getLogger("selenium")
+              selenium_logger.setLevel(logging.DEBUG)
+              selenium_logger.addHandler(logging.StreamHandler())
+              profile = FirefoxProfile()
+              profile.set_preference("browser.download.folderList", 2)
+              profile.set_preference("browser.download.manager.showWhenStarting", False)
+              profile.set_preference("browser.download.dir", "/tmp/firefox")
+              profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain;text/txt")
+              options = Options()
+              options.profile = profile
+              options.add_argument('--headless')
+              service = Service(log_output=STDOUT)
+              driver = Firefox(options=options, service=service)
+              driver.set_window_size(1024, 768)
+              driver.implicitly_wait(30)
+              driver.get('http://filesender/')
+              wait = WebDriverWait(driver, 20)
+              wait.until(EC.title_contains("FileSender"))
+              driver.find_element(By.ID, "btn_logon").click()
+              wait.until(EC.title_contains("Enter your username and password"))
+              driver.find_element(By.ID, 'username').send_keys(
+                  '${username}'
+              )
+              driver.find_element(By.ID, 'password').send_keys(
+                  '${password}'
+              )
+              driver.find_element(By.ID, "submit_button").click()
+              wait.until(EC.title_contains("FileSender"))
+              wait.until(EC.presence_of_element_located((By.ID, "topmenu_logoff")))
+              test_string = "".join(random.choices(string.ascii_uppercase + string.digits, k=20))
+              with open("/tmp/test_file.txt", "w") as file:
+                  file.write(test_string)
+              driver.find_element(By.ID, "files").send_keys("/tmp/test_file.txt")
+              time.sleep(2)
+              driver.find_element(By.CSS_SELECTOR, '.start').click()
+              wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".download_link")))
+              download_link = driver.find_element(By.CSS_SELECTOR, '.download_link > textarea').get_attribute('value').strip()
+              driver.get(download_link)
+              wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".download")))
+              driver.find_element(By.CSS_SELECTOR, '.download').click()
+              wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".ui-dialog-buttonset > button:nth-child(2)")))
+              driver.find_element(By.CSS_SELECTOR, ".ui-dialog-buttonset > button:nth-child(2)").click()
+              driver.close()
+              driver.quit()
+            '';
+        in
+        [
+          pkgs.firefox-unwrapped
+          pkgs.geckodriver
+          browser-test
+        ];
+    };
+
+  testScript = ''
+    start_all()
+    filesender.wait_for_file("/run/phpfpm/filesender.sock")
+    filesender.wait_for_open_port(80)
+    if "If you have received an invitation to access this site as a guest" not in client.wait_until_succeeds("curl -sS -f http://filesender"):
+      raise Exception("filesender returned invalid html")
+    client.succeed("browser-test")
+  '';
+})
diff --git a/nixos/tests/filesystems-overlayfs.nix b/nixos/tests/filesystems-overlayfs.nix
index d7cbf640abe49..faac9078a5206 100644
--- a/nixos/tests/filesystems-overlayfs.nix
+++ b/nixos/tests/filesystems-overlayfs.nix
@@ -26,7 +26,6 @@ in
 
   nodes.machine = { config, pkgs, ... }: {
     boot.initrd.systemd.enable = true;
-    boot.initrd.availableKernelModules = [ "overlay" ];
 
     virtualisation.fileSystems = {
       "/initrd-overlay" = {
diff --git a/nixos/tests/firefly-iii-data-importer.nix b/nixos/tests/firefly-iii-data-importer.nix
new file mode 100644
index 0000000000000..aba41576d7974
--- /dev/null
+++ b/nixos/tests/firefly-iii-data-importer.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+
+  {
+    name = "firefly-iii-data-importer";
+    meta.maintainers = [ lib.maintainers.savyajha ];
+
+    nodes.dataImporter =
+      { ... }:
+      {
+        services.firefly-iii-data-importer = {
+          enable = true;
+          enableNginx = true;
+          settings = {
+            LOG_CHANNEL = "stdout";
+            USE_CACHE = true;
+          };
+        };
+      };
+
+    testScript = ''
+      dataImporter.wait_for_unit("phpfpm-firefly-iii-data-importer.service")
+      dataImporter.wait_for_unit("nginx.service")
+      dataImporter.succeed("curl -fvvv -Ls http://localhost/token | grep 'Firefly III Data Import Tool'")
+    '';
+  }
+)
diff --git a/nixos/tests/firefly-iii.nix b/nixos/tests/firefly-iii.nix
index c93d799320a48..f8e4ca4bfe2b4 100644
--- a/nixos/tests/firefly-iii.nix
+++ b/nixos/tests/firefly-iii.nix
@@ -1,14 +1,19 @@
-import ./make-test-python.nix ({ lib, pkgs, ... }: {
+import ./make-test-python.nix ({ lib, ... }:
+
+let
+  db-pass = "Test2Test2";
+  app-key = "TestTestTestTestTestTestTestTest";
+in
+{
   name = "firefly-iii";
   meta.maintainers = [ lib.maintainers.savyajha ];
 
-  nodes.machine = { config, ... }: {
+  nodes.fireflySqlite = { config, ... }: {
     environment.etc = {
-      "firefly-iii-appkey".text = "TestTestTestTestTestTestTestTest";
+      "firefly-iii-appkey".text = app-key;
     };
     services.firefly-iii = {
       enable = true;
-      virtualHost = "http://localhost";
       enableNginx = true;
       settings = {
         APP_KEY_FILE = "/etc/firefly-iii-appkey";
@@ -18,9 +23,89 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
     };
   };
 
+  nodes.fireflyPostgresql = { config, pkgs, ... }: {
+    environment.etc = {
+      "firefly-iii-appkey".text = app-key;
+      "postgres-pass".text = db-pass;
+    };
+    services.firefly-iii = {
+      enable = true;
+      enableNginx = true;
+      settings = {
+        APP_KEY_FILE = "/etc/firefly-iii-appkey";
+        LOG_CHANNEL = "stdout";
+        SITE_OWNER = "mail@example.com";
+        DB_CONNECTION = "pgsql";
+        DB_DATABASE = "firefly";
+        DB_USERNAME = "firefly";
+        DB_PASSWORD_FILE = "/etc/postgres-pass";
+        PGSQL_SCHEMA = "firefly";
+      };
+    };
+
+    services.postgresql = {
+      enable = true;
+      package = pkgs.postgresql_16;
+      authentication = ''
+        local all postgres peer
+        local firefly firefly password
+      '';
+      initialScript = pkgs.writeText "firefly-init.sql" ''
+        CREATE USER "firefly" WITH LOGIN PASSWORD '${db-pass}';
+        CREATE DATABASE "firefly" WITH OWNER "firefly";
+        \c firefly
+        CREATE SCHEMA AUTHORIZATION firefly;
+      '';
+    };
+  };
+
+  nodes.fireflyMysql = { config, pkgs, ... }: {
+    environment.etc = {
+      "firefly-iii-appkey".text = app-key;
+      "mysql-pass".text = db-pass;
+    };
+    services.firefly-iii = {
+      enable = true;
+      enableNginx = true;
+      settings = {
+        APP_KEY_FILE = "/etc/firefly-iii-appkey";
+        LOG_CHANNEL = "stdout";
+        SITE_OWNER = "mail@example.com";
+        DB_CONNECTION = "mysql";
+        DB_DATABASE = "firefly";
+        DB_USERNAME = "firefly";
+        DB_PASSWORD_FILE = "/etc/mysql-pass";
+        DB_SOCKET = "/run/mysqld/mysqld.sock";
+      };
+    };
+
+    services.mysql = {
+      enable = true;
+      package = pkgs.mariadb;
+      initialScript = pkgs.writeText "firefly-init.sql" ''
+        create database firefly DEFAULT CHARACTER SET utf8mb4;
+        create user 'firefly'@'localhost' identified by '${db-pass}';
+        grant all on firefly.* to 'firefly'@'localhost';
+      '';
+      settings.mysqld.character-set-server = "utf8mb4";
+    };
+  };
+
   testScript = ''
-    machine.wait_for_unit("phpfpm-firefly-iii.service")
-    machine.wait_for_unit("nginx.service")
-    machine.succeed("curl -fvvv -Ls http://localhost/ | grep 'Firefly III'")
+    fireflySqlite.wait_for_unit("phpfpm-firefly-iii.service")
+    fireflySqlite.wait_for_unit("nginx.service")
+    fireflySqlite.succeed("curl -fvvv -Ls http://localhost/ | grep 'Firefly III'")
+    fireflySqlite.succeed("curl -fvvv -Ls http://localhost/v1/js/app.js")
+    fireflySqlite.succeed("systemctl start firefly-iii-cron.service")
+    fireflyPostgresql.wait_for_unit("phpfpm-firefly-iii.service")
+    fireflyPostgresql.wait_for_unit("nginx.service")
+    fireflyPostgresql.wait_for_unit("postgresql.service")
+    fireflyPostgresql.succeed("curl -fvvv -Ls http://localhost/ | grep 'Firefly III'")
+    fireflyPostgresql.succeed("systemctl start firefly-iii-cron.service")
+    fireflyMysql.wait_for_unit("phpfpm-firefly-iii.service")
+    fireflyMysql.wait_for_unit("nginx.service")
+    fireflyMysql.wait_for_unit("mysql.service")
+    fireflyMysql.succeed("curl -fvvv -Ls http://localhost/ | grep 'Firefly III'")
+    fireflyMysql.succeed("systemctl start firefly-iii-cron.service")
   '';
 })
diff --git a/nixos/tests/firefox.nix b/nixos/tests/firefox.nix
index fbea95dc75235..8243defbb9f2e 100644
--- a/nixos/tests/firefox.nix
+++ b/nixos/tests/firefox.nix
@@ -1,9 +1,9 @@
-import ./make-test-python.nix ({ pkgs, firefoxPackage, ... }:
+import ./make-test-python.nix ({ lib, pkgs, firefoxPackage, ... }:
 {
   name = firefoxPackage.pname;
 
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco shlevy ];
+    maintainers = [ shlevy ];
   };
 
   nodes.machine =
@@ -21,8 +21,7 @@ import ./make-test-python.nix ({ pkgs, firefoxPackage, ... }:
       # Create a virtual sound device, with mixing
       # and all, for recording audio.
       boot.kernelModules = [ "snd-aloop" ];
-      sound.enable = true;
-      sound.extraConfig = ''
+      environment.etc."asound.conf".text = ''
         pcm.!default {
           type plug
           slave.pcm pcm.dmixer
@@ -55,7 +54,7 @@ import ./make-test-python.nix ({ pkgs, firefoxPackage, ... }:
     };
 
   testScript = let
-    exe = firefoxPackage.unwrapped.binaryName;
+    exe = lib.getExe firefoxPackage;
   in ''
       from contextlib import contextmanager
 
diff --git a/nixos/tests/firewall.nix b/nixos/tests/firewall.nix
index dd7551f143a5e..139bc31177402 100644
--- a/nixos/tests/firewall.nix
+++ b/nixos/tests/firewall.nix
@@ -3,7 +3,7 @@
 import ./make-test-python.nix ( { pkgs, nftables, ... } : {
   name = "firewall" + pkgs.lib.optionalString nftables "-nftables";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco ];
+    maintainers = [ ];
   };
 
   nodes =
@@ -14,17 +14,10 @@ import ./make-test-python.nix ( { pkgs, nftables, ... } : {
           networking.nftables.enable = nftables;
           services.httpd.enable = true;
           services.httpd.adminAddr = "foo@example.org";
-        };
 
-      # Dummy configuration to check whether firewall.service will be honored
-      # during system activation. This only needs to be different to the
-      # original walled configuration so that there is a change in the service
-      # file.
-      walled2 =
-        { ... }:
-        { networking.firewall.enable = true;
-          networking.firewall.rejectPackets = true;
-          networking.nftables.enable = nftables;
+          specialisation.different-config.configuration = {
+            networking.firewall.rejectPackets = true;
+          };
         };
 
       attacker =
@@ -36,7 +29,6 @@ import ./make-test-python.nix ( { pkgs, nftables, ... } : {
     };
 
   testScript = { nodes, ... }: let
-    newSystem = nodes.walled2.config.system.build.toplevel;
     unit = if nftables then "nftables" else "firewall";
   in ''
     start_all()
@@ -62,7 +54,7 @@ import ./make-test-python.nix ( { pkgs, nftables, ... } : {
 
     # Check whether activation of a new configuration reloads the firewall.
     walled.succeed(
-        "${newSystem}/bin/switch-to-configuration test 2>&1 | grep -qF ${unit}.service"
+        "/run/booted-system/specialisation/different-config/bin/switch-to-configuration test 2>&1 | grep -qF ${unit}.service"
     )
   '';
 })
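A short sketch of the mechanism the rewritten check relies on, stated as generic NixOS behaviour rather than anything specific to this test:

    # any specialisation declared on the walled node, e.g.
    specialisation.different-config.configuration = {
      networking.firewall.rejectPackets = true;
    };
    # is built into the same system closure and becomes switchable at
    #   /run/booted-system/specialisation/different-config
    # so the test can call its switch-to-configuration script directly,
    # without referencing a second node's toplevel as before.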
diff --git a/nixos/tests/fish.nix b/nixos/tests/fish.nix
index 3d9b13c6af70a..c9a1bef51478e 100644
--- a/nixos/tests/fish.nix
+++ b/nixos/tests/fish.nix
@@ -10,6 +10,8 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         coreutils
         procps # kill collides with coreutils' to test https://github.com/NixOS/nixpkgs/issues/56432
       ];
+      # TODO: remove if/when #267880 is merged and this is a default
+      services.logrotate.enable = false;
     };
 
   testScript =
diff --git a/nixos/tests/flaresolverr.nix b/nixos/tests/flaresolverr.nix
new file mode 100644
index 0000000000000..0cec7adf6d6b6
--- /dev/null
+++ b/nixos/tests/flaresolverr.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+  {
+    name = "flaresolverr";
+    meta.maintainers = with lib.maintainers; [ paveloom ];
+
+    nodes.machine =
+      { pkgs, ... }:
+      {
+        services.flaresolverr = {
+          enable = true;
+          port = 8888;
+        };
+      };
+
+    testScript = ''
+      machine.wait_for_unit("flaresolverr.service")
+      machine.wait_for_open_port(8888)
+      machine.succeed("curl --fail http://localhost:8888/")
+    '';
+  }
+)
diff --git a/nixos/tests/flood.nix b/nixos/tests/flood.nix
new file mode 100644
index 0000000000000..075d37e62835f
--- /dev/null
+++ b/nixos/tests/flood.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  port = 3001;
+in
+{
+  name = "flood";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ thiagokokada ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    services.flood = {
+      inherit port;
+      enable = true;
+      openFirewall = true;
+      extraArgs = [ "--baseuri=/" ];
+    };
+  };
+
+  testScript = /* python */ ''
+    machine.start()
+    machine.wait_for_unit("flood.service")
+    machine.wait_for_open_port(${toString port})
+
+    machine.succeed("curl --fail http://localhost:${toString port}")
+  '';
+})
diff --git a/nixos/tests/forgejo.nix b/nixos/tests/forgejo.nix
index 827fae2790c6e..d2315b7f013eb 100644
--- a/nixos/tests/forgejo.nix
+++ b/nixos/tests/forgejo.nix
@@ -1,6 +1,7 @@
 { system ? builtins.currentSystem
 , config ? { }
 , pkgs ? import ../.. { inherit system config; }
+, forgejoPackage ? pkgs.forgejo
 }:
 
 with import ../lib/testing-python.nix { inherit system pkgs; };
@@ -41,6 +42,8 @@ let
     hash = "sha256-h2/UIp8IjPo3eE4Gzx52Fb7pcgG/Ww7u31w5fdKVMos=";
   };
 
+  metricSecret = "fakesecret";
+
   supportedDbTypes = [ "mysql" "postgres" "sqlite3" ];
   makeForgejoTest = type: nameValuePair type (makeTest {
     name = "forgejo-${type}";
@@ -51,6 +54,7 @@ let
         virtualisation.memorySize = 2047;
         services.forgejo = {
           enable = true;
+          package = forgejoPackage;
           database = { inherit type; };
           settings.service.DISABLE_REGISTRATION = true;
           settings."repository.signing".SIGNING_KEY = signingPrivateKeyId;
@@ -59,6 +63,8 @@ let
             ENABLE_PUSH_CREATE_USER = true;
             DEFAULT_PUSH_CREATE_PRIVATE = false;
           };
+          settings.metrics.ENABLED = true;
+          secrets.metrics.TOKEN = pkgs.writeText "metrics_secret" metricSecret;
         };
         environment.systemPackages = [ config.services.forgejo.package pkgs.gnupg pkgs.jq pkgs.file pkgs.htmlq ];
         services.openssh.enable = true;
@@ -141,7 +147,7 @@ let
         assert "BEGIN PGP PUBLIC KEY BLOCK" in server.succeed("curl http://localhost:3000/api/v1/signing-key.gpg")
 
         api_version = json.loads(server.succeed("curl http://localhost:3000/api/forgejo/v1/version")).get("version")
-        assert "development" != api_version and "${pkgs.forgejo.version}+gitea-" in api_version, (
+        assert "development" != api_version and "${forgejoPackage.version}+gitea-" in api_version, (
             "/api/forgejo/v1/version should not return 'development' "
             + f"but should contain a forgejo+gitea compatibility version string. Got '{api_version}' instead."
         )
@@ -192,6 +198,10 @@ let
             timeout=10
         )
 
+        with subtest("Testing /metrics endpoint with token from cfg.secrets"):
+            server.fail("curl --fail http://localhost:3000/metrics")
+            server.succeed('curl --fail http://localhost:3000/metrics -H "Authorization: Bearer ${metricSecret}"')
+
         with subtest("Testing runner registration and action workflow"):
             server.succeed(
                 "su -l forgejo -c 'GITEA_WORK_DIR=/var/lib/forgejo gitea actions generate-runner-token' | sed 's/^/TOKEN=/' | tee /var/lib/forgejo/runner_token"
diff --git a/nixos/tests/freetube.nix b/nixos/tests/freetube.nix
index 10f0773cb884c..c0beeeaae61f0 100644
--- a/nixos/tests/freetube.nix
+++ b/nixos/tests/freetube.nix
@@ -35,7 +35,7 @@ let
         machine.wait_for_text('Your Subscription list is currently empty')
         machine.screenshot("main.png")
         machine.send_key("ctrl-comma")
-        machine.wait_for_text('General Settings', timeout=30)
+        machine.wait_for_text('Data Settings', timeout=60)
         machine.screenshot("preferences.png")
       '';
     });
diff --git a/nixos/tests/freshrss-extensions.nix b/nixos/tests/freshrss-extensions.nix
new file mode 100644
index 0000000000000..f3e893b3b5a87
--- /dev/null
+++ b/nixos/tests/freshrss-extensions.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "freshrss";
+
+  nodes.machine = { pkgs, ... }: {
+    services.freshrss = {
+      enable = true;
+      baseUrl = "http://localhost";
+      authType = "none";
+      extensions = [ pkgs.freshrss-extensions.youtube ];
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_open_port(80)
+    response = machine.succeed("curl -vvv -s http://127.0.0.1:80/i/?c=extension")
+    assert '<span class="ext_name disabled">YouTube Video Feed</span>' in response, "Extension not present in extensions page."
+  '';
+})
diff --git a/nixos/tests/frigate.nix b/nixos/tests/frigate.nix
index 03bd2b89611d5..ecbc68d364316 100644
--- a/nixos/tests/frigate.nix
+++ b/nixos/tests/frigate.nix
@@ -5,7 +5,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
   meta.maintainers = with lib.maintainers; [ hexa ];
 
   nodes = {
-    machine = { config, ... }: {
+    machine = {
       services.frigate = {
         enable = true;
 
@@ -40,26 +40,33 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
         ];
         serviceConfig = {
           DynamicUser = true;
-          ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -re -f lavfi -i smptebars=size=800x600:rate=10 -f mpegts -listen 1 http://0.0.0.0:8080";
+          ExecStart = "${lib.getExe pkgs.ffmpeg-headless} -re -f lavfi -i smptebars=size=1280x720:rate=5 -f mpegts -listen 1 http://0.0.0.0:8080";
           Restart = "always";
         };
       };
+
+      environment.systemPackages = with pkgs; [ httpie ];
     };
   };
 
   testScript = ''
     start_all()
 
+    # wait until frigate is up
     machine.wait_for_unit("frigate.service")
-
-    # Frigate startup
     machine.wait_for_open_port(5001)
 
-    # nginx startup
-    machine.wait_for_open_port(80)
+    # extract admin password from logs
+    machine.wait_until_succeeds("journalctl -u frigate.service -o cat | grep -q 'Password: '")
+    password = machine.execute("journalctl -u frigate.service -o cat | grep -oP '([a-f0-9]{32})'")[1]
+
+    # login and store session
+    machine.log(machine.succeed(f"http --check-status --session=frigate post http://localhost/api/login user=admin password={password}"))
 
-    machine.succeed("curl http://localhost")
+    # make an authenticated api request
+    machine.log(machine.succeed("http --check-status --session=frigate get http://localhost/api/version"))
 
+    # wait for a recording to appear
     machine.wait_for_file("/var/cache/frigate/test@*.mp4")
   '';
 })
diff --git a/nixos/tests/frp.nix b/nixos/tests/frp.nix
index 1f57c031a53a5..717e8718721ce 100644
--- a/nixos/tests/frp.nix
+++ b/nixos/tests/frp.nix
@@ -1,6 +1,6 @@
 import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "frp";
-  meta.maintainers = with lib.maintainers; [ zaldnoay janik ];
+  meta.maintainers = with lib.maintainers; [ zaldnoay ];
   nodes = {
     frps = {
       networking = {
diff --git a/nixos/tests/frr.nix b/nixos/tests/frr.nix
index 0d1a6a694a82c..edd702dc60e6f 100644
--- a/nixos/tests/frr.nix
+++ b/nixos/tests/frr.nix
@@ -99,6 +99,6 @@ import ./make-test-python.nix ({ pkgs, ... }:
                   gw.wait_until_succeeds("vtysh -c 'show ip route' | grep '^O>'")
 
           with subtest("Test ICMP"):
-              client.wait_until_succeeds("ping -c 3 server >&2")
+              client.wait_until_succeeds("ping -4 -c 3 server >&2")
         '';
     })
diff --git a/nixos/tests/ft2-clone.nix b/nixos/tests/ft2-clone.nix
index 5476b38c00bd2..813e258cd2bec 100644
--- a/nixos/tests/ft2-clone.nix
+++ b/nixos/tests/ft2-clone.nix
@@ -8,8 +8,6 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     imports = [
       ./common/x11.nix
     ];
-
-    sound.enable = true;
     environment.systemPackages = [ pkgs.ft2-clone ];
   };
 
diff --git a/nixos/tests/gancio.nix b/nixos/tests/gancio.nix
new file mode 100644
index 0000000000000..8f4696d6f6cc4
--- /dev/null
+++ b/nixos/tests/gancio.nix
@@ -0,0 +1,87 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+  let
+    extraHosts = ''
+      192.168.13.12 agenda.example.com
+    '';
+  in
+  {
+    name = "gancio";
+    meta.maintainers = with pkgs.lib.maintainers; [ jbgi ];
+
+    nodes = {
+      server =
+        { pkgs, ... }:
+        {
+          networking = {
+            interfaces.eth1 = {
+              ipv4.addresses = [
+                {
+                  address = "192.168.13.12";
+                  prefixLength = 24;
+                }
+              ];
+            };
+            inherit extraHosts;
+            firewall.allowedTCPPorts = [ 80 ];
+          };
+          environment.systemPackages = [ pkgs.gancio ];
+          services.gancio = {
+            enable = true;
+            settings = {
+              hostname = "agenda.example.com";
+              db.dialect = "postgres";
+            };
+            plugins = [ pkgs.gancioPlugins.telegram-bridge ];
+            userLocale = {
+              en = {
+                register = {
+                  description = "My new registration page description";
+                };
+              };
+            };
+            nginx = {
+              enableACME = false;
+              forceSSL = false;
+            };
+          };
+        };
+
+      client =
+        { pkgs, ... }:
+        {
+          environment.systemPackages = [ pkgs.jq ];
+          networking = {
+            interfaces.eth1 = {
+              ipv4.addresses = [
+                {
+                  address = "192.168.13.1";
+                  prefixLength = 24;
+                }
+              ];
+            };
+            inherit extraHosts;
+          };
+        };
+    };
+
+    testScript = ''
+      start_all()
+
+      server.wait_for_unit("postgresql")
+      server.wait_for_unit("gancio")
+      server.wait_for_unit("nginx")
+      server.wait_for_file("/run/gancio/socket")
+      server.wait_for_open_port(80)
+
+      # Check a user can be created via the cli
+      server.succeed("cd /var/lib/gancio && sudo -u gancio gancio users create admin dummy admin")
+
+      # Check event list is returned
+      client.wait_until_succeeds("curl --verbose --fail-with-body http://agenda.example.com/api/events", timeout=30)
+
+      server.shutdown()
+      client.shutdown()
+    '';
+  }
+)
diff --git a/nixos/tests/garage/default.nix b/nixos/tests/garage/default.nix
index a42236e9a5bbe..b7f9bb4b865bd 100644
--- a/nixos/tests/garage/default.nix
+++ b/nixos/tests/garage/default.nix
@@ -51,4 +51,5 @@ in
   [
     "0_8"
     "0_9"
+    "1_x"
   ]
diff --git a/nixos/tests/garage/with-3node-replication.nix b/nixos/tests/garage/with-3node-replication.nix
index d4387b198d976..266a1082893f7 100644
--- a/nixos/tests/garage/with-3node-replication.nix
+++ b/nixos/tests/garage/with-3node-replication.nix
@@ -7,10 +7,10 @@ args@{ mkNode, ver, ... }:
   };
 
   nodes = {
-    node1 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::1"; };
-    node2 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::2"; };
-    node3 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::3"; };
-    node4 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::4"; };
+    node1 = mkNode { replicationMode = "3"; publicV6Address = "fc00:1::1"; };
+    node2 = mkNode { replicationMode = "3"; publicV6Address = "fc00:1::2"; };
+    node3 = mkNode { replicationMode = "3"; publicV6Address = "fc00:1::3"; };
+    node4 = mkNode { replicationMode = "3"; publicV6Address = "fc00:1::4"; };
   };
 
   testScript = ''
diff --git a/nixos/tests/gitdaemon.nix b/nixos/tests/gitdaemon.nix
index 052fa902b4504..2211960b457f8 100644
--- a/nixos/tests/gitdaemon.nix
+++ b/nixos/tests/gitdaemon.nix
@@ -20,7 +20,7 @@ in {
 
         systemd.tmpfiles.rules = [
           # type path mode user group age arg
-          " d    /git 0755 root root  -   -"
+          " d    /git 0755 git  git   -   -"
         ];
 
         services.gitDaemon = {
@@ -56,6 +56,10 @@ in {
             "rm -r /project",
         )
 
+    # Change user/group to default daemon user/group from module
+    # to avoid "fatal: detected dubious ownership in repository at '/git/project.git'"
+    server.succeed("chown git:git -R /git/project.git")
+
     with subtest("git daemon starts"):
         server.wait_for_unit("git-daemon.service")
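As an aside on the "dubious ownership" comment above: git's check can also be silenced with the safe.directory escape hatch instead of re-owning the repository. A hypothetical sketch for the server node, assuming the NixOS programs.git module (this is not what the test does):

    # mark the repository as safe in the system-wide git config
    programs.git = {
      enable = true;
      config.safe.directory = "/git/project.git";
    };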
 
diff --git a/nixos/tests/gitlab.nix b/nixos/tests/gitlab.nix
index 52fe588930dfc..a099a8201ae50 100644
--- a/nixos/tests/gitlab.nix
+++ b/nixos/tests/gitlab.nix
@@ -34,6 +34,8 @@ in {
     gitlab = { ... }: {
       imports = [ common/user-account.nix ];
 
+      environment.systemPackages = with pkgs; [ git ];
+
       virtualisation.memorySize = 6144;
       virtualisation.cores = 4;
       virtualisation.useNixStoreImage = true;
diff --git a/nixos/tests/gitolite-fcgiwrap.nix b/nixos/tests/gitolite-fcgiwrap.nix
index abf1db37003a6..43d65faebbee2 100644
--- a/nixos/tests/gitolite-fcgiwrap.nix
+++ b/nixos/tests/gitolite-fcgiwrap.nix
@@ -24,7 +24,12 @@ import ./make-test-python.nix (
               {
                 networking.firewall.allowedTCPPorts = [ 80 ];
 
-                services.fcgiwrap.enable = true;
+                services.fcgiwrap.instances.gitolite = {
+                  process.user = "gitolite";
+                  process.group = "gitolite";
+                  socket = { inherit (config.services.nginx) user group; };
+                };
+
                 services.gitolite = {
                   enable = true;
                   adminPubkey = adminPublicKey;
@@ -59,7 +64,7 @@ import ./make-test-python.nix (
                     fastcgi_param SCRIPT_FILENAME ${pkgs.gitolite}/bin/gitolite-shell;
 
                     # use Unix domain socket or inet socket
-                    fastcgi_pass unix:/run/fcgiwrap.sock;
+                    fastcgi_pass unix:${config.services.fcgiwrap.instances.gitolite.socket.address};
                   '';
                 };
 
@@ -82,7 +87,7 @@ import ./make-test-python.nix (
 
           server.wait_for_unit("gitolite-init.service")
           server.wait_for_unit("nginx.service")
-          server.wait_for_file("/run/fcgiwrap.sock")
+          server.wait_for_file("/run/fcgiwrap-gitolite.sock")
 
           client.wait_for_unit("multi-user.target")
           client.succeed(
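Pulling the three gitolite-fcgiwrap hunks above together, a sketch of the per-instance fcgiwrap shape the test now relies on; "example" is a hypothetical instance name, and the option paths are the ones visible in this diff:

    services.fcgiwrap.instances.example = {
      process.user = "gitolite";
      process.group = "gitolite";
      # socket ownership matches the consumer, here nginx
      socket = { inherit (config.services.nginx) user group; };
    };
    # the generated socket path is exposed as
    #   config.services.fcgiwrap.instances.example.socket.address
    # which for the gitolite instance above resolves to /run/fcgiwrap-gitolite.sock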
diff --git a/nixos/tests/glance.nix b/nixos/tests/glance.nix
new file mode 100644
index 0000000000000..daa3d9a4a8160
--- /dev/null
+++ b/nixos/tests/glance.nix
@@ -0,0 +1,36 @@
+{ lib, ... }:
+
+{
+  name = "glance";
+
+  nodes = {
+    machine_default =
+      { pkgs, ... }:
+      {
+        services.glance = {
+          enable = true;
+        };
+      };
+
+    machine_custom_port =
+      { pkgs, ... }:
+      {
+        services.glance = {
+          enable = true;
+          settings.server.port = 5678;
+        };
+      };
+  };
+
+  testScript = ''
+    machine_default.start()
+    machine_default.wait_for_unit("glance.service")
+    machine_default.wait_for_open_port(8080)
+
+    machine_custom_port.start()
+    machine_custom_port.wait_for_unit("glance.service")
+    machine_custom_port.wait_for_open_port(5678)
+  '';
+
+  meta.maintainers = [ lib.maintainers.drupol ];
+}
diff --git a/nixos/tests/gnome-extensions.nix b/nixos/tests/gnome-extensions.nix
index 51ccabd7e6a65..109fdf6b0846c 100644
--- a/nixos/tests/gnome-extensions.nix
+++ b/nixos/tests/gnome-extensions.nix
@@ -45,7 +45,7 @@ import ./make-test-python.nix (
               # Eval API is now internal so Shell needs to run in unsafe mode.
               # TODO: improve test driver so that it supports openqa-like manipulation
               # that would allow us to drop this mess.
-              "${pkgs.gnome.gnome-shell}/bin/gnome-shell --unsafe-mode"
+              "${pkgs.gnome-shell}/bin/gnome-shell --unsafe-mode"
             ];
           };
         };
@@ -84,7 +84,6 @@ import ./make-test-python.nix (
       "dash-to-dock"
       "dash-to-panel"
       "ddterm"
-      "emoji-selector"
       "gsconnect"
       "system-monitor-next"
       "desktop-icons-ng-ding"
@@ -138,11 +137,11 @@ import ./make-test-python.nix (
             # Enable and optionally disable
 
             machine.succeed(f"${run "gnome-extensions enable {extension}"}")
-            checkState("ENABLED", extension)
+            checkState("ACTIVE", extension)
 
             if disable:
                 machine.succeed(f"${run "gnome-extensions disable {extension}"}")
-                checkState("DISABLED", extension)
+                checkState("INACTIVE", extension)
     ''
     + lib.concatLines (map (e: ''checkExtension("${e}", False)'') alwaysOnExtensions)
     + lib.concatLines (map (e: ''checkExtension("${e}", True)'') testExtensions)
diff --git a/nixos/tests/gnome-xorg.nix b/nixos/tests/gnome-xorg.nix
index c8ffb459edece..45538ef4eab40 100644
--- a/nixos/tests/gnome-xorg.nix
+++ b/nixos/tests/gnome-xorg.nix
@@ -35,7 +35,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
               # Eval API is now internal so Shell needs to run in unsafe mode.
               # TODO: improve test driver so that it supports openqa-like manipulation
               # that would allow us to drop this mess.
-              "${pkgs.gnome.gnome-shell}/bin/gnome-shell --unsafe-mode"
+              "${pkgs.gnome-shell}/bin/gnome-shell --unsafe-mode"
             ];
           };
         };
diff --git a/nixos/tests/gnome.nix b/nixos/tests/gnome.nix
index 98d61c7ea1723..9a296b93682c4 100644
--- a/nixos/tests/gnome.nix
+++ b/nixos/tests/gnome.nix
@@ -31,7 +31,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
               # Eval API is now internal so Shell needs to run in unsafe mode.
               # TODO: improve test driver so that it supports openqa-like manipulation
               # that would allow us to drop this mess.
-              "${pkgs.gnome.gnome-shell}/bin/gnome-shell --unsafe-mode"
+              "${pkgs.gnome-shell}/bin/gnome-shell --unsafe-mode"
             ];
           };
         };
diff --git a/nixos/tests/gnupg.nix b/nixos/tests/gnupg.nix
index 65a9a93007fd9..68110c9d6d549 100644
--- a/nixos/tests/gnupg.nix
+++ b/nixos/tests/gnupg.nix
@@ -69,9 +69,11 @@ import ./make-test-python.nix ({ pkgs, lib, ...}:
           machine.wait_until_tty_matches("1", "Change")
           machine.send_chars("O\n")
           machine.wait_until_tty_matches("1", "Please enter")
-          machine.send_chars("pgp_p4ssphrase\n")
-          machine.wait_until_tty_matches("1", "Please re-enter")
-          machine.send_chars("pgp_p4ssphrase\n")
+          machine.send_chars("pgp_p4ssphrase")
+          machine.send_key("tab")
+          machine.send_chars("pgp_p4ssphrase")
+          machine.wait_until_tty_matches("1", "Passphrases match")
+          machine.send_chars("\n")
           machine.wait_until_tty_matches("1", "public and secret key created")
 
       with subtest("Confirm the key is in the keyring"):
@@ -90,9 +92,11 @@ import ./make-test-python.nix ({ pkgs, lib, ...}:
           machine.wait_until_tty_matches("1", "Enter passphrase")
           machine.send_chars("ssh_p4ssphrase\n")
           machine.wait_until_tty_matches("1", "Please enter")
-          machine.send_chars("ssh_agent_p4ssphrase\n")
-          machine.wait_until_tty_matches("1", "Please re-enter")
-          machine.send_chars("ssh_agent_p4ssphrase\n")
+          machine.send_chars("ssh_agent_p4ssphrase")
+          machine.send_key("tab")
+          machine.send_chars("ssh_agent_p4ssphrase")
+          machine.wait_until_tty_matches("1", "Passphrases match")
+          machine.send_chars("\n")
 
       with subtest("Confirm the SSH key has been registered"):
           machine.wait_until_succeeds(as_alice("ssh-add -l | grep -q alice@machine"))
diff --git a/nixos/tests/goatcounter.nix b/nixos/tests/goatcounter.nix
new file mode 100644
index 0000000000000..ee3b373383e2d
--- /dev/null
+++ b/nixos/tests/goatcounter.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix (
+  { lib, pkgs, ... }:
+
+  {
+    name = "goatcounter";
+
+    meta.maintainers = with lib.maintainers; [ bhankas ];
+
+    nodes.machine =
+      { config, ... }:
+      {
+        virtualisation.memorySize = 2048;
+
+        services.goatcounter = {
+          enable = true;
+          proxy = true;
+        };
+      };
+
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("goatcounter.service")
+
+      # wait for goatcounter to fully come up
+      with subtest("goatcounter service starts"):
+          machine.wait_until_succeeds(
+              "curl -sSfL http://localhost:8081/ > /dev/null",
+              timeout=30
+          )
+    '';
+  }
+)
diff --git a/nixos/tests/gotenberg.nix b/nixos/tests/gotenberg.nix
new file mode 100644
index 0000000000000..aa39b2d349d79
--- /dev/null
+++ b/nixos/tests/gotenberg.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+
+  {
+    name = "gotenberg";
+    meta.maintainers = with lib.maintainers; [ pyrox0 ];
+
+    nodes.machine = {
+      services.gotenberg = {
+        enable = true;
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      machine.wait_for_unit("gotenberg.service")
+
+      # Gotenberg startup
+      machine.wait_for_open_port(3000)
+
+      # Ensure healthcheck endpoint succeeds
+      machine.succeed("curl http://localhost:3000/health")
+    '';
+  }
+)
diff --git a/nixos/tests/gotify-server.nix b/nixos/tests/gotify-server.nix
index c8d7fa172a7b7..495b1c8e3443c 100644
--- a/nixos/tests/gotify-server.nix
+++ b/nixos/tests/gotify-server.nix
@@ -9,7 +9,9 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
 
     services.gotify = {
       enable = true;
-      port = 3000;
+      environment = {
+        GOTIFY_SERVER_PORT = 3000;
+      };
     };
   };
 
diff --git a/nixos/tests/grafana/basic.nix b/nixos/tests/grafana/basic.nix
index dd389bc8a3d1f..fae6bd4dbbcfb 100644
--- a/nixos/tests/grafana/basic.nix
+++ b/nixos/tests/grafana/basic.nix
@@ -10,7 +10,7 @@ let
         analytics.reporting_enabled = false;
 
         server = {
-          http_addr = "localhost";
+          http_addr = "::1";
           domain = "localhost";
         };
 
@@ -47,7 +47,7 @@ let
 
     postgresql = {
       services.grafana.settings.database = {
-        host = "127.0.0.1:5432";
+        host = "[::1]:5432";
         user = "grafana";
       };
       services.postgresql = {
@@ -91,9 +91,9 @@ in {
 
     with subtest("Declarative plugins installed"):
         declarativePlugins.wait_for_unit("grafana.service")
-        declarativePlugins.wait_for_open_port(3000)
+        declarativePlugins.wait_for_open_port(3000, addr="::1")
         declarativePlugins.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/plugins | grep grafana-clock-panel"
+            "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/plugins | grep grafana-clock-panel"
         )
         declarativePlugins.shutdown()
 
@@ -101,10 +101,10 @@ in {
         sqlite.wait_for_unit("grafana.service")
         sqlite.wait_for_open_port(3000)
         print(sqlite.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users -i"
+            "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/org/users -i"
         ))
         sqlite.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
+            "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/org/users | grep admin\@localhost"
         )
         sqlite.shutdown()
 
@@ -112,10 +112,10 @@ in {
         socket.wait_for_unit("grafana.service")
         socket.wait_for_open_port(80)
         print(socket.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1/api/org/users -i"
+            "curl -sSfN -u testadmin:snakeoilpwd http://[::1]/api/org/users -i"
         ))
         socket.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1/api/org/users | grep admin\@localhost"
+            "curl -sSfN -u testadmin:snakeoilpwd http://[::1]/api/org/users | grep admin\@localhost"
         )
         socket.shutdown()
 
@@ -125,7 +125,7 @@ in {
         postgresql.wait_for_open_port(3000)
         postgresql.wait_for_open_port(5432)
         postgresql.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
+            "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/org/users | grep admin\@localhost"
         )
         postgresql.shutdown()
 
@@ -135,7 +135,7 @@ in {
         mysql.wait_for_open_port(3000)
         mysql.wait_for_open_port(3306)
         mysql.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
+            "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/org/users | grep admin\@localhost"
         )
         mysql.shutdown()
   '';
diff --git a/nixos/tests/grafana/provision/default.nix b/nixos/tests/grafana/provision/default.nix
index d33d16ce12099..4a137638800b6 100644
--- a/nixos/tests/grafana/provision/default.nix
+++ b/nixos/tests/grafana/provision/default.nix
@@ -1,7 +1,7 @@
 import ../../make-test-python.nix ({ lib, pkgs, ... }:
 
 let
-  inherit (lib) mkMerge nameValuePair maintainers;
+  inherit (lib) mkMerge maintainers;
 
   baseGrafanaConf = {
     services.grafana = {
@@ -11,7 +11,7 @@ let
         analytics.reporting_enabled = false;
 
         server = {
-          http_addr = "localhost";
+          http_addr = "::1";
           domain = "localhost";
         };
 
@@ -33,35 +33,6 @@ let
   };
 
   extraNodeConfs = {
-    provisionLegacyNotifiers = {
-      services.grafana.provision = {
-        datasources.settings = {
-          apiVersion = 1;
-          datasources = [{
-            name = "Test Datasource";
-            type = "testdata";
-            access = "proxy";
-            uid = "test_datasource";
-          }];
-        };
-        dashboards.settings = {
-          apiVersion = 1;
-          providers = [{
-            name = "default";
-            options.path = "/var/lib/grafana/dashboards";
-          }];
-        };
-        notifiers = [{
-          uid = "test_notifiers";
-          name = "Test Notifiers";
-          type = "email";
-          settings = {
-            singleEmail = true;
-            addresses = "test@test.com";
-          };
-        }];
-      };
-    };
     provisionNix = {
       services.grafana.provision = {
         datasources.settings = {
@@ -191,7 +162,7 @@ in {
   name = "grafana-provision";
 
   meta = with maintainers; {
-    maintainers = [ kfears willibutz ];
+    maintainers = [ willibutz ];
   };
 
   inherit nodes;
@@ -206,51 +177,41 @@ in {
     for description, machine in [nodeNix, nodeYaml, nodeYamlDir]:
         with subtest(f"Should start provision node: {description}"):
             machine.wait_for_unit("grafana.service")
-            machine.wait_for_open_port(3000)
+            machine.wait_for_open_port(3000, addr="::1")
 
         with subtest(f"Successful datasource provision with {description}"):
             machine.succeed(
-                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/datasources/uid/test_datasource | grep Test\ Datasource"
+                "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/datasources/uid/test_datasource | grep Test\ Datasource"
             )
 
         with subtest(f"Successful dashboard provision with {description}"):
             machine.succeed(
-                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/dashboards/uid/test_dashboard | grep Test\ Dashboard"
+                "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/dashboards/uid/test_dashboard | grep Test\ Dashboard"
             )
 
         with subtest(f"Successful rule provision with {description}"):
             machine.succeed(
-                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/alert-rules/test_rule | grep Test\ Rule"
+                "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/v1/provisioning/alert-rules/test_rule | grep Test\ Rule"
             )
 
         with subtest(f"Successful contact point provision with {description}"):
             machine.succeed(
-                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/contact-points | grep Test\ Contact\ Point"
+                "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/v1/provisioning/contact-points | grep Test\ Contact\ Point"
             )
 
         with subtest(f"Successful policy provision with {description}"):
             machine.succeed(
-                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/policies | grep Test\ Contact\ Point"
+                "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/v1/provisioning/policies | grep Test\ Contact\ Point"
             )
 
         with subtest(f"Successful template provision with {description}"):
             machine.succeed(
-                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/templates | grep Test\ Template"
+                "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/v1/provisioning/templates | grep Test\ Template"
             )
 
         with subtest("Successful mute timings provision with {description}"):
             machine.succeed(
-                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/mute-timings | grep Test\ Mute\ Timing"
+                "curl -sSfN -u testadmin:snakeoilpwd http://[::1]:3000/api/v1/provisioning/mute-timings | grep Test\ Mute\ Timing"
             )
-
-    with subtest("Successful notifiers provision"):
-        provisionLegacyNotifiers.wait_for_unit("grafana.service")
-        provisionLegacyNotifiers.wait_for_open_port(3000)
-        print(provisionLegacyNotifiers.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/alert-notifications/uid/test_notifiers"
-        ))
-        provisionLegacyNotifiers.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/alert-notifications/uid/test_notifiers | grep Test\ Notifiers"
-        )
   '';
 })
diff --git a/nixos/tests/graylog.nix b/nixos/tests/graylog.nix
index 3f7cc3a914390..b52c2976a73f8 100644
--- a/nixos/tests/graylog.nix
+++ b/nixos/tests/graylog.nix
@@ -1,10 +1,10 @@
 import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "graylog";
-  meta.maintainers = with lib.maintainers; [ ];
+  meta.maintainers = [ ];
 
   nodes.machine = { pkgs, ... }: {
     virtualisation.memorySize = 4096;
-    virtualisation.diskSize = 4096;
+    virtualisation.diskSize = 1024 * 6;
 
     services.mongodb.enable = true;
     services.elasticsearch.enable = true;
@@ -65,9 +65,18 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
   in ''
     machine.start()
     machine.wait_for_unit("graylog.service")
+
+    machine.wait_until_succeeds(
+      "journalctl -o cat -u graylog.service | grep 'Started REST API at <127.0.0.1:9000>'"
+    )
+
     machine.wait_for_open_port(9000)
     machine.succeed("curl -sSfL http://127.0.0.1:9000/")
 
+    machine.wait_until_succeeds(
+      "journalctl -o cat -u graylog.service | grep 'Graylog server up and running'"
+    )
+
     session = machine.succeed(
         "curl -X POST "
         + "-sSfL http://127.0.0.1:9000/api/system/sessions "
@@ -88,6 +97,10 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     )
 
     machine.wait_until_succeeds(
+      "journalctl -o cat -u graylog.service | grep -E 'Input \[GELF UDP/Demo/[[:alnum:]]{24}\] is now RUNNING'"
+    )
+
+    machine.wait_until_succeeds(
         "test \"$(curl -sSfL 'http://127.0.0.1:9000/api/cluster/inputstates' "
         + f"-u {session}:session "
         + "-H 'Accept: application/json' "
diff --git a/nixos/tests/greetd-no-shadow.nix b/nixos/tests/greetd-no-shadow.nix
new file mode 100644
index 0000000000000..382218ffa948f
--- /dev/null
+++ b/nixos/tests/greetd-no-shadow.nix
@@ -0,0 +1,49 @@
+import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
+{
+  name = "greetd-no-shadow";
+  meta = {
+    maintainers = [ ];
+  };
+
+  nodes.machine =
+    { pkgs, lib, ... }: {
+
+      users.users.alice = {
+        isNormalUser = true;
+        group = "alice";
+        password = "foobar";
+      };
+      users.groups.alice = {};
+
+      # This means login(1) breaks, so we must use greetd/agreety instead.
+      security.shadow.enable = false;
+
+      services.greetd = {
+        enable = true;
+        settings = {
+          default_session = {
+            command = "${pkgs.greetd.greetd}/bin/agreety --cmd bash";
+          };
+        };
+      };
+    };
+
+  testScript = ''
+      machine.start()
+
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_until_succeeds("pgrep -f 'agretty.*tty1'")
+      machine.screenshot("postboot")
+
+      with subtest("Log in as alice on a virtual console"):
+          machine.wait_until_tty_matches("1", "login: ")
+          machine.send_chars("alice\n")
+          machine.wait_until_tty_matches("1", "login: alice")
+          machine.wait_until_succeeds("pgrep login")
+          machine.wait_until_tty_matches("1", "Password: ")
+          machine.send_chars("foobar\n")
+          machine.wait_until_succeeds("pgrep -u alice bash")
+          machine.send_chars("touch done\n")
+          machine.wait_for_file("/home/alice/done")
+  '';
+})
diff --git a/nixos/tests/hadoop/hadoop.nix b/nixos/tests/hadoop/hadoop.nix
index 6162ccfd33d47..cc631bb468106 100644
--- a/nixos/tests/hadoop/hadoop.nix
+++ b/nixos/tests/hadoop/hadoop.nix
@@ -99,6 +99,7 @@ import ../make-test-python.nix ({ package, ... }: {
       };
 
       dn1 = { ... }: {
+        virtualisation.diskSize = 4096;
         services.hadoop = {
           inherit package coreSite hdfsSite;
           hdfs.datanode = {
diff --git a/nixos/tests/harmonia.nix b/nixos/tests/harmonia.nix
index a9beac82f8e12..d97f0fb89ede4 100644
--- a/nixos/tests/harmonia.nix
+++ b/nixos/tests/harmonia.nix
@@ -7,7 +7,7 @@
     harmonia = {
       services.harmonia = {
         enable = true;
-        signKeyPath = pkgs.writeText "cache-key" "cache.example.com-1:9FhO0w+7HjZrhvmzT1VlAZw4OSAlFGTgC24Seg3tmPl4gZBdwZClzTTHr9cVzJpwsRSYLTu7hEAQe3ljy92CWg==";
+        signKeyPaths = [(pkgs.writeText "cache-key" "cache.example.com-1:9FhO0w+7HjZrhvmzT1VlAZw4OSAlFGTgC24Seg3tmPl4gZBdwZClzTTHr9cVzJpwsRSYLTu7hEAQe3ljy92CWg==")];
         settings.priority = 35;
       };
 
diff --git a/nixos/tests/hocker-fetchdocker/default.nix b/nixos/tests/hocker-fetchdocker/default.nix
deleted file mode 100644
index b5c06126c2e80..0000000000000
--- a/nixos/tests/hocker-fetchdocker/default.nix
+++ /dev/null
@@ -1,16 +0,0 @@
-import ../make-test-python.nix ({ pkgs, ...} : {
-  name = "test-hocker-fetchdocker";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ ixmatus ];
-    broken = true; # tries to download from registry-1.docker.io - how did this ever work?
-  };
-
-  nodes.machine = import ./machine.nix;
-
-  testScript = ''
-    start_all()
-
-    machine.wait_for_unit("sockets.target")
-    machine.wait_until_succeeds("docker run registry-1.docker.io/v2/library/hello-world:latest")
-  '';
-})
diff --git a/nixos/tests/hocker-fetchdocker/hello-world-container.nix b/nixos/tests/hocker-fetchdocker/hello-world-container.nix
deleted file mode 100644
index a127875264e95..0000000000000
--- a/nixos/tests/hocker-fetchdocker/hello-world-container.nix
+++ /dev/null
@@ -1,19 +0,0 @@
-{ fetchDockerConfig, fetchDockerLayer, fetchdocker }:
-fetchdocker rec {
-    name = "hello-world";
-    registry = "https://registry-1.docker.io/v2/";
-    repository = "library";
-    imageName = "hello-world";
-    tag = "latest";
-    imageConfig = fetchDockerConfig {
-      inherit tag registry repository imageName;
-      sha256 = "1ivbd23hyindkahzfw4kahgzi6ibzz2ablmgsz6340vc6qr1gagj";
-    };
-    imageLayers = let
-      layer0 = fetchDockerLayer {
-        inherit registry repository imageName;
-        layerDigest = "ca4f61b1923c10e9eb81228bd46bee1dfba02b9c7dac1844527a734752688ede";
-        sha256 = "1plfd194fwvsa921ib3xkhms1yqxxrmx92r2h7myj41wjaqn2kya";
-      };
-      in [ layer0 ];
-  }
diff --git a/nixos/tests/hocker-fetchdocker/machine.nix b/nixos/tests/hocker-fetchdocker/machine.nix
deleted file mode 100644
index 885adebe14985..0000000000000
--- a/nixos/tests/hocker-fetchdocker/machine.nix
+++ /dev/null
@@ -1,26 +0,0 @@
-{ pkgs, ... }:
-{ nixpkgs.config.packageOverrides = pkgs': {
-    hello-world-container = pkgs'.callPackage ./hello-world-container.nix { };
-  };
-
-  virtualisation.docker = {
-    enable  = true;
-    package = pkgs.docker;
-  };
-
-  systemd.services.docker-load-fetchdocker-image = {
-    description = "Docker load hello-world-container";
-    wantedBy    = [ "multi-user.target" ];
-    wants       = [ "docker.service" ];
-    after       = [ "docker.service" ];
-
-    script = ''
-      ${pkgs.hello-world-container}/compositeImage.sh | ${pkgs.docker}/bin/docker load
-    '';
-
-    serviceConfig = {
-      Type = "oneshot";
-    };
-  };
-}
-
diff --git a/nixos/tests/home-assistant.nix b/nixos/tests/home-assistant.nix
index 05fb2fa1e06aa..47902fa4e1340 100644
--- a/nixos/tests/home-assistant.nix
+++ b/nixos/tests/home-assistant.nix
@@ -44,6 +44,8 @@ in {
       # test loading custom components
       customComponents = with pkgs.home-assistant-custom-components; [
         prometheus_sensor
+        # tests loading multiple components from a single package
+        spook
       ];
 
       # test loading lovelace modules
@@ -179,7 +181,8 @@ in {
 
     with subtest("Check that custom components get installed"):
         hass.succeed("test -f ${configDir}/custom_components/prometheus_sensor/manifest.json")
-        hass.wait_until_succeeds("journalctl -u home-assistant.service | grep -q 'We found a custom integration prometheus_sensor which has not been tested by Home Assistant'")
+        for integration in ("prometheus_sensor", "spook", "spook_inverse"):
+            hass.wait_until_succeeds(f"journalctl -u home-assistant.service | grep -q 'We found a custom integration {integration} which has not been tested by Home Assistant'")
 
     with subtest("Check that lovelace modules are referenced and fetchable"):
         hass.succeed("grep -q 'mini-graph-card-bundle.js' '${configDir}/configuration.yaml'")
@@ -228,7 +231,8 @@ in {
         cursor = get_journal_cursor()
         hass.succeed("${system}/specialisation/removeCustomThings/bin/switch-to-configuration test")
         hass.fail("grep -q 'mini-graph-card-bundle.js' '${configDir}/ui-lovelace.yaml'")
-        hass.fail("test -f ${configDir}/custom_components/prometheus_sensor/manifest.json")
+        for integration in ("prometheus_sensor", "spook", "spook_inverse"):
+            hass.fail(f"test -f ${configDir}/custom_components/{integration}/manifest.json")
         wait_for_homeassistant(cursor)
 
     with subtest("Check that no errors were logged"):
diff --git a/nixos/tests/homebox.nix b/nixos/tests/homebox.nix
new file mode 100644
index 0000000000000..2d14a153c976d
--- /dev/null
+++ b/nixos/tests/homebox.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+  let
+    port = "7745";
+  in
+  {
+    name = "homebox";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ patrickdag ];
+    };
+    nodes.machine = {
+      services.homebox = {
+        enable = true;
+        settings.HBOX_WEB_PORT = port;
+      };
+    };
+    testScript = ''
+      machine.wait_for_unit("homebox.service")
+      machine.wait_for_open_port(${port})
+
+      machine.succeed("curl --fail -X GET 'http://localhost:${port}/'")
+      out = machine.succeed("curl --fail 'http://localhost:${port}/api/v1/status'")
+      assert '"health":true' in out
+    '';
+  }
+)
diff --git a/nixos/tests/hydra/default.nix b/nixos/tests/hydra/default.nix
index 98c3c6fbae9fc..0f5092f196538 100644
--- a/nixos/tests/hydra/default.nix
+++ b/nixos/tests/hydra/default.nix
@@ -11,7 +11,7 @@ let
   inherit (import ./common.nix { inherit system; }) baseConfig;
 
   hydraPkgs = {
-    inherit (pkgs) hydra_unstable;
+    inherit (pkgs) hydra;
   };
 
   makeHydraTest = with pkgs.lib; name: package: makeTest {
diff --git a/nixos/tests/ifm.nix b/nixos/tests/ifm.nix
new file mode 100644
index 0000000000000..60901cb3f7371
--- /dev/null
+++ b/nixos/tests/ifm.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "ifm";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ litchipi ];
+  };
+
+  nodes = {
+    server = rec {
+      services.ifm = {
+        enable = true;
+        port = 9001;
+        dataDir = "/data";
+      };
+
+      system.activationScripts.ifm-setup-dir = ''
+        mkdir -p ${services.ifm.dataDir}
+        chmod u+w,g+w,o+w ${services.ifm.dataDir}
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("ifm.service")
+    server.wait_for_open_port(9001)
+    server.succeed("curl --fail http://localhost:9001")
+
+    server.succeed("echo \"testfile\" > testfile && shasum testfile >> checksums")
+    server.succeed("curl --fail http://localhost:9001 -X POST -F \"api=upload\" -F \"dir=\" -F \"file=@testfile\" | grep \"OK\"");
+    server.succeed("rm testfile")
+    server.succeed("curl --fail http://localhost:9001 -X POST -F \"api=download\" -F \"filename=testfile\" -F \"dir=\" --output testfile");
+    server.succeed("shasum testfile >> checksums && shasum --check checksums")
+  '';
+})
diff --git a/nixos/tests/incus/container.nix b/nixos/tests/incus/container.nix
index a71c5355046a5..2329721d9504f 100644
--- a/nixos/tests/incus/container.nix
+++ b/nixos/tests/incus/container.nix
@@ -1,4 +1,4 @@
-import ../make-test-python.nix ({ pkgs, lib, extra ? {}, name ? "incus-container", ... } :
+import ../make-test-python.nix ({ pkgs, lib, extra ? {}, name ? "incus-container", incus ? pkgs.incus-lts, ... } :
 
 let
   releases = import ../../release.nix {
@@ -11,8 +11,8 @@ let
     extra;
   };
 
-  container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
-  container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+  container-image-metadata = "${releases.incusContainerMeta.${pkgs.stdenv.hostPlatform.system}}/tarball/nixos-system-${pkgs.stdenv.hostPlatform.system}.tar.xz";
+  container-image-rootfs = "${releases.incusContainerImage.${pkgs.stdenv.hostPlatform.system}}/nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}.squashfs";
 in
 {
   inherit name;
@@ -28,7 +28,10 @@ in
       memorySize = 1024;
       diskSize = 4096;
 
-      incus.enable = true;
+      incus = {
+        enable = true;
+        package = incus;
+      };
     };
     networking.nftables.enable = true;
   };
@@ -58,7 +61,7 @@ in
     machine.succeed("incus admin init --minimal")
 
     with subtest("Container image can be imported"):
-        machine.succeed("incus image import ${container-image-metadata}/*/*.tar.xz ${container-image-rootfs}/*/*.tar.xz --alias nixos")
+        machine.succeed("incus image import ${container-image-metadata} ${container-image-rootfs} --alias nixos")
 
     with subtest("Container can be launched and managed"):
         machine.succeed("incus launch nixos container")
@@ -70,51 +73,60 @@ in
         machine.succeed("incus exec container mount | grep 'lxcfs on /proc/cpuinfo type fuse.lxcfs'")
         machine.succeed("incus exec container mount | grep 'lxcfs on /proc/meminfo type fuse.lxcfs'")
 
-    with subtest("Container CPU limits can be managed"):
-        set_container("limits.cpu 1")
-        cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
-        assert cpuinfo == "1", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 1, got: {cpuinfo}"
-
-        set_container("limits.cpu 2")
-        cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
-        assert cpuinfo == "2", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 2, got: {cpuinfo}"
-
-    with subtest("Container memory limits can be managed"):
-        set_container("limits.memory 64MB")
-        meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
-        meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
-        assert meminfo_bytes == "62500 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '62500 kB', got: '{meminfo_bytes}'"
-
-        set_container("limits.memory 128MB")
-        meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
-        meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
-        assert meminfo_bytes == "125000 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '125000 kB', got: '{meminfo_bytes}'"
-
-    with subtest("lxc-container generator configures plain container"):
-        # reuse the existing container to save some time
-        machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
-        check_sysctl("container")
-
-    with subtest("lxc-container generator configures nested container"):
-        machine.execute("incus delete --force container")
-        machine.succeed("incus launch nixos container --config security.nesting=true")
-        with machine.nested("Waiting for instance to start and be usable"):
-          retry(instance_is_up)
-
-        machine.fail("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
-        target = machine.succeed("incus exec container readlink -- -f /run/systemd/system/systemd-binfmt.service").strip()
-        assert target == "/dev/null", "lxc generator did not correctly mask /run/systemd/system/systemd-binfmt.service"
-
-        check_sysctl("container")
-
-    with subtest("lxc-container generator configures privileged container"):
-        machine.execute("incus delete --force container")
-        machine.succeed("incus launch nixos container --config security.privileged=true")
-        with machine.nested("Waiting for instance to start and be usable"):
-          retry(instance_is_up)
-
-        machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
-
-        check_sysctl("container")
+    with subtest("resource limits"):
+        with subtest("Container CPU limits can be managed"):
+            set_container("limits.cpu 1")
+            cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
+            assert cpuinfo == "1", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 1, got: {cpuinfo}"
+
+            set_container("limits.cpu 2")
+            cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
+            assert cpuinfo == "2", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 2, got: {cpuinfo}"
+
+        with subtest("Container memory limits can be managed"):
+            set_container("limits.memory 64MB")
+            meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
+            meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
+            assert meminfo_bytes == "62500 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '62500 kB', got: '{meminfo_bytes}'"
+
+            set_container("limits.memory 128MB")
+            meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
+            meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
+            assert meminfo_bytes == "125000 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '125000 kB', got: '{meminfo_bytes}'"
+
+    with subtest("lxc-generator"):
+        with subtest("lxc-container generator configures plain container"):
+            # reuse the existing container to save some time
+            machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
+            check_sysctl("container")
+
+        with subtest("lxc-container generator configures nested container"):
+            machine.execute("incus delete --force container")
+            machine.succeed("incus launch nixos container --config security.nesting=true")
+            with machine.nested("Waiting for instance to start and be usable"):
+              retry(instance_is_up)
+
+            machine.fail("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
+            target = machine.succeed("incus exec container readlink -- -f /run/systemd/system/systemd-binfmt.service").strip()
+            assert target == "/dev/null", "lxc generator did not correctly mask /run/systemd/system/systemd-binfmt.service"
+
+            check_sysctl("container")
+
+        with subtest("lxc-container generator configures privileged container"):
+            machine.execute("incus delete --force container")
+            machine.succeed("incus launch nixos container --config security.privileged=true")
+            with machine.nested("Waiting for instance to start and be usable"):
+              retry(instance_is_up)
+
+            machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
+
+            check_sysctl("container")
+
+    with subtest("softDaemonRestart"):
+        with subtest("Instance remains running when softDaemonRestart is enabled and services is stopped"):
+            pid = machine.succeed("incus info container | grep 'PID'").split(":")[1].strip()
+            machine.succeed(f"ps {pid}")
+            machine.succeed("systemctl stop incus")
+            machine.succeed(f"ps {pid}")
   '';
 })
diff --git a/nixos/tests/incus/default.nix b/nixos/tests/incus/default.nix
index b850c4fba018d..c33bf1600f27a 100644
--- a/nixos/tests/incus/default.nix
+++ b/nixos/tests/incus/default.nix
@@ -3,24 +3,27 @@
   config ? { },
   pkgs ? import ../../.. { inherit system config; },
   handleTestOn,
+  incus ? pkgs.incus-lts,
 }:
 {
   container-legacy-init = import ./container.nix {
     name = "container-legacy-init";
-    inherit system pkgs;
+    inherit incus system pkgs;
   };
   container-systemd-init = import ./container.nix {
     name = "container-systemd-init";
-    inherit system pkgs;
+    inherit incus system pkgs;
     extra = {
       boot.initrd.systemd.enable = true;
     };
   };
-  lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
-  openvswitch = import ./openvswitch.nix { inherit system pkgs; };
-  preseed = import ./preseed.nix { inherit system pkgs; };
-  socket-activated = import ./socket-activated.nix { inherit system pkgs; };
-  storage = import ./storage.nix { inherit system pkgs; };
-  ui = import ./ui.nix { inherit system pkgs; };
-  virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix { inherit system pkgs; };
+  incusd-options = import ./incusd-options.nix { inherit incus system pkgs; };
+  lxd-to-incus = import ./lxd-to-incus.nix { inherit incus system pkgs; };
+  openvswitch = import ./openvswitch.nix { inherit incus system pkgs; };
+  socket-activated = import ./socket-activated.nix { inherit incus system pkgs; };
+  storage = import ./storage.nix { inherit incus system pkgs; };
+  ui = import ./ui.nix { inherit incus system pkgs; };
+  virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix {
+    inherit incus system pkgs;
+  };
 }
diff --git a/nixos/tests/incus/incusd-options.nix b/nixos/tests/incus/incusd-options.nix
new file mode 100644
index 0000000000000..a223f1c8cb55b
--- /dev/null
+++ b/nixos/tests/incus/incusd-options.nix
@@ -0,0 +1,114 @@
+# This is a set of tests for non-default options. The default options are
+# typically covered by the other tests.
+import ../make-test-python.nix (
+  {
+    pkgs,
+    lib,
+    incus ? pkgs.incus-lts,
+    ...
+  }:
+
+  let
+    releases = import ../../release.nix {
+      configuration = {
+        # Building documentation makes the test unnecessarily take a longer time:
+        documentation.enable = lib.mkForce false;
+      };
+    };
+
+    container-image-metadata = "${
+      releases.incusContainerMeta.${pkgs.stdenv.hostPlatform.system}
+    }/tarball/nixos-system-${pkgs.stdenv.hostPlatform.system}.tar.xz";
+    container-image-rootfs = "${
+      releases.incusContainerImage.${pkgs.stdenv.hostPlatform.system}
+    }/nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}.squashfs";
+  in
+  {
+    name = "incusd-options";
+
+    meta = {
+      maintainers = lib.teams.lxc.members;
+    };
+
+    nodes.machine = {
+      virtualisation = {
+        cores = 2;
+        memorySize = 1024;
+        diskSize = 4096;
+
+        incus = {
+          enable = true;
+          package = incus;
+          softDaemonRestart = false;
+
+          preseed = {
+            networks = [
+              {
+                name = "nixostestbr0";
+                type = "bridge";
+                config = {
+                  "ipv4.address" = "10.0.100.1/24";
+                  "ipv4.nat" = "true";
+                };
+              }
+            ];
+            profiles = [
+              {
+                name = "default";
+                devices = {
+                  eth0 = {
+                    name = "eth0";
+                    network = "nixostestbr0";
+                    type = "nic";
+                  };
+                  root = {
+                    path = "/";
+                    pool = "nixostest_pool";
+                    size = "35GiB";
+                    type = "disk";
+                  };
+                };
+              }
+            ];
+            storage_pools = [
+              {
+                name = "nixostest_pool";
+                driver = "dir";
+              }
+            ];
+          };
+        };
+      };
+      networking.nftables.enable = true;
+    };
+
+    testScript = ''
+      def instance_is_up(_) -> bool:
+          status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
+          return status == 0
+
+      machine.wait_for_unit("incus.service")
+      machine.wait_for_unit("incus-preseed.service")
+
+      with subtest("Container image can be imported"):
+          machine.succeed("incus image import ${container-image-metadata} ${container-image-rootfs} --alias nixos")
+
+      with subtest("Container can be launched and managed"):
+          machine.succeed("incus launch nixos container")
+          with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
+          machine.succeed("echo true | incus exec container /run/current-system/sw/bin/bash -")
+
+      with subtest("Verify preseed resources created"):
+          machine.succeed("incus profile show default")
+          machine.succeed("incus network info nixostestbr0")
+          machine.succeed("incus storage show nixostest_pool")
+
+      with subtest("Instance is stopped when softDaemonRestart is disabled and services is stopped"):
+          pid = machine.succeed("incus info container | grep 'PID'").split(":")[1].strip()
+          machine.succeed(f"ps {pid}")
+          machine.succeed("systemctl stop incus")
+          machine.fail(f"ps {pid}")
+    '';
+  }
+)
diff --git a/nixos/tests/incus/lxd-to-incus.nix b/nixos/tests/incus/lxd-to-incus.nix
index e93b76591eca4..66f78cbd33b40 100644
--- a/nixos/tests/incus/lxd-to-incus.nix
+++ b/nixos/tests/incus/lxd-to-incus.nix
@@ -1,6 +1,11 @@
 import ../make-test-python.nix (
 
-  { pkgs, lib, ... }:
+  {
+    pkgs,
+    lib,
+    incus ? pkgs.incus-lts,
+    ...
+  }:
 
   let
     releases = import ../../release.nix { configuration.documentation.enable = lib.mkForce false; };
@@ -65,7 +70,10 @@ import ../make-test-python.nix (
             ];
           };
 
-          incus.enable = true;
+          incus = {
+            enable = true;
+            package = incus;
+          };
         };
         networking.nftables.enable = true;
       };
diff --git a/nixos/tests/incus/openvswitch.nix b/nixos/tests/incus/openvswitch.nix
index 5d4aef031ad0a..1cead99080e7a 100644
--- a/nixos/tests/incus/openvswitch.nix
+++ b/nixos/tests/incus/openvswitch.nix
@@ -1,4 +1,4 @@
-import ../make-test-python.nix ({ pkgs, lib, ... } :
+import ../make-test-python.nix ({ pkgs, lib, incus ? pkgs.incus-lts, ... } :
 
 {
   name = "incus-openvswitch";
@@ -9,7 +9,11 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
 
   nodes.machine = { lib, ... }: {
     virtualisation = {
-      incus.enable = true;
+      incus = {
+        enable = true;
+        package = incus;
+      };
+
       vswitch.enable = true;
       incus.preseed = {
         networks = [
diff --git a/nixos/tests/incus/preseed.nix b/nixos/tests/incus/preseed.nix
deleted file mode 100644
index f2d928115f3ec..0000000000000
--- a/nixos/tests/incus/preseed.nix
+++ /dev/null
@@ -1,63 +0,0 @@
-import ../make-test-python.nix ({ pkgs, lib, ... } :
-
-{
-  name = "incus-preseed";
-
-  meta = {
-    maintainers = lib.teams.lxc.members;
-  };
-
-  nodes.machine = { lib, ... }: {
-    virtualisation = {
-      incus.enable = true;
-
-      incus.preseed = {
-        networks = [
-          {
-            name = "nixostestbr0";
-            type = "bridge";
-            config = {
-              "ipv4.address" = "10.0.100.1/24";
-              "ipv4.nat" = "true";
-            };
-          }
-        ];
-        profiles = [
-          {
-            name = "nixostest_default";
-            devices = {
-              eth0 = {
-                name = "eth0";
-                network = "nixostestbr0";
-                type = "nic";
-              };
-              root = {
-                path = "/";
-                pool = "default";
-                size = "35GiB";
-                type = "disk";
-              };
-            };
-          }
-        ];
-        storage_pools = [
-          {
-            name = "nixostest_pool";
-            driver = "dir";
-          }
-        ];
-      };
-    };
-    networking.nftables.enable = true;
-  };
-
-  testScript = ''
-    machine.wait_for_unit("incus.service")
-    machine.wait_for_unit("incus-preseed.service")
-
-    with subtest("Verify preseed resources created"):
-      machine.succeed("incus profile show nixostest_default")
-      machine.succeed("incus network info nixostestbr0")
-      machine.succeed("incus storage show nixostest_pool")
-  '';
-})
diff --git a/nixos/tests/incus/socket-activated.nix b/nixos/tests/incus/socket-activated.nix
index 59caf1090fbd8..55c5496396e91 100644
--- a/nixos/tests/incus/socket-activated.nix
+++ b/nixos/tests/incus/socket-activated.nix
@@ -1,4 +1,4 @@
-import ../make-test-python.nix ({ pkgs, lib, ... } :
+import ../make-test-python.nix ({ pkgs, lib, incus ? pkgs.incus-lts, ... } :
 
 {
   name = "incus-socket-activated";
@@ -9,8 +9,11 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
 
   nodes.machine = { lib, ... }: {
     virtualisation = {
-      incus.enable = true;
-      incus.socketActivation = true;
+      incus = {
+        enable = true;
+        package = incus;
+        socketActivation = true;
+      };
     };
     networking.nftables.enable = true;
   };
diff --git a/nixos/tests/incus/storage.nix b/nixos/tests/incus/storage.nix
index 190f4f7451c20..05ea6ba996eb2 100644
--- a/nixos/tests/incus/storage.nix
+++ b/nixos/tests/incus/storage.nix
@@ -1,5 +1,10 @@
 import ../make-test-python.nix (
-  { pkgs, lib, ... }:
+  {
+    pkgs,
+    lib,
+    incus ? pkgs.incus-lts,
+    ...
+  }:
 
   {
     name = "incus-storage";
@@ -19,7 +24,10 @@ import ../make-test-python.nix (
 
         virtualisation = {
           emptyDiskImages = [ 2048 ];
-          incus.enable = true;
+          incus = {
+            enable = true;
+            package = incus;
+          };
         };
       };
 
diff --git a/nixos/tests/incus/ui.nix b/nixos/tests/incus/ui.nix
index 837eb14844cea..a255d6fabe839 100644
--- a/nixos/tests/incus/ui.nix
+++ b/nixos/tests/incus/ui.nix
@@ -1,4 +1,4 @@
-import ../make-test-python.nix ({ pkgs, lib, ... }: {
+import ../make-test-python.nix ({ pkgs, lib, incus ? pkgs.incus-lts, ... }: {
   name = "incus-ui";
 
   meta = {
@@ -7,7 +7,10 @@ import ../make-test-python.nix ({ pkgs, lib, ... }: {
 
   nodes.machine = { lib, ... }: {
     virtualisation = {
-      incus.enable = true;
+      incus = {
+        enable = true;
+        package = incus;
+      };
       incus.ui.enable = true;
     };
     networking.nftables.enable = true;
diff --git a/nixos/tests/incus/virtual-machine.nix b/nixos/tests/incus/virtual-machine.nix
index eebbbd113ed16..f5ac4c8eee1f2 100644
--- a/nixos/tests/incus/virtual-machine.nix
+++ b/nixos/tests/incus/virtual-machine.nix
@@ -1,4 +1,4 @@
-import ../make-test-python.nix ({ pkgs, lib, ... }:
+import ../make-test-python.nix ({ pkgs, lib, incus ? pkgs.incus-lts, ... }:
 
 let
   releases = import ../../release.nix {
@@ -11,8 +11,8 @@ let
     };
   };
 
-  vm-image-metadata = releases.lxdVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
-  vm-image-disk = releases.lxdVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
+  vm-image-metadata = releases.incusVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
+  vm-image-disk = releases.incusVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
 
   instance-name = "instance1";
 in
@@ -33,7 +33,10 @@ in
       # Provide a TPM to test vTPM support for guests
       tpm.enable = true;
 
-      incus.enable = true;
+      incus = {
+        enable = true;
+        package = incus;
+      };
     };
     networking.nftables.enable = true;
   };
@@ -61,10 +64,10 @@ in
         with machine.nested("Waiting for instance to start and be usable"):
           retry(instance_is_up)
 
-    with subtest("lxd-agent is started"):
-        machine.succeed("incus exec ${instance-name} systemctl is-active lxd-agent")
+    with subtest("incus-agent is started"):
+        machine.succeed("incus exec ${instance-name} systemctl is-active incus-agent")
 
-    with subtest("lxd-agent has a valid path"):
+    with subtest("incus-agent has a valid path"):
         machine.succeed("incus exec ${instance-name} -- bash -c 'true'")
 
     with subtest("guest supports cpu hotplug"):
@@ -75,5 +78,11 @@ in
         machine.succeed("incus config set ${instance-name} limits.cpu=2")
         count = int(machine.succeed("incus exec ${instance-name} -- nproc").strip())
         assert count == 2, f"Wrong number of CPUs reported, want: 2, got: {count}"
+
+    with subtest("Instance remains running when softDaemonRestart is enabled and services is stopped"):
+        pid = machine.succeed("incus info ${instance-name} | grep 'PID'").split(":")[1].strip()
+        machine.succeed(f"ps {pid}")
+        machine.succeed("systemctl stop incus")
+        machine.succeed(f"ps {pid}")
   '';
 })
diff --git a/nixos/tests/initrd-network.nix b/nixos/tests/initrd-network.nix
index f2483b7393de4..abbc3d0fce822 100644
--- a/nixos/tests/initrd-network.nix
+++ b/nixos/tests/initrd-network.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ pkgs, lib, ...} : {
   name = "initrd-network";
 
-  meta.maintainers = [ pkgs.lib.maintainers.eelco ];
+  meta.maintainers = [ ];
 
   nodes.machine = { ... }: {
     imports = [ ../modules/profiles/minimal.nix ];
diff --git a/nixos/tests/initrd-secrets.nix b/nixos/tests/initrd-secrets.nix
index 0f3f83b0904e3..dbbdd83588491 100644
--- a/nixos/tests/initrd-secrets.nix
+++ b/nixos/tests/initrd-secrets.nix
@@ -9,7 +9,7 @@ let
   testWithCompressor = compressor: testing.makeTest {
     name = "initrd-secrets-${compressor}";
 
-    meta.maintainers = [ lib.maintainers.lheckemann ];
+    meta.maintainers = [ ];
 
     nodes.machine = { ... }: {
       virtualisation.useBootLoader = true;
diff --git a/nixos/tests/installed-tests/flatpak-builder.nix b/nixos/tests/installed-tests/flatpak-builder.nix
index d5e04fcf975ce..fd3df6bdea8f5 100644
--- a/nixos/tests/installed-tests/flatpak-builder.nix
+++ b/nixos/tests/installed-tests/flatpak-builder.nix
@@ -5,8 +5,11 @@ makeInstalledTest {
 
   testConfig = {
     services.flatpak.enable = true;
-    xdg.portal.enable = true;
-    xdg.portal.extraPortals = with pkgs; [ xdg-desktop-portal-gtk ];
+    xdg.portal = {
+      enable = true;
+      extraPortals = with pkgs; [ xdg-desktop-portal-gtk ];
+      config.common.default = "gtk";
+    };
     environment.systemPackages = with pkgs; [ flatpak-builder ] ++ flatpak-builder.installedTestsDependencies;
     virtualisation.diskSize = 2048;
   };
diff --git a/nixos/tests/installed-tests/gnome-photos.nix b/nixos/tests/installed-tests/gnome-photos.nix
index bcb6479ee89c6..010ad97024026 100644
--- a/nixos/tests/installed-tests/gnome-photos.nix
+++ b/nixos/tests/installed-tests/gnome-photos.nix
@@ -13,7 +13,7 @@ makeInstalledTest {
       (stdenv.mkDerivation {
         name = "desktop-gsettings";
         dontUnpack = true;
-        nativeBuildInputs = [ glib wrapGAppsHook ];
+        nativeBuildInputs = [ glib wrapGAppsHook3 ];
         buildInputs = [ gsettings-desktop-schemas ];
         installPhase = ''
           runHook preInstall
diff --git a/nixos/tests/installed-tests/ibus.nix b/nixos/tests/installed-tests/ibus.nix
index 028c20c29f2d6..8cae29112f98b 100644
--- a/nixos/tests/installed-tests/ibus.nix
+++ b/nixos/tests/installed-tests/ibus.nix
@@ -5,7 +5,10 @@ makeInstalledTest {
 
   testConfig = {
     i18n.supportedLocales = [ "all" ];
-    i18n.inputMethod.enabled = "ibus";
+    i18n.inputMethod = {
+      enable = true;
+      type = "ibus";
+    };
     systemd.user.services.ibus-daemon = {
       serviceConfig.ExecStart = "${pkgs.ibus}/bin/ibus-daemon --xim --verbose";
       wantedBy = [ "graphical-session.target" ];
diff --git a/nixos/tests/installed-tests/ostree.nix b/nixos/tests/installed-tests/ostree.nix
index 90e09ad4ddf49..b90870204225b 100644
--- a/nixos/tests/installed-tests/ostree.nix
+++ b/nixos/tests/installed-tests/ostree.nix
@@ -1,4 +1,4 @@
-{ pkgs, lib, makeInstalledTest, ... }:
+{ pkgs, makeInstalledTest, ... }:
 
 makeInstalledTest {
   tested = pkgs.ostree;
diff --git a/nixos/tests/installer-systemd-stage-1.nix b/nixos/tests/installer-systemd-stage-1.nix
index 1dd55dada042a..3b5e0ed8e7bba 100644
--- a/nixos/tests/installer-systemd-stage-1.nix
+++ b/nixos/tests/installer-systemd-stage-1.nix
@@ -19,7 +19,7 @@
     luksroot
     luksroot-format1
     luksroot-format2
-    # lvm
+    lvm
     separateBoot
     separateBootFat
     separateBootZfs
@@ -37,6 +37,8 @@
     clevisLuksFallback
     clevisZfs
     clevisZfsFallback
+    clevisZfsParentDataset
+    clevisZfsParentDatasetFallback
     gptAutoRoot
     clevisBcachefs
     clevisBcachefsFallback
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index 7e835041eb39f..d4caf3ceaf1fb 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -11,7 +11,7 @@ let
 
   # The configuration to install.
   makeConfig = { bootLoader, grubDevice, grubIdentifier, grubUseEfi
-               , extraConfig, forceGrubReinstallCount ? 0, flake ? false
+               , extraConfig, forceGrubReinstallCount ? 0, withTestInstrumentation ? true
                , clevisTest
                }:
     pkgs.writeText "configuration.nix" ''
@@ -19,7 +19,7 @@ let
 
       { imports =
           [ ./hardware-configuration.nix
-            ${if flake
+            ${if !withTestInstrumentation
               then "" # Still included, but via installer/flake.nix
               else "<nixpkgs/nixos/modules/testing/test-instrumentation.nix>"}
           ];
@@ -81,7 +81,7 @@ let
   # partitions and filesystems.
   testScriptFun = { bootLoader, createPartitions, grubDevice, grubUseEfi, grubIdentifier
                   , postInstallCommands, postBootCommands, extraConfig
-                  , testSpecialisationConfig, testFlakeSwitch, clevisTest, clevisFallbackTest
+                  , testSpecialisationConfig, testFlakeSwitch, testByAttrSwitch, clevisTest, clevisFallbackTest
                   , disableFileSystems
                   }:
     let
@@ -249,12 +249,11 @@ let
       with subtest("Check whether nixos-rebuild works"):
           target.succeed("nixos-rebuild switch >&2")
 
-      # FIXME: Nix 2.4 broke nixos-option, someone has to fix it.
-      # with subtest("Test nixos-option"):
-      #     kernel_modules = target.succeed("nixos-option boot.initrd.kernelModules")
-      #     assert "virtio_console" in kernel_modules
-      #     assert "List of modules" in kernel_modules
-      #     assert "qemu-guest.nix" in kernel_modules
+      with subtest("Test nixos-option"):
+          kernel_modules = target.succeed("nixos-option boot.initrd.kernelModules")
+          assert "virtio_console" in kernel_modules
+          assert "List of modules" in kernel_modules
+          assert "qemu-guest.nix" in kernel_modules
 
       target.shutdown()
 
@@ -317,6 +316,119 @@ let
 
       target.shutdown()
     ''
+    + optionalString testByAttrSwitch ''
+      with subtest("Configure system with attribute set"):
+        target.succeed("""
+          mkdir /root/my-config
+          mv /etc/nixos/hardware-configuration.nix /root/my-config/
+          rm /etc/nixos/configuration.nix
+        """)
+        target.copy_from_host_via_shell(
+          "${makeConfig {
+               inherit bootLoader grubDevice grubIdentifier grubUseEfi extraConfig clevisTest;
+               forceGrubReinstallCount = 1;
+               withTestInstrumentation = false;
+            }}",
+          "/root/my-config/configuration.nix",
+        )
+        target.copy_from_host_via_shell(
+          "${./installer/byAttrWithChannel.nix}",
+          "/root/my-config/default.nix",
+        )
+      with subtest("Switch to attribute set based config with channels"):
+        target.succeed("nixos-rebuild switch --file /root/my-config/default.nix")
+
+      target.shutdown()
+
+      ${startTarget}
+
+      target.succeed("""
+        rm /root/my-config/default.nix
+      """)
+      target.copy_from_host_via_shell(
+        "${./installer/byAttrNoChannel.nix}",
+        "/root/my-config/default.nix",
+      )
+
+      target.succeed("""
+        pkgs=$(readlink -f /nix/var/nix/profiles/per-user/root/channels)/nixos
+        if ! [[ -e $pkgs/pkgs/top-level/default.nix ]]; then
+          echo 1>&2 "$pkgs does not seem to be a nixpkgs source. Please fix the test so that pkgs points to a nixpkgs source.";
+          exit 1;
+        fi
+        sed -e s^@nixpkgs@^$pkgs^ -i /root/my-config/default.nix
+
+      """)
+
+      with subtest("Switch to attribute set based config without channels"):
+        target.succeed("nixos-rebuild switch --file /root/my-config/default.nix")
+
+      target.shutdown()
+
+      ${startTarget}
+
+      with subtest("nix-channel command is not available anymore"):
+        target.succeed("! which nix-channel")
+
+      with subtest("builtins.nixPath is now empty"):
+        target.succeed("""
+          [[ "[ ]" == "$(nix-instantiate builtins.nixPath --eval --expr)" ]]
+        """)
+
+      with subtest("<nixpkgs> does not resolve"):
+        target.succeed("""
+          ! nix-instantiate '<nixpkgs>' --eval --expr
+        """)
+
+      with subtest("Evaluate attribute set based config in fresh env without nix-channel"):
+        target.succeed("nixos-rebuild switch --file /root/my-config/default.nix")
+
+      with subtest("Evaluate attribute set based config in fresh env without channel profiles"):
+        target.succeed("""
+          (
+            exec 1>&2
+            mkdir -p /root/restore
+            mv -v /root/.nix-channels /root/restore/
+            mv -v ~/.nix-defexpr /root/restore/
+            mkdir -p /root/restore/channels
+            mv -v /nix/var/nix/profiles/per-user/root/channels* /root/restore/channels/
+          )
+        """)
+        target.succeed("nixos-rebuild switch --file /root/my-config/default.nix")
+    ''
+    + optionalString (testByAttrSwitch && testFlakeSwitch) ''
+      with subtest("Restore channel profiles"):
+        target.succeed("""
+          (
+            exec 1>&2
+            mv -v /root/restore/.nix-channels /root/
+            mv -v /root/restore/.nix-defexpr ~/.nix-defexpr
+            mv -v /root/restore/channels/* /nix/var/nix/profiles/per-user/root/
+            rm -vrf /root/restore
+          )
+        """)
+
+      with subtest("Restore /etc/nixos"):
+        target.succeed("""
+          mv -v /root/my-config/hardware-configuration.nix /etc/nixos/
+        """)
+        target.copy_from_host_via_shell(
+          "${makeConfig {
+               inherit bootLoader grubDevice grubIdentifier grubUseEfi extraConfig clevisTest;
+               forceGrubReinstallCount = 1;
+            }}",
+          "/etc/nixos/configuration.nix",
+        )
+
+      with subtest("Restore /root/my-config"):
+        target.succeed("""
+          rm -vrf /root/my-config
+        """)
+
+    ''
+    + optionalString (testByAttrSwitch && !testFlakeSwitch) ''
+      target.shutdown()
+    ''
     + optionalString testFlakeSwitch ''
       ${startTarget}
 
@@ -331,7 +443,7 @@ let
           "${makeConfig {
                inherit bootLoader grubDevice grubIdentifier grubUseEfi extraConfig clevisTest;
                forceGrubReinstallCount = 1;
-               flake = true;
+               withTestInstrumentation = false;
             }}",
           "/root/my-config/configuration.nix",
         )
@@ -351,7 +463,32 @@ let
         """)
 
       with subtest("Switch to flake based config"):
-        target.succeed("nixos-rebuild switch --flake /root/my-config#xyz")
+        target.succeed("nixos-rebuild switch --flake /root/my-config#xyz 2>&1 | tee activation-log >&2")
+
+        target.succeed("""
+          cat -n activation-log >&2
+        """)
+
+        target.succeed("""
+          grep -F '/root/.nix-defexpr/channels exists, but channels have been disabled.' activation-log
+        """)
+        target.succeed("""
+          grep -F '/nix/var/nix/profiles/per-user/root/channels exists, but channels have been disabled.' activation-log
+        """)
+        target.succeed("""
+          grep -F '/root/.nix-defexpr/channels exists, but channels have been disabled.' activation-log
+        """)
+        target.succeed("""
+          grep -F 'Due to https://github.com/NixOS/nix/issues/9574, Nix may still use these channels when NIX_PATH is unset.' activation-log
+        """)
+        target.succeed("rm activation-log")
+
+        # Perform the suggested cleanups we've just seen in the log
+        # TODO after https://github.com/NixOS/nix/issues/9574: don't remove them yet
+        target.succeed("""
+          rm -rf /root/.nix-defexpr/channels /nix/var/nix/profiles/per-user/root/channels
+        """)
+
 
       target.shutdown()
 
@@ -362,10 +499,20 @@ let
 
       # Note that the channel profile is still present on disk, but configured
       # not to be used.
-      with subtest("builtins.nixPath is now empty"):
-        target.succeed("""
-          [[ "[ ]" == "$(nix-instantiate builtins.nixPath --eval --expr)" ]]
-        """)
+      # TODO after issue https://github.com/NixOS/nix/issues/9574: re-enable this assertion
+      # I believe what happens is
+      #   - because of the issue, we've removed the `nix-path =` line from nix.conf
+      #   - the "backdoor" shell is not a proper session and does not have `NIX_PATH=""` set
+      #   - seeing no nix path settings at all, Nix loads its hardcoded default value,
+      #     which is unfortunately non-empty
+      # Or maybe it's the new default NIX_PATH?? :(
+      # with subtest("builtins.nixPath is now empty"):
+      #   target.succeed("""
+      #     (
+      #       set -x;
+      #       [[ "[ ]" == "$(nix-instantiate builtins.nixPath --eval --expr)" ]];
+      #     )
+      #   """)
 
       with subtest("<nixpkgs> does not resolve"):
         target.succeed("""
@@ -379,12 +526,16 @@ let
         target.succeed("""
           (
             exec 1>&2
-            rm -v /root/.nix-channels
+            rm -vf /root/.nix-channels
             rm -vrf ~/.nix-defexpr
             rm -vrf /nix/var/nix/profiles/per-user/root/channels*
           )
         """)
-        target.succeed("nixos-rebuild switch --flake /root/my-config#xyz")
+        target.succeed("nixos-rebuild switch --flake /root/my-config#xyz | tee activation-log >&2")
+        target.succeed("cat -n activation-log >&2")
+        target.succeed("! grep -F '/root/.nix-defexpr/channels' activation-log")
+        target.succeed("! grep -F 'but channels have been disabled' activation-log")
+        target.succeed("! grep -F 'https://github.com/NixOS/nix/issues/9574' activation-log")
 
       target.shutdown()
     '';
@@ -400,6 +551,7 @@ let
     , enableOCR ? false, meta ? {}
     , testSpecialisationConfig ? false
     , testFlakeSwitch ? false
+    , testByAttrSwitch ? false
     , clevisTest ? false
     , clevisFallbackTest ? false
     , disableFileSystems ? false
@@ -474,12 +626,16 @@ let
             libxslt.bin
             nixos-artwork.wallpapers.simple-dark-gray-bottom
             ntp
+            perlPackages.ConfigIniFiles
+            perlPackages.FileSlurp
+            perlPackages.JSON
             perlPackages.ListCompare
             perlPackages.XMLLibXML
             # make-options-doc/default.nix
             (python3.withPackages (p: [ p.mistune ]))
             shared-mime-info
             sudo
+            switch-to-configuration-ng
             texinfo
             unionfs-fuse
             xorg.lndir
@@ -493,6 +649,10 @@ let
           in [
             (pkgs.grub2.override { inherit zfsSupport; })
             (pkgs.grub2_efi.override { inherit zfsSupport; })
+            pkgs.nixos-artwork.wallpapers.simple-dark-gray-bootloader
+            pkgs.perlPackages.FileCopyRecursive
+            pkgs.perlPackages.XMLSAX
+            pkgs.perlPackages.XMLSAXBase
           ])
           ++ optionals (bootLoader == "systemd-boot") [
             pkgs.zstd.bin
@@ -534,7 +694,7 @@ let
       testScript = testScriptFun {
         inherit bootLoader createPartitions postInstallCommands postBootCommands
                 grubDevice grubIdentifier grubUseEfi extraConfig
-                testSpecialisationConfig testFlakeSwitch clevisTest clevisFallbackTest
+                testSpecialisationConfig testFlakeSwitch testByAttrSwitch clevisTest clevisFallbackTest
                 disableFileSystems;
       };
     };
@@ -590,6 +750,15 @@ let
     testFlakeSwitch = true;
   };
 
+  simple-test-config-by-attr = simple-test-config // {
+    testByAttrSwitch = true;
+  };
+
+  simple-test-config-from-by-attr-to-flake = simple-test-config // {
+    testByAttrSwitch = true;
+    testFlakeSwitch = true;
+  };
+
   simple-uefi-grub-config = {
     createPartitions = ''
       installer.succeed(
@@ -715,7 +884,7 @@ let
     '';
   };
 
-  mkClevisZfsTest = { fallback ? false }: makeInstallerTest "clevis-zfs${optionalString fallback "-fallback"}" {
+  mkClevisZfsTest = { fallback ? false, parentDataset ? false }: makeInstallerTest "clevis-zfs${optionalString parentDataset "-parent-dataset"}${optionalString fallback "-fallback"}" {
     clevisTest = true;
     clevisFallbackTest = fallback;
     enableOCR = fallback;
@@ -732,17 +901,27 @@ let
         "udevadm settle",
         "mkswap /dev/vda2 -L swap",
         "swapon -L swap",
+    '' + optionalString (!parentDataset) ''
         "zpool create -O mountpoint=legacy rpool /dev/vda3",
         "echo -n password | zfs create"
         + " -o encryption=aes-256-gcm -o keyformat=passphrase rpool/root",
+    '' + optionalString (parentDataset) ''
+        "echo -n password | zpool create -O mountpoint=none -O encryption=on -O keyformat=passphrase rpool /dev/vda3",
+        "zfs create -o mountpoint=legacy rpool/root",
+    '' +
+    ''
         "mount -t zfs rpool/root /mnt",
         "mkfs.ext3 -L boot /dev/vda1",
         "mkdir -p /mnt/boot",
         "mount LABEL=boot /mnt/boot",
         "udevadm settle")
     '';
-    extraConfig = ''
+    extraConfig = optionalString (!parentDataset) ''
       boot.initrd.clevis.devices."rpool/root".secretFile = "/etc/nixos/clevis-secret.jwe";
+    '' + optionalString (parentDataset) ''
+      boot.initrd.clevis.devices."rpool".secretFile = "/etc/nixos/clevis-secret.jwe";
+    '' +
+    ''
       boot.zfs.requestEncryptionCredentials = true;
 
 
@@ -773,6 +952,10 @@ in {
 
   switchToFlake = makeInstallerTest "switch-to-flake" simple-test-config-flake;
 
+  switchToByAttr = makeInstallerTest "switch-to-by-attr" simple-test-config-by-attr;
+
+  switchFromByAttrToFlake = makeInstallerTest "switch-from-by-attr-to-flake" simple-test-config-from-by-attr-to-flake;
+
   # Test cloned configurations with the simple grub configuration
   simpleSpecialised = makeInstallerTest "simpleSpecialised" (simple-test-config // specialisation-test-extraconfig);
 
@@ -975,6 +1158,9 @@ in {
           "mount LABEL=nixos /mnt",
       )
     '';
+    extraConfig = optionalString systemdStage1 ''
+      boot.initrd.services.lvm.enable = true;
+    '';
   };
 
   # Boot off an encrypted root partition with the default LUKS header format
@@ -1357,6 +1543,8 @@ in {
   clevisLuksFallback = mkClevisLuksTest { fallback = true; };
   clevisZfs = mkClevisZfsTest { };
   clevisZfsFallback = mkClevisZfsTest { fallback = true; };
+  clevisZfsParentDataset = mkClevisZfsTest { parentDataset = true; };
+  clevisZfsParentDatasetFallback = mkClevisZfsTest { parentDataset = true; fallback = true; };
 } // optionalAttrs systemdStage1 {
   stratisRoot = makeInstallerTest "stratisRoot" {
     createPartitions = ''
diff --git a/nixos/tests/installer/byAttrNoChannel.nix b/nixos/tests/installer/byAttrNoChannel.nix
new file mode 100644
index 0000000000000..03293cd4a0e35
--- /dev/null
+++ b/nixos/tests/installer/byAttrNoChannel.nix
@@ -0,0 +1,18 @@
+# This file gets copied into the installation
+
+let
+  nixpkgs = "@nixpkgs@";
+in
+
+{ evalConfig ? import "${nixpkgs}/nixos/lib/eval-config.nix" }:
+
+evalConfig {
+  modules = [
+    ./configuration.nix
+    ( import "${nixpkgs}/nixos/modules/testing/test-instrumentation.nix" )
+    {
+      # Disable nix channels
+      nix.channel.enable = false;
+    }
+  ];
+}
diff --git a/nixos/tests/installer/byAttrWithChannel.nix b/nixos/tests/installer/byAttrWithChannel.nix
new file mode 100644
index 0000000000000..951231dcba3e7
--- /dev/null
+++ b/nixos/tests/installer/byAttrWithChannel.nix
@@ -0,0 +1,10 @@
+# This file gets copied into the installation
+
+{ evalConfig ? import <nixpkgs/nixos/lib/eval-config.nix> }:
+
+evalConfig {
+  modules = [
+    ./configuration.nix
+    ( import <nixpkgs/nixos/modules/testing/test-instrumentation.nix> )
+  ];
+}
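
For context, the two files above evaluate the copied /etc/nixos/configuration.nix through nixpkgs' eval-config.nix, so the installer can build the system "by attribute" instead of through a channel-provided <nixpkgs/nixos>. Below is a minimal sketch of how such a file can be consumed once it has been copied into the installation and @nixpkgs@ has been substituted; the attribute path follows the usual eval-config result layout and is an illustration, not part of this patch:

    # Hypothetical consumer of byAttrNoChannel.nix (illustrative only):
    let
      evaluated = import /etc/nixos/byAttrNoChannel.nix { };
    in
    # The eval-config result exposes the built system at config.system.build.toplevel.
    evaluated.config.system.build.toplevel
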
diff --git a/nixos/tests/invidious.nix b/nixos/tests/invidious.nix
index 372b47b56c345..05b43c5556060 100644
--- a/nixos/tests/invidious.nix
+++ b/nixos/tests/invidious.nix
@@ -20,7 +20,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       };
       networking.firewall.allowedTCPPorts = [ config.services.postgresql.settings.port ];
     };
-    machine = { config, lib, pkgs, ... }: {
+    machine = { lib, pkgs, ... }: {
       services.invidious = {
         enable = true;
       };
@@ -37,6 +37,19 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           };
           networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
         };
+        nginx-sig-helper.configuration = {
+          services.invidious = {
+            nginx.enable = true;
+            domain = "invidious.example.com";
+            sig-helper.enable = true;
+            settings.log_level = "Trace";
+          };
+          services.nginx.virtualHosts."invidious.example.com" = {
+            forceSSL = false;
+            enableACME = false;
+          };
+          networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
+        };
         nginx-scale.configuration = {
           services.invidious = {
             nginx.enable = true;
@@ -81,11 +94,11 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
 
     def activate_specialisation(name: str):
-        machine.succeed(f"${nodes.machine.config.system.build.toplevel}/specialisation/{name}/bin/switch-to-configuration test >&2")
+        machine.succeed(f"${nodes.machine.system.build.toplevel}/specialisation/{name}/bin/switch-to-configuration test >&2")
 
 
-    url = "http://localhost:${toString nodes.machine.config.services.invidious.port}"
-    port = ${toString nodes.machine.config.services.invidious.port}
+    url = "http://localhost:${toString nodes.machine.services.invidious.port}"
+    port = ${toString nodes.machine.services.invidious.port}
 
     # start postgres vm now
     postgres_tcp.start()
@@ -116,6 +129,14 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     curl_assert_status_code("http://invidious.example.com/vi/dQw4w9WgXcQ/mqdefault.jpg", 502)
     machine.succeed("journalctl -eu http3-ytproxy.service | grep -o 'dQw4w9WgXcQ'")
 
+    activate_specialisation("nginx-sig-helper")
+    machine.wait_for_unit("invidious-sig-helper.service")
+    # We can't really test the sig helper thoroughly without an internet connection,
+    # but Invidious does connect to the sig helper and crashes if it is not available.
+    machine.wait_for_open_port(80)
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+    machine.succeed("journalctl -eu invidious.service | grep -o \"SigHelper: Using helper at 'tcp://127.0.0.1:2999'\"")
+
     postgres_tcp.wait_for_unit("postgresql.service")
     activate_specialisation("postgres-tcp")
     machine.wait_for_open_port(port)
diff --git a/nixos/tests/ipv6.nix b/nixos/tests/ipv6.nix
index 75faa6f602010..8fa7eec8ffb2a 100644
--- a/nixos/tests/ipv6.nix
+++ b/nixos/tests/ipv6.nix
@@ -4,7 +4,7 @@
 import ./make-test-python.nix ({ pkgs, lib, ...} : {
   name = "ipv6";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco ];
+    maintainers = [ ];
   };
 
   nodes =
@@ -39,6 +39,8 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
         { services.httpd.enable = true;
           services.httpd.adminAddr = "foo@example.org";
           networking.firewall.allowedTCPPorts = [ 80 ];
+          # Disable the test driver's default IPv6 address.
+          networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ ];
         };
 
       router =
diff --git a/nixos/tests/iscsi-root.nix b/nixos/tests/iscsi-root.nix
index 0d7c48464eecc..6953b6ce9a065 100644
--- a/nixos/tests/iscsi-root.nix
+++ b/nixos/tests/iscsi-root.nix
@@ -59,7 +59,7 @@ import ./make-test-python.nix (
                         ];
                         portals = [
                           {
-                            ip_address = "0.0.0.0";
+                            ip_address = "[::]";
                             iser = false;
                             offload = false;
                             port = 3260;
@@ -93,7 +93,7 @@ import ./make-test-python.nix (
               xfsprogs
             ];
 
-            system.extraDependencies = [ nodes.initiatorRootDisk.config.system.build.toplevel ];
+            system.extraDependencies = [ nodes.initiatorRootDisk.system.build.toplevel ];
 
             nix.settings = {
               substituters = lib.mkForce [];
@@ -108,7 +108,7 @@ import ./make-test-python.nix (
               [
                 "boot.shell_on_fail"
                 "console=tty1"
-                "ip=${config.networking.primaryIPAddress}:::255.255.255.0::ens9:none"
+                "ip=${config.networking.primaryIPAddress}:::255.255.255.0::eth1:none"
               ]
             );
 
diff --git a/nixos/tests/jackett.nix b/nixos/tests/jackett.nix
index bc8b724e8b4b6..4e65cb61d17a7 100644
--- a/nixos/tests/jackett.nix
+++ b/nixos/tests/jackett.nix
@@ -1,17 +1,21 @@
 import ./make-test-python.nix ({ lib, ... }:
 
-{
+let
+  jackettPort = 9117;
+in {
   name = "jackett";
   meta.maintainers = with lib.maintainers; [ etu ];
 
   nodes.machine =
-    { pkgs, ... }:
-    { services.jackett.enable = true; };
+    { pkgs, ... }: {
+      services.jackett.enable = true;
+      services.jackett.port = jackettPort;
+    };
 
   testScript = ''
     machine.start()
     machine.wait_for_unit("jackett.service")
-    machine.wait_for_open_port(9117)
-    machine.succeed("curl --fail http://localhost:9117/")
+    machine.wait_for_open_port(${toString jackettPort})
+    machine.succeed("curl --fail http://localhost:${toString jackettPort}/")
   '';
 })
diff --git a/nixos/tests/jenkins.nix b/nixos/tests/jenkins.nix
index a8f6210006547..d7394c866c143 100644
--- a/nixos/tests/jenkins.nix
+++ b/nixos/tests/jenkins.nix
@@ -7,7 +7,7 @@
 import ./make-test-python.nix ({ pkgs, ...} : {
   name = "jenkins";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ bjornfor coconnor domenkozar eelco ];
+    maintainers = [ bjornfor coconnor domenkozar ];
   };
 
   nodes = {
diff --git a/nixos/tests/jool.nix b/nixos/tests/jool.nix
index 93575f07b1c8c..37a4ad6ce0111 100644
--- a/nixos/tests/jool.nix
+++ b/nixos/tests/jool.nix
@@ -165,9 +165,12 @@ in
       virtualisation.vlans = [ 1 ];
 
       networking.interfaces.eth1.ipv6 = {
-        addresses = [ { address = "2001:db8::8"; prefixLength = 96; } ];
-        routes    = [ { address = "64:ff9b::";   prefixLength = 96;
-                        via = "2001:db8::1"; } ];
+        addresses = lib.mkForce [ { address = "2001:db8::8"; prefixLength = 96; } ];
+        routes = lib.mkForce [ {
+          address = "64:ff9b::";
+          prefixLength = 96;
+          via = "2001:db8::1";
+        } ];
       };
     };
 
@@ -177,9 +180,12 @@ in
 
       virtualisation.vlans = [ 1 ];
       networking.interfaces.eth1.ipv6 = {
-        addresses = [ { address = "2001:db8::9"; prefixLength = 96; } ];
-        routes    = [ { address = "64:ff9b::";   prefixLength = 96;
-                        via = "2001:db8::1"; } ];
+        addresses = lib.mkForce [ { address = "2001:db8::9"; prefixLength = 96; } ];
+        routes    = lib.mkForce [ {
+          address = "64:ff9b::";
+          prefixLength = 96;
+          via = "2001:db8::1";
+        } ];
       };
     };
 
diff --git a/nixos/tests/jotta-cli.nix b/nixos/tests/jotta-cli.nix
index 5eefe65c1d385..0df23ee2cba5c 100644
--- a/nixos/tests/jotta-cli.nix
+++ b/nixos/tests/jotta-cli.nix
@@ -4,7 +4,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
   meta.maintainers = with pkgs.lib.maintainers; [ evenbrenden ];
 
   nodes.machine = { pkgs, ... }: {
-    user.services.jotta-cli.enable = true;
+    services.jotta-cli.enable = true;
     imports = [ ./common/user-account.nix ];
   };
 
diff --git a/nixos/tests/k3s/airgap-images.nix b/nixos/tests/k3s/airgap-images.nix
new file mode 100644
index 0000000000000..ade04c99840ae
--- /dev/null
+++ b/nixos/tests/k3s/airgap-images.nix
@@ -0,0 +1,42 @@
+# A test that imports k3s airgapped images and verifies that all expected images are present
+import ../make-test-python.nix (
+  { lib, k3s, ... }:
+  {
+    name = "${k3s.name}-airgap-images";
+    meta.maintainers = lib.teams.k3s.members;
+
+    nodes.machine = _: {
+      # k3s uses enough resources the default vm fails.
+      virtualisation.memorySize = 1536;
+      virtualisation.diskSize = 4096;
+
+      services.k3s = {
+        enable = true;
+        role = "server";
+        package = k3s;
+        # Slightly reduce resource usage
+        extraFlags = [
+          "--disable coredns"
+          "--disable local-storage"
+          "--disable metrics-server"
+          "--disable servicelb"
+          "--disable traefik"
+        ];
+        images = [ k3s.airgapImages ];
+      };
+    };
+
+    testScript = ''
+      import json
+
+      start_all()
+      machine.wait_for_unit("k3s")
+      machine.wait_until_succeeds("journalctl -r --no-pager -u k3s | grep \"Imported images from /var/lib/rancher/k3s/agent/images/\"", timeout=120)
+      images = json.loads(machine.succeed("crictl img -o json"))
+      image_names = [i["repoTags"][0] for i in images["images"]]
+      with open("${k3s.imagesList}") as expected_images:
+        for line in expected_images:
+          assert line.rstrip() in image_names, f"The image {line.rstrip()} is not present in the airgap images archive"
+    '';
+  }
+)
diff --git a/nixos/tests/k3s/auto-deploy.nix b/nixos/tests/k3s/auto-deploy.nix
new file mode 100644
index 0000000000000..c25503ac10874
--- /dev/null
+++ b/nixos/tests/k3s/auto-deploy.nix
@@ -0,0 +1,125 @@
+# Tests whether container images are imported and auto-deploying manifests work
+import ../make-test-python.nix (
+  {
+    pkgs,
+    lib,
+    k3s,
+    ...
+  }:
+  let
+    pauseImageEnv = pkgs.buildEnv {
+      name = "k3s-pause-image-env";
+      paths = with pkgs; [
+        tini
+        (hiPrio coreutils)
+        busybox
+      ];
+    };
+    pauseImage = pkgs.dockerTools.buildImage {
+      name = "test.local/pause";
+      tag = "local";
+      copyToRoot = pauseImageEnv;
+      config.Entrypoint = [
+        "/bin/tini"
+        "--"
+        "/bin/sleep"
+        "inf"
+      ];
+    };
+    helloImage = pkgs.dockerTools.buildImage {
+      name = "test.local/hello";
+      tag = "local";
+      copyToRoot = pkgs.hello;
+      config.Entrypoint = [ "${pkgs.hello}/bin/hello" ];
+    };
+  in
+  {
+    name = "${k3s.name}-auto-deploy";
+
+    nodes.machine =
+      { pkgs, ... }:
+      {
+        environment.systemPackages = [ k3s ];
+
+        # k3s uses enough resources the default vm fails.
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.k3s.enable = true;
+        services.k3s.role = "server";
+        services.k3s.package = k3s;
+        # Slightly reduce resource usage
+        services.k3s.extraFlags = [
+          "--disable coredns"
+          "--disable local-storage"
+          "--disable metrics-server"
+          "--disable servicelb"
+          "--disable traefik"
+          "--pause-image test.local/pause:local"
+        ];
+        services.k3s.images = [
+          pauseImage
+          helloImage
+        ];
+        services.k3s.manifests = {
+          absent = {
+            enable = false;
+            content = {
+              apiVersion = "v1";
+              kind = "Namespace";
+              metadata.name = "absent";
+            };
+          };
+
+          present = {
+            target = "foo-namespace.yaml";
+            content = {
+              apiVersion = "v1";
+              kind = "Namespace";
+              metadata.name = "foo";
+            };
+          };
+
+          hello.content = {
+            apiVersion = "batch/v1";
+            kind = "Job";
+            metadata.name = "hello";
+            spec = {
+              template.spec = {
+                containers = [
+                  {
+                    name = "hello";
+                    image = "test.local/hello:local";
+                  }
+                ];
+                restartPolicy = "OnFailure";
+              };
+            };
+          };
+        };
+      };
+
+    testScript = ''
+      start_all()
+
+      machine.wait_for_unit("k3s")
+      # check existence of the manifest files
+      machine.fail("ls /var/lib/rancher/k3s/server/manifests/absent.yaml")
+      machine.succeed("ls /var/lib/rancher/k3s/server/manifests/foo-namespace.yaml")
+      machine.succeed("ls /var/lib/rancher/k3s/server/manifests/hello.yaml")
+
+      # check if container images got imported
+      machine.wait_until_succeeds("crictl img | grep 'test\\.local/pause'")
+      machine.wait_until_succeeds("crictl img | grep 'test\\.local/hello'")
+
+      # check if resources of manifests got created
+      machine.wait_until_succeeds("kubectl get ns foo")
+      machine.wait_until_succeeds("kubectl wait --for=condition=complete job/hello")
+      machine.fail("kubectl get ns absent")
+
+      machine.shutdown()
+    '';
+
+    meta.maintainers = lib.teams.k3s.members;
+  }
+)
diff --git a/nixos/tests/k3s/containerd-config.nix b/nixos/tests/k3s/containerd-config.nix
new file mode 100644
index 0000000000000..ffc449b03abac
--- /dev/null
+++ b/nixos/tests/k3s/containerd-config.nix
@@ -0,0 +1,58 @@
+# A test verifying that containerdConfigTemplate settings get written to containerd/config.toml
+import ../make-test-python.nix (
+  {
+    pkgs,
+    lib,
+    k3s,
+    ...
+  }:
+  let
+    nodeName = "test";
+  in
+  {
+    name = "${k3s.name}-containerd-config";
+    nodes.machine =
+      { ... }:
+      {
+        environment.systemPackages = [ pkgs.jq ];
+        # k3s uses enough resources the default vm fails.
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.k3s = {
+          enable = true;
+          package = k3s;
+          # Slightly reduce resource usage
+          extraFlags = [
+            "--disable coredns"
+            "--disable local-storage"
+            "--disable metrics-server"
+            "--disable servicelb"
+            "--disable traefik"
+            "--node-name ${nodeName}"
+          ];
+          containerdConfigTemplate = ''
+            # Base K3s config
+            {{ template "base" . }}
+
+            # MAGIC COMMENT
+          '';
+        };
+      };
+
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("k3s")
+      # wait until the node is ready
+      machine.wait_until_succeeds(r"""kubectl get node ${nodeName} -ojson | jq -e '.status.conditions[] | select(.type == "Ready") | .status == "True"'""")
+      # test whether the config template file contains the magic comment
+      out=machine.succeed("cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl")
+      assert "MAGIC COMMENT" in out, "the containerd config template does not contain the magic comment"
+      # test whether the config file contains the magic comment
+      out=machine.succeed("cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml")
+      assert "MAGIC COMMENT" in out, "the containerd config does not contain the magic comment"
+    '';
+
+    meta.maintainers = lib.teams.k3s.members;
+  }
+)
diff --git a/nixos/tests/k3s/default.nix b/nixos/tests/k3s/default.nix
index 512dc06ee77ec..7edaf6f38ed28 100644
--- a/nixos/tests/k3s/default.nix
+++ b/nixos/tests/k3s/default.nix
@@ -1,18 +1,29 @@
-{ system ? builtins.currentSystem
-, pkgs ? import ../../.. { inherit system; }
-, lib ? pkgs.lib
+{
+  system ? builtins.currentSystem,
+  pkgs ? import ../../.. { inherit system; },
+  lib ? pkgs.lib,
 }:
 let
   allK3s = lib.filterAttrs (n: _: lib.strings.hasPrefix "k3s_" n) pkgs;
 in
 {
-  # Testing K3s with Etcd backend
-  etcd = lib.mapAttrs (_: k3s: import ./etcd.nix {
-    inherit system pkgs k3s;
-    inherit (pkgs) etcd;
-  }) allK3s;
-  # Run a single node k3s cluster and verify a pod can run
-  single-node = lib.mapAttrs (_: k3s: import ./single-node.nix { inherit system pkgs k3s; }) allK3s;
-  # Run a multi-node k3s cluster and verify pod networking works across nodes
+  airgap-images = lib.mapAttrs (
+    _: k3s: import ./airgap-images.nix { inherit system pkgs k3s; }
+  ) allK3s;
+  auto-deploy = lib.mapAttrs (_: k3s: import ./auto-deploy.nix { inherit system pkgs k3s; }) allK3s;
+  containerd-config = lib.mapAttrs (
+    _: k3s: import ./containerd-config.nix { inherit system pkgs k3s; }
+  ) allK3s;
+  etcd = lib.mapAttrs (
+    _: k3s:
+    import ./etcd.nix {
+      inherit system pkgs k3s;
+      inherit (pkgs) etcd;
+    }
+  ) allK3s;
+  kubelet-config = lib.mapAttrs (
+    _: k3s: import ./kubelet-config.nix { inherit system pkgs k3s; }
+  ) allK3s;
   multi-node = lib.mapAttrs (_: k3s: import ./multi-node.nix { inherit system pkgs k3s; }) allK3s;
+  single-node = lib.mapAttrs (_: k3s: import ./single-node.nix { inherit system pkgs k3s; }) allK3s;
 }
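
With this restructuring, k3s/default.nix produces one attribute set per test, keyed by every k3s_* package found in nixpkgs. A minimal sketch of selecting a single version/test combination follows; the k3s_1_30 attribute name is only an example and exists only if the pinned nixpkgs ships such a package:

    # Hypothetical selection from the test matrix built above (illustrative only):
    let
      k3sTests = import ./nixos/tests/k3s { };   # evaluated from a nixpkgs checkout root
    in
    k3sTests.single-node.k3s_1_30
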
diff --git a/nixos/tests/k3s/etcd.nix b/nixos/tests/k3s/etcd.nix
index d6e9a294adb13..2616ab02a6092 100644
--- a/nixos/tests/k3s/etcd.nix
+++ b/nixos/tests/k3s/etcd.nix
@@ -1,100 +1,125 @@
-import ../make-test-python.nix ({ pkgs, lib, k3s, etcd, ... }:
-
-{
-  name = "${k3s.name}-etcd";
-
-  nodes = {
-
-    etcd = { ... }: {
-      services.etcd = {
-        enable = true;
-        openFirewall = true;
-        listenClientUrls = [ "http://192.168.1.1:2379" "http://127.0.0.1:2379" ];
-        listenPeerUrls = [ "http://192.168.1.1:2380" ];
-        initialAdvertisePeerUrls = [ "http://192.168.1.1:2380" ];
-        initialCluster = [ "etcd=http://192.168.1.1:2380" ];
-      };
-      networking = {
-        useDHCP = false;
-        defaultGateway = "192.168.1.1";
-        interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
-          { address = "192.168.1.1"; prefixLength = 24; }
-        ];
-      };
-    };
+# Tests K3s with Etcd backend
+import ../make-test-python.nix (
+  {
+    pkgs,
+    lib,
+    k3s,
+    etcd,
+    ...
+  }:
+
+  {
+    name = "${k3s.name}-etcd";
+
+    nodes = {
+
+      etcd =
+        { ... }:
+        {
+          services.etcd = {
+            enable = true;
+            openFirewall = true;
+            listenClientUrls = [
+              "http://192.168.1.1:2379"
+              "http://127.0.0.1:2379"
+            ];
+            listenPeerUrls = [ "http://192.168.1.1:2380" ];
+            initialAdvertisePeerUrls = [ "http://192.168.1.1:2380" ];
+            initialCluster = [ "etcd=http://192.168.1.1:2380" ];
+          };
+          networking = {
+            useDHCP = false;
+            defaultGateway = "192.168.1.1";
+            interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+              {
+                address = "192.168.1.1";
+                prefixLength = 24;
+              }
+            ];
+          };
+        };
 
-    k3s = { pkgs, ... }: {
-      environment.systemPackages = with pkgs; [ jq ];
-      # k3s uses enough resources the default vm fails.
-      virtualisation.memorySize = 1536;
-      virtualisation.diskSize = 4096;
-
-      services.k3s = {
-        enable = true;
-        role = "server";
-        extraFlags = builtins.toString [
-          "--datastore-endpoint=\"http://192.168.1.1:2379\""
-          "--disable" "coredns"
-          "--disable" "local-storage"
-          "--disable" "metrics-server"
-          "--disable" "servicelb"
-          "--disable" "traefik"
-          "--node-ip" "192.168.1.2"
-        ];
-      };
-
-      networking = {
-        firewall = {
-          allowedTCPPorts = [ 2379 2380 6443 ];
-          allowedUDPPorts = [ 8472 ];
+      k3s =
+        { pkgs, ... }:
+        {
+          environment.systemPackages = with pkgs; [ jq ];
+          # k3s uses enough resources the default vm fails.
+          virtualisation.memorySize = 1536;
+          virtualisation.diskSize = 4096;
+
+          services.k3s = {
+            enable = true;
+            role = "server";
+            extraFlags = [
+              "--datastore-endpoint=\"http://192.168.1.1:2379\""
+              "--disable coredns"
+              "--disable local-storage"
+              "--disable metrics-server"
+              "--disable servicelb"
+              "--disable traefik"
+              "--node-ip 192.168.1.2"
+            ];
+          };
+
+          networking = {
+            firewall = {
+              allowedTCPPorts = [
+                2379
+                2380
+                6443
+              ];
+              allowedUDPPorts = [ 8472 ];
+            };
+            useDHCP = false;
+            defaultGateway = "192.168.1.2";
+            interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+              {
+                address = "192.168.1.2";
+                prefixLength = 24;
+              }
+            ];
+          };
         };
-        useDHCP = false;
-        defaultGateway = "192.168.1.2";
-        interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
-          { address = "192.168.1.2"; prefixLength = 24; }
-        ];
-      };
     };
 
-  };
-
-  testScript = ''
-    with subtest("should start etcd"):
-        etcd.start()
-        etcd.wait_for_unit("etcd.service")
+    testScript = ''
+      with subtest("should start etcd"):
+          etcd.start()
+          etcd.wait_for_unit("etcd.service")
 
-    with subtest("should wait for etcdctl endpoint status to succeed"):
-        etcd.wait_until_succeeds("etcdctl endpoint status")
+      with subtest("should wait for etcdctl endpoint status to succeed"):
+          etcd.wait_until_succeeds("etcdctl endpoint status")
 
-    with subtest("should start k3s"):
-        k3s.start()
-        k3s.wait_for_unit("k3s")
+      with subtest("should start k3s"):
+          k3s.start()
+          k3s.wait_for_unit("k3s")
 
-    with subtest("should test if kubectl works"):
-        k3s.wait_until_succeeds("k3s kubectl get node")
+      with subtest("should test if kubectl works"):
+          k3s.wait_until_succeeds("k3s kubectl get node")
 
-    with subtest("should wait for service account to show up; takes a sec"):
-        k3s.wait_until_succeeds("k3s kubectl get serviceaccount default")
+      with subtest("should wait for service account to show up; takes a sec"):
+          k3s.wait_until_succeeds("k3s kubectl get serviceaccount default")
 
-    with subtest("should create a sample secret object"):
-        k3s.succeed("k3s kubectl create secret generic nixossecret --from-literal thesecret=abacadabra")
+      with subtest("should create a sample secret object"):
+          k3s.succeed("k3s kubectl create secret generic nixossecret --from-literal thesecret=abacadabra")
 
-    with subtest("should check if secret is correct"):
-        k3s.wait_until_succeeds("[[ $(kubectl get secrets nixossecret -o json | jq -r .data.thesecret | base64 -d) == abacadabra ]]")
+      with subtest("should check if secret is correct"):
+          k3s.wait_until_succeeds("[[ $(kubectl get secrets nixossecret -o json | jq -r .data.thesecret | base64 -d) == abacadabra ]]")
 
-    with subtest("should have a secret in database"):
-        etcd.wait_until_succeeds("[[ $(etcdctl get /registry/secrets/default/nixossecret | head -c1 | wc -c) -ne 0 ]]")
+      with subtest("should have a secret in database"):
+          etcd.wait_until_succeeds("[[ $(etcdctl get /registry/secrets/default/nixossecret | head -c1 | wc -c) -ne 0 ]]")
 
-    with subtest("should delete the secret"):
-        k3s.succeed("k3s kubectl delete secret nixossecret")
+      with subtest("should delete the secret"):
+          k3s.succeed("k3s kubectl delete secret nixossecret")
 
-    with subtest("should not have a secret in database"):
-        etcd.wait_until_fails("[[ $(etcdctl get /registry/secrets/default/nixossecret | head -c1 | wc -c) -ne 0 ]]")
+      with subtest("should not have a secret in database"):
+          etcd.wait_until_fails("[[ $(etcdctl get /registry/secrets/default/nixossecret | head -c1 | wc -c) -ne 0 ]]")
 
-    with subtest("should shutdown k3s and etcd"):
-        k3s.shutdown()
-        etcd.shutdown()
-  '';
+      with subtest("should shutdown k3s and etcd"):
+          k3s.shutdown()
+          etcd.shutdown()
+    '';
 
-  meta.maintainers = etcd.meta.maintainers ++ k3s.meta.maintainers;
-})
+    meta.maintainers = etcd.meta.maintainers ++ lib.teams.k3s.members;
+  }
+)
diff --git a/nixos/tests/k3s/kubelet-config.nix b/nixos/tests/k3s/kubelet-config.nix
new file mode 100644
index 0000000000000..031c9f823a63b
--- /dev/null
+++ b/nixos/tests/k3s/kubelet-config.nix
@@ -0,0 +1,80 @@
+# A test that sets extra kubelet configuration and enables graceful node shutdown
+import ../make-test-python.nix (
+  {
+    pkgs,
+    lib,
+    k3s,
+    ...
+  }:
+  let
+    nodeName = "test";
+    shutdownGracePeriod = "1m13s";
+    shutdownGracePeriodCriticalPods = "13s";
+    podsPerCore = 3;
+    memoryThrottlingFactor = 0.69;
+    containerLogMaxSize = "5Mi";
+  in
+  {
+    name = "${k3s.name}-kubelet-config";
+    nodes.machine =
+      { pkgs, ... }:
+      {
+        environment.systemPackages = [ pkgs.jq ];
+
+        # k3s uses enough resources the default vm fails.
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.k3s = {
+          enable = true;
+          package = k3s;
+          # Slightly reduce resource usage
+          extraFlags = [
+            "--disable coredns"
+            "--disable local-storage"
+            "--disable metrics-server"
+            "--disable servicelb"
+            "--disable traefik"
+            "--node-name ${nodeName}"
+          ];
+          gracefulNodeShutdown = {
+            enable = true;
+            inherit shutdownGracePeriod shutdownGracePeriodCriticalPods;
+          };
+          extraKubeletConfig = {
+            inherit podsPerCore memoryThrottlingFactor containerLogMaxSize;
+          };
+        };
+      };
+
+    testScript = ''
+      import json
+
+      start_all()
+      machine.wait_for_unit("k3s")
+      # wait until the node is ready
+      machine.wait_until_succeeds(r"""kubectl get node ${nodeName} -ojson | jq -e '.status.conditions[] | select(.type == "Ready") | .status == "True"'""")
+      # test whether the kubelet registered an inhibitor lock
+      machine.succeed("systemd-inhibit --list --no-legend | grep \"kubelet.*k3s-server.*shutdown\"")
+      # Run kubectl proxy in the background; redirect stdout so the call does not wait for the command to finish
+      machine.execute("kubectl proxy --address 127.0.0.1 --port=8001 >&2 &")
+      machine.wait_until_succeeds("nc -z 127.0.0.1 8001")
+      # get the kubeletconfig
+      kubelet_config=json.loads(machine.succeed("curl http://127.0.0.1:8001/api/v1/nodes/${nodeName}/proxy/configz | jq '.kubeletconfig'"))
+
+      with subtest("Kubelet config values are set correctly"):
+        assert kubelet_config["shutdownGracePeriod"] == "${shutdownGracePeriod}", \
+          f"unexpected value for shutdownGracePeriod: {kubelet_config['shutdownGracePeriod']}"
+        assert kubelet_config["shutdownGracePeriodCriticalPods"] == "${shutdownGracePeriodCriticalPods}", \
+          f"unexpected value for shutdownGracePeriodCriticalPods: {kubelet_config['shutdownGracePeriodCriticalPods']}"
+        assert kubelet_config["podsPerCore"] == ${toString podsPerCore}, \
+          f"unexpected value for podsPerCore: {kubelet_config['podsPerCore']}"
+        assert kubelet_config["memoryThrottlingFactor"] == ${toString memoryThrottlingFactor}, \
+          f"unexpected value for memoryThrottlingFactor: {kubelet_config['memoryThrottlingFactor']}"
+        assert kubelet_config["containerLogMaxSize"] == "${containerLogMaxSize}", \
+          f"unexpected value for containerLogMaxSize: {kubelet_config['containerLogMaxSize']}"
+    '';
+
+    meta.maintainers = lib.teams.k3s.members;
+  }
+)
diff --git a/nixos/tests/k3s/multi-node.nix b/nixos/tests/k3s/multi-node.nix
index 20279f3ca4b93..bc06ad858d8cb 100644
--- a/nixos/tests/k3s/multi-node.nix
+++ b/nixos/tests/k3s/multi-node.nix
@@ -1,14 +1,31 @@
-import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
+# A test that runs a multi-node k3s cluster and verifies that pod networking works across nodes
+import ../make-test-python.nix (
+  {
+    pkgs,
+    lib,
+    k3s,
+    ...
+  }:
   let
     imageEnv = pkgs.buildEnv {
       name = "k3s-pause-image-env";
-      paths = with pkgs; [ tini bashInteractive coreutils socat ];
+      paths = with pkgs; [
+        tini
+        bashInteractive
+        coreutils
+        socat
+      ];
     };
     pauseImage = pkgs.dockerTools.streamLayeredImage {
       name = "test.local/pause";
       tag = "local";
       contents = imageEnv;
-      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+      config.Entrypoint = [
+        "/bin/tini"
+        "--"
+        "/bin/sleep"
+        "inf"
+      ];
     };
     # A daemonset that responds 'server' on port 8000
     networkTestDaemonset = pkgs.writeText "test.yml" ''
@@ -42,94 +59,130 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
     name = "${k3s.name}-multi-node";
 
     nodes = {
-      server = { pkgs, ... }: {
-        environment.systemPackages = with pkgs; [ gzip jq ];
-        # k3s uses enough resources the default vm fails.
-        virtualisation.memorySize = 1536;
-        virtualisation.diskSize = 4096;
-
-        services.k3s = {
-          inherit tokenFile;
-          enable = true;
-          role = "server";
-          package = k3s;
-          clusterInit = true;
-          extraFlags = builtins.toString [
-            "--disable" "coredns"
-            "--disable" "local-storage"
-            "--disable" "metrics-server"
-            "--disable" "servicelb"
-            "--disable" "traefik"
-            "--node-ip" "192.168.1.1"
-            "--pause-image" "test.local/pause:local"
+      server =
+        { pkgs, ... }:
+        {
+          environment.systemPackages = with pkgs; [
+            gzip
+            jq
+          ];
+          # k3s uses enough resources the default vm fails.
+          virtualisation.memorySize = 1536;
+          virtualisation.diskSize = 4096;
+
+          services.k3s = {
+            inherit tokenFile;
+            enable = true;
+            role = "server";
+            package = k3s;
+            clusterInit = true;
+            extraFlags = [
+              "--disable coredns"
+              "--disable local-storage"
+              "--disable metrics-server"
+              "--disable servicelb"
+              "--disable traefik"
+              "--node-ip 192.168.1.1"
+              "--pause-image test.local/pause:local"
+            ];
+          };
+          networking.firewall.allowedTCPPorts = [
+            2379
+            2380
+            6443
+          ];
+          networking.firewall.allowedUDPPorts = [ 8472 ];
+          networking.firewall.trustedInterfaces = [ "flannel.1" ];
+          networking.useDHCP = false;
+          networking.defaultGateway = "192.168.1.1";
+          networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+            {
+              address = "192.168.1.1";
+              prefixLength = 24;
+            }
           ];
         };
-        networking.firewall.allowedTCPPorts = [ 2379 2380 6443 ];
-        networking.firewall.allowedUDPPorts = [ 8472 ];
-        networking.firewall.trustedInterfaces = [ "flannel.1" ];
-        networking.useDHCP = false;
-        networking.defaultGateway = "192.168.1.1";
-        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
-          { address = "192.168.1.1"; prefixLength = 24; }
-        ];
-      };
-
-      server2 = { pkgs, ... }: {
-        environment.systemPackages = with pkgs; [ gzip jq ];
-        virtualisation.memorySize = 1536;
-        virtualisation.diskSize = 4096;
-
-        services.k3s = {
-          inherit tokenFile;
-          enable = true;
-          serverAddr = "https://192.168.1.1:6443";
-          clusterInit = false;
-          extraFlags = builtins.toString [
-            "--disable" "coredns"
-            "--disable" "local-storage"
-            "--disable" "metrics-server"
-            "--disable" "servicelb"
-            "--disable" "traefik"
-            "--node-ip" "192.168.1.3"
-            "--pause-image" "test.local/pause:local"
+
+      server2 =
+        { pkgs, ... }:
+        {
+          environment.systemPackages = with pkgs; [
+            gzip
+            jq
+          ];
+          virtualisation.memorySize = 1536;
+          virtualisation.diskSize = 4096;
+
+          services.k3s = {
+            inherit tokenFile;
+            enable = true;
+            serverAddr = "https://192.168.1.1:6443";
+            clusterInit = false;
+            extraFlags = builtins.toString [
+              "--disable"
+              "coredns"
+              "--disable"
+              "local-storage"
+              "--disable"
+              "metrics-server"
+              "--disable"
+              "servicelb"
+              "--disable"
+              "traefik"
+              "--node-ip"
+              "192.168.1.3"
+              "--pause-image"
+              "test.local/pause:local"
+            ];
+          };
+          networking.firewall.allowedTCPPorts = [
+            2379
+            2380
+            6443
+          ];
+          networking.firewall.allowedUDPPorts = [ 8472 ];
+          networking.firewall.trustedInterfaces = [ "flannel.1" ];
+          networking.useDHCP = false;
+          networking.defaultGateway = "192.168.1.3";
+          networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+            {
+              address = "192.168.1.3";
+              prefixLength = 24;
+            }
           ];
         };
-        networking.firewall.allowedTCPPorts = [ 2379 2380 6443 ];
-        networking.firewall.allowedUDPPorts = [ 8472 ];
-        networking.firewall.trustedInterfaces = [ "flannel.1" ];
-        networking.useDHCP = false;
-        networking.defaultGateway = "192.168.1.3";
-        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
-          { address = "192.168.1.3"; prefixLength = 24; }
-        ];
-      };
-
-      agent = { pkgs, ... }: {
-        virtualisation.memorySize = 1024;
-        virtualisation.diskSize = 2048;
-        services.k3s = {
-          inherit tokenFile;
-          enable = true;
-          role = "agent";
-          serverAddr = "https://192.168.1.3:6443";
-          extraFlags = lib.concatStringsSep " " [
-            "--pause-image" "test.local/pause:local"
-            "--node-ip" "192.168.1.2"
+
+      agent =
+        { pkgs, ... }:
+        {
+          virtualisation.memorySize = 1024;
+          virtualisation.diskSize = 2048;
+          services.k3s = {
+            inherit tokenFile;
+            enable = true;
+            role = "agent";
+            serverAddr = "https://192.168.1.3:6443";
+            extraFlags = lib.concatStringsSep " " [
+              "--pause-image"
+              "test.local/pause:local"
+              "--node-ip"
+              "192.168.1.2"
+            ];
+          };
+          networking.firewall.allowedTCPPorts = [ 6443 ];
+          networking.firewall.allowedUDPPorts = [ 8472 ];
+          networking.firewall.trustedInterfaces = [ "flannel.1" ];
+          networking.useDHCP = false;
+          networking.defaultGateway = "192.168.1.2";
+          networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+            {
+              address = "192.168.1.2";
+              prefixLength = 24;
+            }
           ];
         };
-        networking.firewall.allowedTCPPorts = [ 6443 ];
-        networking.firewall.allowedUDPPorts = [ 8472 ];
-        networking.firewall.trustedInterfaces = [ "flannel.1" ];
-        networking.useDHCP = false;
-        networking.defaultGateway = "192.168.1.2";
-        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
-          { address = "192.168.1.2"; prefixLength = 24; }
-        ];
-      };
     };
 
-    meta.maintainers = k3s.meta.maintainers;
-
     testScript = ''
       machines = [server, server2, agent]
       for m in machines:
@@ -142,9 +195,7 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
       server.wait_until_succeeds("k3s kubectl get node agent")
 
       for m in machines:
-          # Fix-Me: Tests fail for 'aarch64-linux' as: "CONFIG_CGROUP_FREEZER: missing (fail)"
-          if not is_aarch64:
-              m.succeed("k3s check-config")
+          m.succeed("k3s check-config")
           m.succeed(
               "${pauseImage} | k3s ctr image import -"
           )
@@ -178,4 +229,7 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
       for m in machines:
           m.shutdown()
     '';
-  })
+
+    meta.maintainers = lib.teams.k3s.members;
+  }
+)
diff --git a/nixos/tests/k3s/single-node.nix b/nixos/tests/k3s/single-node.nix
index fd64a050e61ef..55a15324c88f5 100644
--- a/nixos/tests/k3s/single-node.nix
+++ b/nixos/tests/k3s/single-node.nix
@@ -1,14 +1,30 @@
-import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
+# A test that runs a single-node k3s cluster and verifies that a pod can run
+import ../make-test-python.nix (
+  {
+    pkgs,
+    lib,
+    k3s,
+    ...
+  }:
   let
     imageEnv = pkgs.buildEnv {
       name = "k3s-pause-image-env";
-      paths = with pkgs; [ tini (hiPrio coreutils) busybox ];
+      paths = with pkgs; [
+        tini
+        (hiPrio coreutils)
+        busybox
+      ];
     };
     pauseImage = pkgs.dockerTools.streamLayeredImage {
       name = "test.local/pause";
       tag = "local";
       contents = imageEnv;
-      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+      config.Entrypoint = [
+        "/bin/tini"
+        "--"
+        "/bin/sleep"
+        "inf"
+      ];
     };
     testPodYaml = pkgs.writeText "test.yml" ''
       apiVersion: v1
@@ -25,36 +41,40 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
   in
   {
     name = "${k3s.name}-single-node";
-    meta.maintainers = k3s.meta.maintainers;
 
-    nodes.machine = { pkgs, ... }: {
-      environment.systemPackages = with pkgs; [ k3s gzip ];
+    nodes.machine =
+      { pkgs, ... }:
+      {
+        environment.systemPackages = with pkgs; [
+          k3s
+          gzip
+        ];
 
-      # k3s uses enough resources the default vm fails.
-      virtualisation.memorySize = 1536;
-      virtualisation.diskSize = 4096;
+        # k3s uses enough resources the default vm fails.
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
 
-      services.k3s.enable = true;
-      services.k3s.role = "server";
-      services.k3s.package = k3s;
-      # Slightly reduce resource usage
-      services.k3s.extraFlags = builtins.toString [
-        "--disable" "coredns"
-        "--disable" "local-storage"
-        "--disable" "metrics-server"
-        "--disable" "servicelb"
-        "--disable" "traefik"
-        "--pause-image" "test.local/pause:local"
-      ];
+        services.k3s.enable = true;
+        services.k3s.role = "server";
+        services.k3s.package = k3s;
+        # Slightly reduce resource usage
+        services.k3s.extraFlags = [
+          "--disable coredns"
+          "--disable local-storage"
+          "--disable metrics-server"
+          "--disable servicelb"
+          "--disable traefik"
+          "--pause-image test.local/pause:local"
+        ];
 
-      users.users = {
-        noprivs = {
-          isNormalUser = true;
-          description = "Can't access k3s by default";
-          password = "*";
+        users.users = {
+          noprivs = {
+            isNormalUser = true;
+            description = "Can't access k3s by default";
+            password = "*";
+          };
         };
       };
-    };
 
     testScript = ''
       start_all()
@@ -62,9 +82,7 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
       machine.wait_for_unit("k3s")
       machine.succeed("kubectl cluster-info")
       machine.fail("sudo -u noprivs kubectl cluster-info")
-      '' # Fix-Me: Tests fail for 'aarch64-linux' as: "CONFIG_CGROUP_FREEZER: missing (fail)"
-      + lib.optionalString (!pkgs.stdenv.isAarch64) ''machine.succeed("k3s check-config")'' + ''
-
+      machine.succeed("k3s check-config")
       machine.succeed(
           "${pauseImage} | ctr image import -"
       )
@@ -78,6 +96,21 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
       # regression test for #176445
       machine.fail("journalctl -o cat -u k3s.service | grep 'ipset utility not found'")
 
+      with subtest("Run k3s-killall"):
+          # Call the killall script with a clean path to assert that
+          # all required commands are wrapped
+          output = machine.succeed("PATH= ${k3s}/bin/k3s-killall.sh 2>&1 | tee /dev/stderr")
+          assert "command not found" not in output, "killall script contains unknown command"
+
+          # Check that killall cleaned up properly
+          machine.fail("systemctl is-active k3s.service")
+          machine.fail("systemctl list-units | grep containerd")
+          machine.fail("ip link show | awk -F': ' '{print $2}' | grep -e flannel -e cni0")
+          machine.fail("ip netns show | grep cni-")
+
       machine.shutdown()
     '';
-  })
+
+    meta.maintainers = lib.teams.k3s.members;
+  }
+)
diff --git a/nixos/tests/kafka.nix b/nixos/tests/kafka.nix
index f4f9827ab7b5f..5390e9d7f79f1 100644
--- a/nixos/tests/kafka.nix
+++ b/nixos/tests/kafka.nix
@@ -103,13 +103,9 @@ let
   }) { inherit system; });
 
 in with pkgs; {
-  kafka_2_8 = makeKafkaTest "kafka_2_8" { kafkaPackage = apacheKafka_2_8; };
-  kafka_3_0 = makeKafkaTest "kafka_3_0" { kafkaPackage = apacheKafka_3_0; };
-  kafka_3_1 = makeKafkaTest "kafka_3_1" { kafkaPackage = apacheKafka_3_1; };
-  kafka_3_2 = makeKafkaTest "kafka_3_2" { kafkaPackage = apacheKafka_3_2; };
-  kafka_3_3 = makeKafkaTest "kafka_3_3" { kafkaPackage = apacheKafka_3_3; };
-  kafka_3_4 = makeKafkaTest "kafka_3_4" { kafkaPackage = apacheKafka_3_4; };
-  kafka_3_5 = makeKafkaTest "kafka_3_5" { kafkaPackage = apacheKafka_3_5; };
+  kafka_3_6 = makeKafkaTest "kafka_3_6" { kafkaPackage = apacheKafka_3_6; };
+  kafka_3_7 = makeKafkaTest "kafka_3_7" { kafkaPackage = apacheKafka_3_7; };
+  kafka_3_8 = makeKafkaTest "kafka_3_8" { kafkaPackage = apacheKafka_3_8; };
   kafka = makeKafkaTest "kafka" { kafkaPackage = apacheKafka; };
   kafka_kraft = makeKafkaTest "kafka_kraft" { kafkaPackage = apacheKafka; mode = "kraft"; };
 }
diff --git a/nixos/tests/kanidm-provisioning.nix b/nixos/tests/kanidm-provisioning.nix
new file mode 100644
index 0000000000000..27176c2086fec
--- /dev/null
+++ b/nixos/tests/kanidm-provisioning.nix
@@ -0,0 +1,518 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+  let
+    certs = import ./common/acme/server/snakeoil-certs.nix;
+    serverDomain = certs.domain;
+
+    # copy certs to store to work around mount namespacing
+    certsPath = pkgs.runCommandNoCC "snakeoil-certs" { } ''
+      mkdir $out
+      cp ${certs."${serverDomain}".cert} $out/snakeoil.crt
+      cp ${certs."${serverDomain}".key} $out/snakeoil.key
+    '';
+
+    provisionAdminPassword = "very-strong-password-for-admin";
+    provisionIdmAdminPassword = "very-strong-password-for-idm-admin";
+    provisionIdmAdminPassword2 = "very-strong-alternative-password-for-idm-admin";
+  in
+  {
+    name = "kanidm-provisioning";
+    meta.maintainers = with pkgs.lib.maintainers; [ oddlama ];
+
+    nodes.provision =
+      { pkgs, lib, ... }:
+      {
+        services.kanidm = {
+          package = pkgs.kanidm.withSecretProvisioning;
+          enableServer = true;
+          serverSettings = {
+            origin = "https://${serverDomain}";
+            domain = serverDomain;
+            bindaddress = "[::]:443";
+            ldapbindaddress = "[::1]:636";
+            tls_chain = "${certsPath}/snakeoil.crt";
+            tls_key = "${certsPath}/snakeoil.key";
+          };
+          # So we can check whether provisioning did what we wanted
+          enableClient = true;
+          clientSettings = {
+            uri = "https://${serverDomain}";
+            verify_ca = true;
+            verify_hostnames = true;
+          };
+        };
+
+        specialisation.credentialProvision.configuration =
+          { ... }:
+          {
+            services.kanidm.provision = lib.mkForce {
+              enable = true;
+              adminPasswordFile = pkgs.writeText "admin-pw" provisionAdminPassword;
+              idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword;
+            };
+          };
+
+        specialisation.changedCredential.configuration =
+          { ... }:
+          {
+            services.kanidm.provision = lib.mkForce {
+              enable = true;
+              idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword2;
+            };
+          };
+
+        specialisation.addEntities.configuration =
+          { ... }:
+          {
+            services.kanidm.provision = lib.mkForce {
+              enable = true;
+              # Test whether credential recovery works without a specific idmAdmin password
+              #idmAdminPasswordFile =
+
+              groups.supergroup1 = {
+                members = [ "testgroup1" ];
+              };
+
+              groups.testgroup1 = { };
+
+              persons.testuser1 = {
+                displayName = "Test User";
+                legalName = "Jane Doe";
+                mailAddresses = [ "jane.doe@example.com" ];
+                groups = [
+                  "testgroup1"
+                  "service1-access"
+                ];
+              };
+
+              persons.testuser2 = {
+                displayName = "Powerful Test User";
+                legalName = "Ryouiki Tenkai";
+                groups = [ "service1-admin" ];
+              };
+
+              groups.service1-access = { };
+              groups.service1-admin = { };
+              systems.oauth2.service1 = {
+                displayName = "Service One";
+                originUrl = "https://one.example.com/";
+                originLanding = "https://one.example.com/landing";
+                basicSecretFile = pkgs.writeText "bs-service1" "very-strong-secret-for-service1";
+                scopeMaps.service1-access = [
+                  "openid"
+                  "email"
+                  "profile"
+                ];
+                supplementaryScopeMaps.service1-admin = [ "admin" ];
+                claimMaps.groups = {
+                  valuesByGroup.service1-admin = [ "admin" ];
+                };
+              };
+
+              systems.oauth2.service2 = {
+                displayName = "Service Two";
+                originUrl = "https://two.example.com/";
+                originLanding = "https://landing2.example.com/";
+                # Test not setting secret
+                # basicSecretFile =
+                allowInsecureClientDisablePkce = true;
+                preferShortUsername = true;
+              };
+            };
+          };
+
+        specialisation.changeAttributes.configuration =
+          { ... }:
+          {
+            services.kanidm.provision = lib.mkForce {
+              enable = true;
+              # Changing admin credentials at any time should not be a problem:
+              idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword;
+
+              groups.supergroup1 = {
+                #members = ["testgroup1"];
+              };
+
+              groups.testgroup1 = { };
+
+              persons.testuser1 = {
+                displayName = "Test User (changed)";
+                legalName = "Jane Doe (changed)";
+                mailAddresses = [
+                  "jane.doe@example.com"
+                  "second.doe@example.com"
+                ];
+                groups = [
+                  #"testgroup1"
+                  "service1-access"
+                ];
+              };
+
+              persons.testuser2 = {
+                displayName = "Powerful Test User (changed)";
+                legalName = "Ryouiki Tenkai (changed)";
+                groups = [ "service1-admin" ];
+              };
+
+              groups.service1-access = { };
+              groups.service1-admin = { };
+              systems.oauth2.service1 = {
+                displayName = "Service One (changed)";
+                # multiple origin URLs
+                originUrl = [
+                  "https://changed-one.example.com/"
+                  "https://changed-one.example.org/"
+                ];
+                originLanding = "https://changed-one.example.com/landing-changed";
+                basicSecretFile = pkgs.writeText "bs-service1" "changed-very-strong-secret-for-service1";
+                scopeMaps.service1-access = [
+                  "openid"
+                  "email"
+                  #"profile"
+                ];
+                supplementaryScopeMaps.service1-admin = [ "adminchanged" ];
+                claimMaps.groups = {
+                  valuesByGroup.service1-admin = [ "adminchanged" ];
+                };
+              };
+
+              systems.oauth2.service2 = {
+                displayName = "Service Two (changed)";
+                originUrl = "https://changed-two.example.com/";
+                originLanding = "https://changed-landing2.example.com/";
+                # Test not setting secret
+                # basicSecretFile =
+                allowInsecureClientDisablePkce = false;
+                preferShortUsername = false;
+              };
+            };
+          };
+
+        specialisation.removeAttributes.configuration =
+          { ... }:
+          {
+            services.kanidm.provision = lib.mkForce {
+              enable = true;
+              idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword;
+
+              groups.supergroup1 = { };
+
+              persons.testuser1 = {
+                displayName = "Test User (changed)";
+              };
+
+              persons.testuser2 = {
+                displayName = "Powerful Test User (changed)";
+                groups = [ "service1-admin" ];
+              };
+
+              groups.service1-access = { };
+              groups.service1-admin = { };
+              systems.oauth2.service1 = {
+                displayName = "Service One (changed)";
+                originUrl = "https://changed-one.example.com/";
+                originLanding = "https://changed-one.example.com/landing-changed";
+                basicSecretFile = pkgs.writeText "bs-service1" "changed-very-strong-secret-for-service1";
+                # Removing maps requires setting them to the empty list
+                scopeMaps.service1-access = [ ];
+                supplementaryScopeMaps.service1-admin = [ ];
+              };
+
+              systems.oauth2.service2 = {
+                displayName = "Service Two (changed)";
+                originUrl = "https://changed-two.example.com/";
+                originLanding = "https://changed-landing2.example.com/";
+              };
+            };
+          };
+
+        specialisation.removeEntities.configuration =
+          { ... }:
+          {
+            services.kanidm.provision = lib.mkForce {
+              enable = true;
+              idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword;
+            };
+          };
+
+        security.pki.certificateFiles = [ certs.ca.cert ];
+
+        networking.hosts."::1" = [ serverDomain ];
+        networking.firewall.allowedTCPPorts = [ 443 ];
+
+        users.users.kanidm.shell = pkgs.bashInteractive;
+
+        environment.systemPackages = with pkgs; [
+          kanidm
+          openldap
+          ripgrep
+          jq
+        ];
+      };
+
+    testScript =
+      { nodes, ... }:
+      let
+        # We need access to the config file in the test script.
+        filteredConfig = pkgs.lib.converge (pkgs.lib.filterAttrsRecursive (
+          _: v: v != null
+        )) nodes.provision.services.kanidm.serverSettings;
+        serverConfigFile = (pkgs.formats.toml { }).generate "server.toml" filteredConfig;
+
+        specialisations = "${nodes.provision.system.build.toplevel}/specialisation";
+      in
+      ''
+        import re
+
+        def assert_contains(haystack, needle):
+            if needle not in haystack:
+                print("The haystack that will cause the following exception is:")
+                print("---")
+                print(haystack)
+                print("---")
+                raise Exception(f"Expected string '{needle}' was not found")
+
+        def assert_matches(haystack, expr):
+            if not re.search(expr, haystack):
+                print("The haystack that will cause the following exception is:")
+                print("---")
+                print(haystack)
+                print("---")
+                raise Exception(f"Expected regex '{expr}' did not match")
+
+        def assert_lacks(haystack, needle):
+            if needle in haystack:
+                print("The haystack that will cause the following exception is:")
+                print("---")
+                print(haystack, end="")
+                print("---")
+                raise Exception(f"Unexpected string '{needle}' was found")
+
+        provision.start()
+
+        def provision_login(pw):
+            provision.wait_for_unit("kanidm.service")
+            provision.wait_until_succeeds("curl -Lsf https://${serverDomain} | grep Kanidm")
+            if pw is None:
+                pw = provision.succeed("su - kanidm -c 'kanidmd recover-account -c ${serverConfigFile} idm_admin 2>&1 | rg -o \'[A-Za-z0-9]{48}\' '").strip().removeprefix("'").removesuffix("'")
+            out = provision.succeed(f"KANIDM_PASSWORD={pw} kanidm login -D idm_admin")
+            assert_contains(out, "Login Success for idm_admin")
+
+        with subtest("Test Provisioning - setup"):
+            provision_login(None)
+            provision.succeed("kanidm logout -D idm_admin")
+
+        with subtest("Test Provisioning - credentialProvision"):
+            provision.succeed('${specialisations}/credentialProvision/bin/switch-to-configuration test')
+            provision_login("${provisionIdmAdminPassword}")
+
+            # Test provisioned admin pw
+            out = provision.succeed("KANIDM_PASSWORD=${provisionAdminPassword} kanidm login -D admin")
+            assert_contains(out, "Login Success for admin")
+            provision.succeed("kanidm logout -D admin")
+            provision.succeed("kanidm logout -D idm_admin")
+
+        with subtest("Test Provisioning - changedCredential"):
+            provision.succeed('${specialisations}/changedCredential/bin/switch-to-configuration test')
+            provision_login("${provisionIdmAdminPassword2}")
+            provision.succeed("kanidm logout -D idm_admin")
+
+        with subtest("Test Provisioning - addEntities"):
+            provision.succeed('${specialisations}/addEntities/bin/switch-to-configuration test')
+            # Unspecified idm admin password
+            provision_login(None)
+
+            out = provision.succeed("kanidm group get testgroup1")
+            assert_contains(out, "name: testgroup1")
+
+            out = provision.succeed("kanidm group get supergroup1")
+            assert_contains(out, "name: supergroup1")
+            assert_contains(out, "member: testgroup1")
+
+            out = provision.succeed("kanidm person get testuser1")
+            assert_contains(out, "name: testuser1")
+            assert_contains(out, "displayname: Test User")
+            assert_contains(out, "legalname: Jane Doe")
+            assert_contains(out, "mail: jane.doe@example.com")
+            assert_contains(out, "memberof: testgroup1")
+            assert_contains(out, "memberof: service1-access")
+
+            out = provision.succeed("kanidm person get testuser2")
+            assert_contains(out, "name: testuser2")
+            assert_contains(out, "displayname: Powerful Test User")
+            assert_contains(out, "legalname: Ryouiki Tenkai")
+            assert_contains(out, "memberof: service1-admin")
+            assert_lacks(out, "mail:")
+
+            out = provision.succeed("kanidm group get service1-access")
+            assert_contains(out, "name: service1-access")
+
+            out = provision.succeed("kanidm group get service1-admin")
+            assert_contains(out, "name: service1-admin")
+
+            out = provision.succeed("kanidm system oauth2 get service1")
+            assert_contains(out, "name: service1")
+            assert_contains(out, "displayname: Service One")
+            assert_contains(out, "oauth2_rs_origin: https://one.example.com/")
+            assert_contains(out, "oauth2_rs_origin_landing: https://one.example.com/landing")
+            assert_matches(out, 'oauth2_rs_scope_map: service1-access.*{"email", "openid", "profile"}')
+            assert_matches(out, 'oauth2_rs_sup_scope_map: service1-admin.*{"admin"}')
+            assert_matches(out, 'oauth2_rs_claim_map: groups:.*"admin"')
+
+            out = provision.succeed("kanidm system oauth2 show-basic-secret service1")
+            assert_contains(out, "very-strong-secret-for-service1")
+
+            out = provision.succeed("kanidm system oauth2 get service2")
+            assert_contains(out, "name: service2")
+            assert_contains(out, "displayname: Service Two")
+            assert_contains(out, "oauth2_rs_origin: https://two.example.com/")
+            assert_contains(out, "oauth2_rs_origin_landing: https://landing2.example.com/")
+            assert_contains(out, "oauth2_allow_insecure_client_disable_pkce: true")
+            assert_contains(out, "oauth2_prefer_short_username: true")
+
+            provision.succeed("kanidm logout -D idm_admin")
+
+        with subtest("Test Provisioning - changeAttributes"):
+            provision.succeed('${specialisations}/changeAttributes/bin/switch-to-configuration test')
+            provision_login("${provisionIdmAdminPassword}")
+
+            out = provision.succeed("kanidm group get testgroup1")
+            assert_contains(out, "name: testgroup1")
+
+            out = provision.succeed("kanidm group get supergroup1")
+            assert_contains(out, "name: supergroup1")
+            assert_lacks(out, "member: testgroup1")
+
+            out = provision.succeed("kanidm person get testuser1")
+            assert_contains(out, "name: testuser1")
+            assert_contains(out, "displayname: Test User (changed)")
+            assert_contains(out, "legalname: Jane Doe (changed)")
+            assert_contains(out, "mail: jane.doe@example.com")
+            assert_contains(out, "mail: second.doe@example.com")
+            assert_lacks(out, "memberof: testgroup1")
+            assert_contains(out, "memberof: service1-access")
+
+            out = provision.succeed("kanidm person get testuser2")
+            assert_contains(out, "name: testuser2")
+            assert_contains(out, "displayname: Powerful Test User (changed)")
+            assert_contains(out, "legalname: Ryouiki Tenkai (changed)")
+            assert_contains(out, "memberof: service1-admin")
+            assert_lacks(out, "mail:")
+
+            out = provision.succeed("kanidm group get service1-access")
+            assert_contains(out, "name: service1-access")
+
+            out = provision.succeed("kanidm group get service1-admin")
+            assert_contains(out, "name: service1-admin")
+
+            out = provision.succeed("kanidm system oauth2 get service1")
+            assert_contains(out, "name: service1")
+            assert_contains(out, "displayname: Service One (changed)")
+            assert_contains(out, "oauth2_rs_origin: https://changed-one.example.com/")
+            assert_contains(out, "oauth2_rs_origin: https://changed-one.example.org/")
+            assert_contains(out, "oauth2_rs_origin_landing: https://changed-one.example.com/landing")
+            assert_matches(out, 'oauth2_rs_scope_map: service1-access.*{"email", "openid"}')
+            assert_matches(out, 'oauth2_rs_sup_scope_map: service1-admin.*{"adminchanged"}')
+            assert_matches(out, 'oauth2_rs_claim_map: groups:.*"adminchanged"')
+
+            out = provision.succeed("kanidm system oauth2 show-basic-secret service1")
+            assert_contains(out, "changed-very-strong-secret-for-service1")
+
+            out = provision.succeed("kanidm system oauth2 get service2")
+            assert_contains(out, "name: service2")
+            assert_contains(out, "displayname: Service Two (changed)")
+            assert_contains(out, "oauth2_rs_origin: https://changed-two.example.com/")
+            assert_contains(out, "oauth2_rs_origin_landing: https://changed-landing2.example.com/")
+            assert_lacks(out, "oauth2_allow_insecure_client_disable_pkce: true")
+            assert_lacks(out, "oauth2_prefer_short_username: true")
+
+            provision.succeed("kanidm logout -D idm_admin")
+
+        with subtest("Test Provisioning - removeAttributes"):
+            provision.succeed('${specialisations}/removeAttributes/bin/switch-to-configuration test')
+            provision_login("${provisionIdmAdminPassword}")
+
+            out = provision.succeed("kanidm group get testgroup1")
+            assert_lacks(out, "name: testgroup1")
+
+            out = provision.succeed("kanidm group get supergroup1")
+            assert_contains(out, "name: supergroup1")
+            assert_lacks(out, "member: testgroup1")
+
+            out = provision.succeed("kanidm person get testuser1")
+            assert_contains(out, "name: testuser1")
+            assert_contains(out, "displayname: Test User (changed)")
+            assert_lacks(out, "legalname: Jane Doe (changed)")
+            assert_lacks(out, "mail: jane.doe@example.com")
+            assert_lacks(out, "mail: second.doe@example.com")
+            assert_lacks(out, "memberof: testgroup1")
+            assert_lacks(out, "memberof: service1-access")
+
+            out = provision.succeed("kanidm person get testuser2")
+            assert_contains(out, "name: testuser2")
+            assert_contains(out, "displayname: Powerful Test User (changed)")
+            assert_lacks(out, "legalname: Ryouiki Tenkai (changed)")
+            assert_contains(out, "memberof: service1-admin")
+            assert_lacks(out, "mail:")
+
+            out = provision.succeed("kanidm group get service1-access")
+            assert_contains(out, "name: service1-access")
+
+            out = provision.succeed("kanidm group get service1-admin")
+            assert_contains(out, "name: service1-admin")
+
+            out = provision.succeed("kanidm system oauth2 get service1")
+            assert_contains(out, "name: service1")
+            assert_contains(out, "displayname: Service One (changed)")
+            assert_contains(out, "oauth2_rs_origin: https://changed-one.example.com/")
+            assert_lacks(out, "oauth2_rs_origin: https://changed-one.example.org/")
+            assert_contains(out, "oauth2_rs_origin_landing: https://changed-one.example.com/landing")
+            assert_lacks(out, "oauth2_rs_scope_map")
+            assert_lacks(out, "oauth2_rs_sup_scope_map")
+            assert_lacks(out, "oauth2_rs_claim_map")
+
+            out = provision.succeed("kanidm system oauth2 show-basic-secret service1")
+            assert_contains(out, "changed-very-strong-secret-for-service1")
+
+            out = provision.succeed("kanidm system oauth2 get service2")
+            assert_contains(out, "name: service2")
+            assert_contains(out, "displayname: Service Two (changed)")
+            assert_contains(out, "oauth2_rs_origin: https://changed-two.example.com/")
+            assert_contains(out, "oauth2_rs_origin_landing: https://changed-landing2.example.com/")
+            assert_lacks(out, "oauth2_allow_insecure_client_disable_pkce: true")
+            assert_lacks(out, "oauth2_prefer_short_username: true")
+
+            provision.succeed("kanidm logout -D idm_admin")
+
+        with subtest("Test Provisioning - removeEntities"):
+            provision.succeed('${specialisations}/removeEntities/bin/switch-to-configuration test')
+            provision_login("${provisionIdmAdminPassword}")
+
+            out = provision.succeed("kanidm group get testgroup1")
+            assert_lacks(out, "name: testgroup1")
+
+            out = provision.succeed("kanidm group get supergroup1")
+            assert_lacks(out, "name: supergroup1")
+
+            out = provision.succeed("kanidm person get testuser1")
+            assert_lacks(out, "name: testuser1")
+
+            out = provision.succeed("kanidm person get testuser2")
+            assert_lacks(out, "name: testuser2")
+
+            out = provision.succeed("kanidm group get service1-access")
+            assert_lacks(out, "name: service1-access")
+
+            out = provision.succeed("kanidm group get service1-admin")
+            assert_lacks(out, "name: service1-admin")
+
+            out = provision.succeed("kanidm system oauth2 get service1")
+            assert_lacks(out, "name: service1")
+
+            out = provision.succeed("kanidm system oauth2 get service2")
+            assert_lacks(out, "name: service2")
+
+            provision.succeed("kanidm logout -D idm_admin")
+      '';
+  }
+)
diff --git a/nixos/tests/kanidm.nix b/nixos/tests/kanidm.nix
index 8ed9af63f1d41..7e2fce20857a0 100644
--- a/nixos/tests/kanidm.nix
+++ b/nixos/tests/kanidm.nix
@@ -6,12 +6,19 @@ import ./make-test-python.nix ({ pkgs, ... }:
     testCredentials = {
       password = "Password1_cZPEwpCWvrReripJmAZdmVIZd8HHoHcl";
     };
+
+    # copy certs to the Nix store to work around mount namespacing
+    certsPath = pkgs.runCommandNoCC "snakeoil-certs" { } ''
+      mkdir $out
+      cp ${certs."${serverDomain}".cert} $out/snakeoil.crt
+      cp ${certs."${serverDomain}".key} $out/snakeoil.key
+    '';
   in
   {
     name = "kanidm";
-    meta.maintainers = with pkgs.lib.maintainers; [ erictapen Flakebi ];
+    meta.maintainers = with pkgs.lib.maintainers; [ erictapen Flakebi oddlama ];
 
-    nodes.server = { config, pkgs, lib, ... }: {
+    nodes.server = { pkgs, ... }: {
       services.kanidm = {
         enableServer = true;
         serverSettings = {
@@ -19,8 +26,8 @@ import ./make-test-python.nix ({ pkgs, ... }:
           domain = serverDomain;
           bindaddress = "[::]:443";
           ldapbindaddress = "[::1]:636";
-          tls_chain = certs."${serverDomain}".cert;
-          tls_key = certs."${serverDomain}".key;
+          tls_chain = "${certsPath}/snakeoil.crt";
+          tls_key = "${certsPath}/snakeoil.key";
         };
       };
 
@@ -34,7 +41,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
       environment.systemPackages = with pkgs; [ kanidm openldap ripgrep ];
     };
 
-    nodes.client = { pkgs, nodes, ... }: {
+    nodes.client = { nodes, ... }: {
       services.kanidm = {
         enableClient = true;
         clientSettings = {
@@ -62,10 +69,10 @@ import ./make-test-python.nix ({ pkgs, ... }:
           (pkgs.lib.filterAttrsRecursive (_: v: v != null))
           nodes.server.services.kanidm.serverSettings;
         serverConfigFile = (pkgs.formats.toml { }).generate "server.toml" filteredConfig;
-
       in
       ''
-        start_all()
+        server.start()
+        client.start()
         server.wait_for_unit("kanidm.service")
         client.systemctl("start network-online.target")
         client.wait_for_unit("network-online.target")
@@ -122,5 +129,8 @@ import ./make-test-python.nix ({ pkgs, ... }:
             client.wait_until_succeeds("systemctl is-active user@$(id -u testuser).service")
             client.send_chars("touch done\n")
             client.wait_for_file("/home/testuser@${serverDomain}/done")
+
+        server.shutdown()
+        client.shutdown()
       '';
   })
diff --git a/nixos/tests/kea.nix b/nixos/tests/kea.nix
index 98a8e93a07609..653e280ec8b7e 100644
--- a/nixos/tests/kea.nix
+++ b/nixos/tests/kea.nix
@@ -57,6 +57,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...}: {
           };
 
           subnet4 = [ {
+            id = 1;
             subnet = "10.0.0.0/29";
             pools = [ {
               pool = "10.0.0.3 - 10.0.0.3";
diff --git a/nixos/tests/keepalived.nix b/nixos/tests/keepalived.nix
index 16564511d85dc..052b36266d037 100644
--- a/nixos/tests/keepalived.nix
+++ b/nixos/tests/keepalived.nix
@@ -4,8 +4,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
 
   nodes = {
     node1 = { pkgs, ... }: {
-      networking.firewall.extraCommands = "iptables -A INPUT -p vrrp -j ACCEPT";
       services.keepalived.enable = true;
+      services.keepalived.openFirewall = true;
       services.keepalived.vrrpInstances.test = {
         interface = "eth1";
         state = "MASTER";
@@ -16,8 +16,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       environment.systemPackages = [ pkgs.tcpdump ];
     };
     node2 = { pkgs, ... }: {
-      networking.firewall.extraCommands = "iptables -A INPUT -p vrrp -j ACCEPT";
       services.keepalived.enable = true;
+      services.keepalived.openFirewall = true;
       services.keepalived.vrrpInstances.test = {
         interface = "eth1";
         state = "MASTER";
diff --git a/nixos/tests/kerberos/heimdal.nix b/nixos/tests/kerberos/heimdal.nix
index 393289f7a92ca..098080a84592e 100644
--- a/nixos/tests/kerberos/heimdal.nix
+++ b/nixos/tests/kerberos/heimdal.nix
@@ -4,7 +4,7 @@ import ../make-test-python.nix ({pkgs, ...}: {
   nodes.machine = { config, libs, pkgs, ...}:
   { services.kerberos_server =
     { enable = true;
-      realms = {
+      settings.realms = {
         "FOO.BAR".acl = [{principal = "admin"; access = ["add" "cpw"];}];
       };
     };
diff --git a/nixos/tests/kerberos/mit.nix b/nixos/tests/kerberos/mit.nix
index 1191d047abbf0..172261f95fe6b 100644
--- a/nixos/tests/kerberos/mit.nix
+++ b/nixos/tests/kerberos/mit.nix
@@ -4,7 +4,7 @@ import ../make-test-python.nix ({pkgs, ...}: {
   nodes.machine = { config, libs, pkgs, ...}:
   { services.kerberos_server =
     { enable = true;
-      realms = {
+      settings.realms = {
         "FOO.BAR".acl = [{principal = "admin"; access = ["add" "cpw"];}];
       };
     };
diff --git a/nixos/tests/kernel-generic.nix b/nixos/tests/kernel-generic.nix
index 5f0e7b3e37cd7..e5d3b36642e72 100644
--- a/nixos/tests/kernel-generic.nix
+++ b/nixos/tests/kernel-generic.nix
@@ -23,9 +23,8 @@ let
         assert "${linuxPackages.kernel.modDirVersion}" in machine.succeed("uname -a")
       '';
   }) args);
-  kernels = (removeAttrs pkgs.linuxKernel.vanillaPackages ["__attrsFailEvaluation"]) // {
+  kernels = pkgs.linuxKernel.vanillaPackages // {
     inherit (pkgs.linuxKernel.packages)
-      linux_4_19_hardened
       linux_5_4_hardened
       linux_5_10_hardened
       linux_5_15_hardened
@@ -35,6 +34,7 @@ let
       linux_rt_5_10
       linux_rt_5_15
       linux_rt_6_1
+      linux_rt_6_6
       linux_libre
 
       linux_testing;
@@ -44,6 +44,9 @@ in mapAttrs (_: lP: testsForLinuxPackages lP) kernels // {
   passthru = {
     inherit testsForLinuxPackages;
 
+    # Useful during development for testing all kernel configs without building the full kernels
+    configfiles = mapAttrs (_: lP: lP.kernel.configfile) kernels;
+
     testsForKernel = kernel: testsForLinuxPackages (pkgs.linuxPackagesFor kernel);
   };
 }
diff --git a/nixos/tests/knot.nix b/nixos/tests/knot.nix
index eec94a22f2fa7..4441fed6ef507 100644
--- a/nixos/tests/knot.nix
+++ b/nixos/tests/knot.nix
@@ -190,6 +190,10 @@ in {
     primary.wait_for_unit("knot.service")
     secondary.wait_for_unit("knot.service")
 
+    for zone in ("example.com.", "sub.example.com."):
+        secondary.wait_until_succeeds(
+          f"knotc zone-status {zone} | grep -q 'serial: 2019031302'"
+        )
 
     def test(host, query_type, query, pattern):
         out = client.succeed(f"khost -t {query_type} {query} {host}").strip()
diff --git a/nixos/tests/kubernetes/base.nix b/nixos/tests/kubernetes/base.nix
index ba7b2d9b1d2de..870142017a31b 100644
--- a/nixos/tests/kubernetes/base.nix
+++ b/nixos/tests/kubernetes/base.nix
@@ -47,7 +47,7 @@ let
                   '') (attrValues nodes);
                 };
               };
-              programs.bash.enableCompletion = true;
+              programs.bash.completion.enable = true;
               environment.systemPackages = [ wrapKubectl ];
               services.flannel.iface = "eth1";
               services.kubernetes = {
@@ -59,6 +59,10 @@ let
                   securePort = 443;
                   advertiseAddress = master.ip;
                 };
+                # NOTE: which featureGates are useful for testing may change in
+                # the future; see the link below to find new ones
+                # https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/
+                featureGates = {AnonymousAuthConfigurableEndpoints = true; ConsistentListFromCache = false;};
                 masterAddress = "${masterName}.${config.networking.domain}";
               };
             }
diff --git a/nixos/tests/kubo/default.nix b/nixos/tests/kubo/default.nix
index d8c0c69dc1fbd..629922fc366db 100644
--- a/nixos/tests/kubo/default.nix
+++ b/nixos/tests/kubo/default.nix
@@ -1,7 +1,5 @@
 { recurseIntoAttrs, runTest }:
 recurseIntoAttrs {
   kubo = runTest ./kubo.nix;
-  # The FUSE functionality is completely broken since Kubo v0.24.0
-  # See https://github.com/ipfs/kubo/issues/10242
-  # kubo-fuse = runTest ./kubo-fuse.nix;
+  kubo-fuse = runTest ./kubo-fuse.nix;
 }
diff --git a/nixos/tests/kubo/kubo-fuse.nix b/nixos/tests/kubo/kubo-fuse.nix
index 71a5bf61649f6..c8c273fc0dfc7 100644
--- a/nixos/tests/kubo/kubo-fuse.nix
+++ b/nixos/tests/kubo/kubo-fuse.nix
@@ -23,7 +23,7 @@
 
     with subtest("FUSE mountpoint"):
         machine.fail("echo a | su bob -l -c 'ipfs add --quieter'")
-        # The FUSE mount functionality is broken as of v0.13.0 and v0.17.0.
+        # The FUSE mount functionality is broken as of v0.13.0. This is still the case with v0.29.0.
         # See https://github.com/ipfs/kubo/issues/9044.
         # Workaround: using CID Version 1 avoids that.
         ipfs_hash = machine.succeed(
diff --git a/nixos/tests/ladybird.nix b/nixos/tests/ladybird.nix
index 8ed0f47887c7d..85c23353a668a 100644
--- a/nixos/tests/ladybird.nix
+++ b/nixos/tests/ladybird.nix
@@ -10,9 +10,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     ];
 
     services.xserver.enable = true;
-    environment.systemPackages = [
-      pkgs.ladybird
-    ];
+    programs.ladybird.enable = true;
   };
 
   enableOCR = true;
diff --git a/nixos/tests/lemmy.nix b/nixos/tests/lemmy.nix
index d93df3646837f..66bdaffbe29e3 100644
--- a/nixos/tests/lemmy.nix
+++ b/nixos/tests/lemmy.nix
@@ -51,8 +51,8 @@ in
 
     with subtest("the backend starts and responds"):
         server.wait_for_open_port(${toString backendPort})
-        # wait until succeeds, it just needs few seconds for migrations, but lets give it 10s max
-        server.wait_until_succeeds("curl --fail localhost:${toString backendPort}/api/v3/site", 10)
+        # wait until it succeeds; it just needs a few seconds for migrations, but let's give it 50s max
+        server.wait_until_succeeds("curl --fail localhost:${toString backendPort}/api/v3/site", 50)
 
     with subtest("the UI starts and responds"):
         server.wait_for_unit("lemmy-ui.service")
@@ -77,7 +77,7 @@ in
         server.execute("systemctl stop lemmy-ui.service")
 
         def assert_http_code(url, expected_http_code, extra_curl_args=""):
-            _, http_code = server.execute(f'curl --silent -o /dev/null {extra_curl_args} --fail --write-out "%{{http_code}}" {url}')
+            _, http_code = server.execute(f'curl --location --silent -o /dev/null {extra_curl_args} --fail --write-out "%{{http_code}}" {url}')
             assert http_code == str(expected_http_code), f"expected http code {expected_http_code}, got {http_code}"
 
         # Caddy responds with HTTP code 502 if it cannot handle the requested path
diff --git a/nixos/tests/libreddit.nix b/nixos/tests/libreddit.nix
deleted file mode 100644
index ecf347b9e12e4..0000000000000
--- a/nixos/tests/libreddit.nix
+++ /dev/null
@@ -1,19 +0,0 @@
-import ./make-test-python.nix ({ lib, ... }:
-
-{
-  name = "libreddit";
-  meta.maintainers = with lib.maintainers; [ fab ];
-
-  nodes.machine = {
-    services.libreddit.enable = true;
-    # Test CAP_NET_BIND_SERVICE
-    services.libreddit.port = 80;
-  };
-
-  testScript = ''
-    machine.wait_for_unit("libreddit.service")
-    machine.wait_for_open_port(80)
-    # Query a page that does not require Internet access
-    machine.succeed("curl --fail http://localhost:80/settings")
-  '';
-})
diff --git a/nixos/tests/librenms.nix b/nixos/tests/librenms.nix
index c59f56a323161..14035a01ce87d 100644
--- a/nixos/tests/librenms.nix
+++ b/nixos/tests/librenms.nix
@@ -3,7 +3,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
 let
   api_token = "f87f42114e44b63ad1b9e3c3d33d6fbe"; # random md5 hash
   wrong_api_token = "e68ba041fcf1eab923a7a6de3af5f726"; # another random md5 hash
-in {
+in
+{
   name = "librenms";
   meta.maintainers = lib.teams.wdz.members;
 
@@ -49,6 +50,9 @@ in {
         API_USER_NAME=api
         API_TOKEN=${api_token} # random md5 hash
 
+        # seed the database to create the admin roles
+        ${pkgs.librenms}/artisan db:seed --force --no-interaction
+
         # we don't need to know the password, it just has to exist
         API_USER_PASS=$(${pkgs.pwgen}/bin/pwgen -s 64 1)
         ${pkgs.librenms}/artisan user:add $API_USER_NAME -r admin -p $API_USER_PASS
@@ -60,37 +64,29 @@ in {
   };
 
   nodes.snmphost = {
-    networking.firewall.allowedUDPPorts = [ 161 ];
 
-    systemd.services.snmpd = {
-      description = "snmpd";
-      after = [ "network-online.target" ];
-      wants = [ "network-online.target" ];
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        Type = "forking";
-        User = "root";
-        Group = "root";
-        ExecStart = let
-          snmpd-config = pkgs.writeText "snmpd-config" ''
-            com2sec readonly default public
-
-            group MyROGroup v2c        readonly
-            view all    included  .1                               80
-            access MyROGroup ""      any       noauth    exact  all    none   none
-
-            syslocation Testcity, Testcountry
-            syscontact Testi mc Test <test@example.com>
-          '';
-        in "${pkgs.net-snmp}/bin/snmpd -c ${snmpd-config} -C";
-      };
+    services.snmpd = {
+      enable = true;
+      openFirewall = true;
+
+      configText = ''
+        com2sec readonly default public
+
+        group MyROGroup v2c        readonly
+        view all    included  .1                               80
+        access MyROGroup ""      any       noauth    exact  all    none   none
+
+        syslocation Testcity, Testcountry
+        syscontact Testi mc Test <test@example.com>
+      '';
+
     };
   };
 
   testScript = ''
     start_all()
 
-    snmphost.wait_until_succeeds("pgrep snmpd")
+    snmphost.wait_for_unit("snmpd.service")
 
     librenms.wait_for_unit("lnms-api-init.service")
     librenms.wait_for_open_port(80)
diff --git a/nixos/tests/libreswan-nat.nix b/nixos/tests/libreswan-nat.nix
new file mode 100644
index 0000000000000..973e304f9e3a3
--- /dev/null
+++ b/nixos/tests/libreswan-nat.nix
@@ -0,0 +1,238 @@
+# This test sets up an IPsec VPN server that allows a client behind an IPv4 NAT
+# router to access the IPv6 internet. We check that the client initially can't
+# ping an IPv6 host and that its connection to the server can be eavesdropped by
+# the router, but once the IPsec tunnel is established it can talk to an
+# IPv6-only host and the connection is secure.
+#
+# Notes:
+#   - the VPN is implemented using policy-based routing.
+#   - the client is assigned an IPv6 address from the same /64 subnet
+#     of the server, without DHCPv6 or SLAAC.
+#   - the server acts as NDP proxy for the client, so that the latter
+#     becomes reachable at its assigned IPv6 via the server.
+#   - the client falls back to TCP if UDP is blocked
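+#
+# Topology:
+#   client (192.168.1.2) --vlan2-- router (NAT, 192.168.1.1/203.0.113.1) --vlan1-- server (203.0.113.2/2001:db8::1) --vlan3-- inner (2001:db8::2)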
+
+{ lib, pkgs, ... }:
+
+let
+
+  # Common network setup
+  baseNetwork = {
+    # shared hosts file
+    networking.extraHosts = lib.mkVMOverride ''
+      203.0.113.1 router
+      203.0.113.2 server
+      2001:db8::2 inner
+      192.168.1.1 client
+    '';
+    # open a port for testing
+    networking.firewall.allowedUDPPorts = [ 1234 ];
+  };
+
+  # Common IPsec configuration
+  baseTunnel = {
+    services.libreswan.enable = true;
+    environment.etc."ipsec.d/tunnel.secrets" =
+      { text = ''@server %any : PSK "j1JbIi9WY07rxwcNQ6nbyThKCf9DGxWOyokXIQcAQUnafsNTUJxfsxwk9WYK8fHj"'';
+        mode = "600";
+      };
+  };
+
+  # Helpers to add a static IP address on an interface
+  setAddress4 = iface: addr: {
+    networking.interfaces.${iface}.ipv4.addresses =
+      lib.mkVMOverride [ { address = addr; prefixLength = 24; } ];
+  };
+  setAddress6 = iface: addr: {
+    networking.interfaces.${iface}.ipv6.addresses =
+      lib.mkVMOverride [ { address = addr; prefixLength = 64; } ];
+  };
+
+in
+
+{
+  name = "libreswan-nat";
+  meta = with lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  nodes.router = { pkgs, ... }: lib.mkMerge [
+    baseNetwork
+    (setAddress4 "eth1" "203.0.113.1")
+    (setAddress4 "eth2" "192.168.1.1")
+    {
+      virtualisation.vlans = [ 1 2 ];
+      environment.systemPackages = [ pkgs.tcpdump ];
+      networking.nat = {
+        enable = true;
+        externalInterface = "eth1";
+        internalInterfaces = [ "eth2" ];
+      };
+      networking.firewall.trustedInterfaces = [ "eth2" ];
+    }
+  ];
+
+  nodes.inner = lib.mkMerge [
+    baseNetwork
+    (setAddress6 "eth1" "2001:db8::2")
+    { virtualisation.vlans = [ 3 ]; }
+  ];
+
+  nodes.server = lib.mkMerge [
+    baseNetwork
+    baseTunnel
+    (setAddress4 "eth1" "203.0.113.2")
+    (setAddress6 "eth2" "2001:db8::1")
+    {
+      virtualisation.vlans = [ 1 3 ];
+      networking.firewall.allowedUDPPorts = [ 500 4500 ];
+      networking.firewall.allowedTCPPorts = [ 993 ];
+
+      # see https://github.com/NixOS/nixpkgs/pull/310857
+      networking.firewall.checkReversePath = false;
+
+      boot.kernel.sysctl = {
+        # enable packet forwarding
+        "net.ipv6.conf.all.forwarding" = 1;
+        "net.ipv4.conf.all.forwarding" = 1;
+        # enable NDP proxy for VPN clients
+        "net.ipv6.conf.all.proxy_ndp" = 1;
+      };
+
+      services.libreswan.configSetup = "listen-tcp=yes";
+      services.libreswan.connections.tunnel = ''
+        # server
+        left=203.0.113.2
+        leftid=@server
+        leftsubnet=::/0
+        leftupdown=${pkgs.writeScript "updown" ''
+          # act as NDP proxy for VPN clients
+          if test "$PLUTO_VERB" = up-client-v6; then
+            ip neigh add proxy "$PLUTO_PEER_CLIENT_NET" dev eth2
+          fi
+          if test "$PLUTO_VERB" = down-client-v6; then
+            ip neigh del proxy "$PLUTO_PEER_CLIENT_NET" dev eth2
+          fi
+        ''}
+
+        # clients
+        right=%any
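+        # hand out client addresses from the server's own /64 so the NDP proxy can cover them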
+        rightaddresspool=2001:db8:0:0:c::/97
+        modecfgdns=2001:db8::1
+
+        # clean up vanished clients
+        dpddelay=30
+
+        auto=add
+        keyexchange=ikev2
+        rekey=no
+        narrowing=yes
+        fragmentation=yes
+        authby=secret
+
+        leftikeport=993
+        retransmit-timeout=10s
+      '';
+    }
+  ];
+
+  nodes.client = lib.mkMerge [
+    baseNetwork
+    baseTunnel
+    (setAddress4 "eth1" "192.168.1.2")
+    {
+      virtualisation.vlans = [ 2 ];
+      networking.defaultGateway = {
+        address = "192.168.1.1";
+        interface = "eth1";
+      };
+      services.libreswan.connections.tunnel = ''
+        # client
+        left=%defaultroute
+        leftid=@client
+        leftmodecfgclient=yes
+        leftsubnet=::/0
+
+        # server
+        right=203.0.113.2
+        rightid=@server
+        rightsubnet=::/0
+
+        auto=add
+        narrowing=yes
+        rekey=yes
+        fragmentation=yes
+        authby=secret
+
+        # fallback when UDP is blocked
+        enable-tcp=fallback
+        tcp-remoteport=993
+        retransmit-timeout=5s
+      '';
+    }
+  ];
+
+  testScript =
+    ''
+      def client_to_host(machine, msg: str):
+          """
+          Sends a message from the client to the given host
+          """
+          machine.execute("nc -lu :: 1234 >/tmp/msg &")
+          client.sleep(1)
+          client.succeed(f"echo '{msg}' | nc -uw 0 {machine.name} 1234")
+          client.sleep(1)
+          machine.succeed(f"grep '{msg}' /tmp/msg")
+
+
+      def eavesdrop():
+          """
+          Starts eavesdropping on the router
+          """
+          match = "udp port 1234"
+          router.execute(f"tcpdump -i eth1 -c 1 -Avv {match} >/tmp/log &")
+
+
+      start_all()
+
+      with subtest("Network is up"):
+          client.wait_until_succeeds("ping -c1 server")
+          client.succeed("systemctl restart ipsec")
+          server.succeed("systemctl restart ipsec")
+
+      with subtest("Router can eavesdrop cleartext traffic"):
+          eavesdrop()
+          client_to_host(server, "I secretly love turnip")
+          router.sleep(1)
+          router.succeed("grep turnip /tmp/log")
+
+      with subtest("Libreswan is ready"):
+          client.wait_for_unit("ipsec")
+          server.wait_for_unit("ipsec")
+          client.succeed("ipsec checkconfig")
+          server.succeed("ipsec checkconfig")
+
+      with subtest("Client can't ping VPN host"):
+          client.fail("ping -c1 inner")
+
+      with subtest("Client can start the tunnel"):
+          client.succeed("ipsec start tunnel")
+          client.succeed("ip -6 addr show lo | grep -q 2001:db8:0:0:c")
+
+      with subtest("Client can ping VPN host"):
+          client.wait_until_succeeds("ping -c1 2001:db8::1")
+          client.succeed("ping -c1 inner")
+
+      with subtest("Eve no longer can eavesdrop"):
+          eavesdrop()
+          client_to_host(inner, "Just kidding, I actually like rhubarb")
+          router.sleep(1)
+          router.fail("grep rhubarb /tmp/log")
+
+      with subtest("TCP fallback is available"):
+          server.succeed("iptables -I nixos-fw -p udp -j DROP")
+          client.succeed("ipsec restart")
+          client.execute("ipsec start tunnel")
+          client.wait_until_succeeds("ping -c1 inner")
+    '';
+}
diff --git a/nixos/tests/libreswan.nix b/nixos/tests/libreswan.nix
index c798a04645bc0..1a25c1e6d9af7 100644
--- a/nixos/tests/libreswan.nix
+++ b/nixos/tests/libreswan.nix
@@ -3,7 +3,7 @@
 # Eve can eavesdrop the plaintext traffic between Alice and Bob, but once they
 # enable the secure tunnel Eve's spying becomes ineffective.
 
-import ./make-test-python.nix ({ lib, pkgs, ... }:
+{ lib, pkgs, ... }:
 
 let
 
@@ -37,6 +37,8 @@ let
     useDHCP = false;
     interfaces.eth1.ipv4.addresses = lib.mkVMOverride [];
     interfaces.eth2.ipv4.addresses = lib.mkVMOverride [];
+    interfaces.eth1.ipv6.addresses = lib.mkVMOverride [];
+    interfaces.eth2.ipv6.addresses = lib.mkVMOverride [];
     # open a port for testing
     firewall.allowedUDPPorts = [ 1234 ];
   };
@@ -133,4 +135,4 @@ in
           eve.sleep(1)
           eve.fail("grep rhubarb /tmp/log")
     '';
-})
+}
diff --git a/nixos/tests/libvirtd.nix b/nixos/tests/libvirtd.nix
index df80dcc21a2eb..27ffaac3e62d1 100644
--- a/nixos/tests/libvirtd.nix
+++ b/nixos/tests/libvirtd.nix
@@ -20,6 +20,11 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         networking.hostId = "deadbeef"; # needed for zfs
         security.polkit.enable = true;
         environment.systemPackages = with pkgs; [ virt-manager ];
+
+        # This adds `resolve` to the `hosts` line of /etc/nsswitch.conf; NSS modules placed after it
+        # will not be consulted. Therefore this tests that the libvirtd NSS modules will
+        # be placed early enough for name resolution to work.
+        services.resolved.enable = true;
       };
   };
 
diff --git a/nixos/tests/limesurvey.nix b/nixos/tests/limesurvey.nix
index 9a3193991f352..87e9fe1cdc149 100644
--- a/nixos/tests/limesurvey.nix
+++ b/nixos/tests/limesurvey.nix
@@ -1,6 +1,6 @@
-import ./make-test-python.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
   name = "limesurvey";
-  meta.maintainers = [ pkgs.lib.maintainers.aanderse ];
+  meta.maintainers = [ lib.maintainers.aanderse ];
 
   nodes.machine = { ... }: {
     services.limesurvey = {
@@ -9,6 +9,8 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         hostName = "example.local";
         adminAddr = "root@example.local";
       };
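+      # dummy encryption key and nonce, fine for a throwaway test instance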
+      encryptionKeyFile = pkgs.writeText "key" (lib.strings.replicate 32 "0");
+      encryptionNonceFile = pkgs.writeText "nonce" (lib.strings.replicate 24 "0");
     };
 
     # limesurvey won't work without a dot in the hostname
diff --git a/nixos/tests/livebook-service.nix b/nixos/tests/livebook-service.nix
index f428412e16448..2d699efb1e3ee 100644
--- a/nixos/tests/livebook-service.nix
+++ b/nixos/tests/livebook-service.nix
@@ -11,9 +11,6 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
         enableUserService = true;
         environment = {
           LIVEBOOK_PORT = 20123;
-          LIVEBOOK_COOKIE = "chocolate chip";
-          LIVEBOOK_TOKEN_ENABLED = true;
-
         };
         environmentFile = pkgs.writeText "livebook.env" ''
           LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
@@ -38,7 +35,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
 
       machine.succeed("loginctl enable-linger alice")
       machine.wait_until_succeeds("${sudo} systemctl --user is-active livebook.service")
-      machine.wait_for_open_port(20123)
+      machine.wait_for_open_port(20123, timeout=10)
 
       machine.succeed("curl -L localhost:20123 | grep 'Type password'")
     '';
diff --git a/nixos/tests/localsend.nix b/nixos/tests/localsend.nix
new file mode 100644
index 0000000000000..8c0a6ac681900
--- /dev/null
+++ b/nixos/tests/localsend.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix (
+  { ... }:
+  {
+    name = "localsend";
+
+    nodes.machine =
+      { ... }:
+      {
+        imports = [ ./common/x11.nix ];
+        programs.localsend.enable = true;
+      };
+
+    testScript = ''
+      machine.wait_for_x()
+      machine.succeed("localsend_app >&2 &")
+      machine.wait_for_open_port(53317)
+      machine.wait_for_window("LocalSend", 10)
+      machine.succeed("netstat --listening --program --tcp | grep -P 'tcp.*53317.*localsend'")
+    '';
+  }
+)
diff --git a/nixos/tests/login.nix b/nixos/tests/login.nix
index 67f5764a0a162..e3b1b877940ac 100644
--- a/nixos/tests/login.nix
+++ b/nixos/tests/login.nix
@@ -3,13 +3,12 @@ import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
 {
   name = "login";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco ];
+    maintainers = [ ];
   };
 
   nodes.machine =
     { pkgs, lib, ... }:
     { boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest;
-      sound.enable = true; # needed for the factl test, /dev/snd/* exists without them but udev doesn't care then
     };
 
   testScript = ''
diff --git a/nixos/tests/logrotate.nix b/nixos/tests/logrotate.nix
index bcbe89c259ae5..8d5887abec941 100644
--- a/nixos/tests/logrotate.nix
+++ b/nixos/tests/logrotate.nix
@@ -16,52 +16,60 @@ import ./make-test-python.nix ({ pkgs, ... }: rec {
   };
 
   nodes = {
-    defaultMachine = { ... }: { };
+    defaultMachine = { ... }: {
+      services.logrotate.enable = true;
+    };
     failingMachine = { ... }: {
-      services.logrotate.configFile = pkgs.writeText "logrotate.conf" ''
-        # self-written config file
-        su notarealuser notagroupeither
-      '';
+      services.logrotate = {
+        enable = true;
+        configFile = pkgs.writeText "logrotate.conf" ''
+          # self-written config file
+          su notarealuser notagroupeither
+        '';
+      };
     };
     machine = { config, ... }: {
       imports = [ importTest ];
 
-      services.logrotate.settings = {
-        # remove default frequency header and add another
-        header = {
-          frequency = null;
-          delaycompress = true;
-        };
-        # extra global setting... affecting nothing
-        last_line = {
-          global = true;
-          priority = 2000;
-          shred = true;
-        };
-        # using mail somewhere should add --mail to logrotate invocation
-        sendmail = {
-          mail = "user@domain.tld";
-        };
-        # postrotate should be suffixed by 'endscript'
-        postrotate = {
-          postrotate = "touch /dev/null";
-        };
-        # check checkConfig works as expected: there is nothing to check here
-        # except that the file build passes
-        checkConf = {
-          su = "root utmp";
-          createolddir = "0750 root utmp";
-          create = "root utmp";
-          "create " = "0750 root utmp";
-        };
-        # multiple paths should be aggregated
-        multipath = {
-          files = [ "file1" "file2" ];
-        };
-        # overriding imported path should keep existing attributes
-        # (e.g. olddir is still set)
-        import = {
-          notifempty = true;
+      services.logrotate = {
+        enable = true;
+        settings = {
+          # remove default frequency header and add another
+          header = {
+            frequency = null;
+            delaycompress = true;
+          };
+          # extra global setting... affecting nothing
+          last_line = {
+            global = true;
+            priority = 2000;
+            shred = true;
+          };
+          # using mail somewhere should add --mail to logrotate invocation
+          sendmail = {
+            mail = "user@domain.tld";
+          };
+          # postrotate should be suffixed by 'endscript'
+          postrotate = {
+            postrotate = "touch /dev/null";
+          };
+          # check checkConfig works as expected: there is nothing to check here
+          # except that the file build passes
+          checkConf = {
+            su = "root utmp";
+            createolddir = "0750 root utmp";
+            create = "root utmp";
+            "create " = "0750 root utmp";
+          };
+          # multiple paths should be aggregated
+          multipath = {
+            files = [ "file1" "file2" ];
+          };
+          # overriding imported path should keep existing attributes
+          # (e.g. olddir is still set)
+          import = {
+            notifempty = true;
+          };
         };
       };
     };
@@ -119,5 +127,7 @@ import ./make-test-python.nix ({ pkgs, ... }: rec {
           if info["ActiveState"] != "failed":
               raise Exception('logrotate-checkconf.service was not failed')
 
+      # Log the systemd-analyze security report for logrotate.service (hiding
+      # checks that already pass) for manual inspection.
+      machine.log(machine.execute("systemd-analyze security logrotate.service | grep -v ✓")[1])
+
     '';
 })
diff --git a/nixos/tests/lomiri-calculator-app.nix b/nixos/tests/lomiri-calculator-app.nix
new file mode 100644
index 0000000000000..14d8073611f23
--- /dev/null
+++ b/nixos/tests/lomiri-calculator-app.nix
@@ -0,0 +1,59 @@
+{ pkgs, lib, ... }:
+{
+  name = "lomiri-calculator-app-standalone";
+  meta.maintainers = lib.teams.lomiri.members;
+
+  nodes.machine =
+    { config, pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+
+      services.xserver.enable = true;
+
+      environment = {
+        systemPackages = with pkgs.lomiri; [
+          suru-icon-theme
+          lomiri-calculator-app
+        ];
+        variables = {
+          UITK_ICON_THEME = "suru";
+        };
+      };
+
+      i18n.supportedLocales = [ "all" ];
+
+      fonts.packages = with pkgs; [
+        # Intended font & helps with OCR
+        ubuntu-classic
+      ];
+    };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.wait_for_x()
+
+    with subtest("lomiri calculator launches"):
+        machine.execute("lomiri-calculator-app >&2 &")
+        machine.wait_for_text("Calculator")
+        machine.screenshot("lomiri-calculator")
+
+    with subtest("lomiri calculator works"):
+        machine.send_key("tab") # Fix focus
+
+        machine.send_chars("22*16\n")
+        machine.wait_for_text("352")
+        machine.screenshot("lomiri-calculator_caninfactdobasicmath")
+
+    machine.succeed("pkill -f lomiri-calculator-app")
+
+    with subtest("lomiri calculator localisation works"):
+        machine.execute("env LANG=de_DE.UTF-8 lomiri-calculator-app >&2 &")
+        machine.wait_for_text("Rechner")
+        machine.screenshot("lomiri-calculator_localised")
+
+    # History of previous run should have loaded
+    with subtest("lomiri calculator history works"):
+        machine.wait_for_text("352")
+  '';
+}
diff --git a/nixos/tests/lomiri-camera-app.nix b/nixos/tests/lomiri-camera-app.nix
new file mode 100644
index 0000000000000..ccd53a37135b2
--- /dev/null
+++ b/nixos/tests/lomiri-camera-app.nix
@@ -0,0 +1,135 @@
+{ lib, ... }:
+{
+  name = "lomiri-camera-app-standalone";
+  meta.maintainers = lib.teams.lomiri.members;
+
+  nodes.machine =
+    { config, pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+
+      services.xserver.enable = true;
+
+      environment = {
+        systemPackages =
+          with pkgs;
+          [
+            feh # view photo result
+            ffmpeg # fake webcam stream
+            gnome-text-editor # somewhere to paste QR result
+            (imagemagick.override { ghostscriptSupport = true; }) # add label for OCR
+            qrtool # generate QR code
+            xdotool # clicking on QR button
+          ]
+          ++ (with pkgs.lomiri; [
+            suru-icon-theme
+            lomiri-camera-app
+          ]);
+        variables = {
+          UITK_ICON_THEME = "suru";
+        };
+      };
+
+      i18n.supportedLocales = [ "all" ];
+
+      fonts = {
+        packages = with pkgs; [
+          # Intended font & helps with OCR
+          ubuntu-classic
+        ];
+      };
+
+      # Fake camera
+      boot.extraModulePackages = with config.boot.kernelPackages; [ v4l2loopback ];
+    };
+
+  enableOCR = true;
+
+  testScript =
+    let
+      qrLabel = "Image";
+      qrContent = "Test";
+    in
+    ''
+      machine.wait_for_x()
+
+      with subtest("lomiri camera launches"):
+          machine.succeed("lomiri-camera-app >&2 &")
+          machine.wait_for_text("Cannot access")
+          machine.screenshot("lomiri-camera_open")
+
+      machine.succeed("pkill -f lomiri-camera-app")
+
+      # Setup fake v4l2 camera
+      machine.succeed("modprobe v4l2loopback video_nr=10 card_label=Video-Loopback exclusive_caps=1")
+      machine.succeed("qrtool encode '${qrContent}' -s 20 -m 10 > qr.png")
+      # Horizontal flip, add text, flip back. The camera shows the image mirrored, so the text has to be reversed for OCR
+      machine.succeed("magick qr.png -flop -pointsize 70 -fill black -annotate +100+100 '${qrLabel}' -flop output.png")
+      machine.succeed("ffmpeg -re -loop 1 -i output.png -vf format=yuv420p -f v4l2 /dev/video10 -loglevel fatal >&2 &")
+
+      with subtest("lomiri camera uses camera"):
+          machine.succeed("lomiri-camera-app >&2 &")
+          machine.wait_for_text("${qrLabel}")
+          machine.screenshot("lomiri-camera_feed")
+
+          machine.succeed("xdotool mousemove 320 610 click 1") # take photo
+          machine.wait_until_succeeds("find /root/Pictures/camera.ubports -name '*.jpg'")
+
+          # Check that the image is correct
+          machine.send_key("ctrl-alt-right")
+          machine.succeed("magick /root/Pictures/camera.ubports/IMG_00000001.jpg -flop photo_flip.png")
+          machine.succeed("feh photo_flip.png >&2 &")
+          machine.wait_for_text("${qrLabel}")
+          machine.screenshot("lomiri-camera_photo")
+
+      machine.succeed("pkill -f feh")
+      machine.send_key("ctrl-alt-left")
+      machine.succeed("pkill -f lomiri-camera-app")
+
+      with subtest("lomiri barcode scanner uses camera"):
+          machine.succeed("lomiri-camera-app --mode=barcode-reader >&2 &")
+          machine.wait_for_text("${qrLabel}")
+          machine.succeed("xdotool mousemove 320 610 click 1") # open up QR decode result
+
+          # OCR is struggling to recognise the text. Click the clipboard button and paste the result somewhere else
+          machine.sleep(5)
+          machine.screenshot("lomiri-barcode_decode")
+          machine.succeed("xdotool mousemove 350 530 click 1")
+          machine.sleep(5)
+
+          # Need to make a new window without closing camera app, otherwise clipboard content gets lost?
+          machine.send_key("ctrl-alt-right")
+          machine.succeed("gnome-text-editor >&2 &")
+          machine.wait_for_text("New")
+
+          # Font size up to help with OCR
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+          machine.send_key("ctrl-kp_add")
+
+          machine.send_key("ctrl-v")
+          machine.wait_for_text("${qrContent}")
+
+      machine.succeed("pkill -f gnome-text-editor")
+      machine.send_key("ctrl-alt-left")
+      machine.succeed("pkill -f lomiri-camera-app")
+
+      with subtest("lomiri camera localisation works"):
+          machine.succeed("env LANG=de_DE.UTF-8 lomiri-camera-app >&2 &")
+          machine.wait_for_text("Kamera")
+          machine.screenshot("lomiri-camera_localised")
+    '';
+}
diff --git a/nixos/tests/lomiri-clock-app.nix b/nixos/tests/lomiri-clock-app.nix
new file mode 100644
index 0000000000000..9db5cee49cf7b
--- /dev/null
+++ b/nixos/tests/lomiri-clock-app.nix
@@ -0,0 +1,48 @@
+{ pkgs, lib, ... }:
+{
+  name = "lomiri-clock-app-standalone";
+  meta.maintainers = lib.teams.lomiri.members;
+
+  nodes.machine =
+    { config, pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+
+      services.xserver.enable = true;
+
+      environment = {
+        systemPackages = with pkgs.lomiri; [
+          suru-icon-theme
+          lomiri-clock-app
+        ];
+        variables = {
+          UITK_ICON_THEME = "suru";
+        };
+      };
+
+      i18n.supportedLocales = [ "all" ];
+
+      fonts.packages = with pkgs; [
+        # Intended font & helps with OCR
+        ubuntu-classic
+      ];
+    };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.wait_for_x()
+
+    with subtest("lomiri clock launches"):
+        machine.execute("lomiri-clock-app >&2 &")
+        machine.wait_for_text(r"(clock.ubports|City|Alarms)")
+        machine.screenshot("lomiri-clock_open")
+
+    machine.succeed("pkill -f lomiri-clock-app")
+
+    with subtest("lomiri clock localisation works"):
+        machine.execute("env LANG=de_DE.UTF-8 lomiri-clock-app >&2 &")
+        machine.wait_for_text(r"(Stadt|Weckzeiten)")
+        machine.screenshot("lomiri-clock_localised")
+  '';
+}
diff --git a/nixos/tests/lomiri-docviewer-app.nix b/nixos/tests/lomiri-docviewer-app.nix
new file mode 100644
index 0000000000000..c21a121f6c022
--- /dev/null
+++ b/nixos/tests/lomiri-docviewer-app.nix
@@ -0,0 +1,84 @@
+{ lib, ... }:
+let
+  exampleText = "Lorem ipsum dolor sit amet";
+in
+{
+  name = "lomiri-docviewer-app-standalone";
+  meta.maintainers = lib.teams.lomiri.members;
+
+  nodes.machine =
+    { config, pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+
+      services.xserver.enable = true;
+
+      environment = {
+        etc."docviewer-sampletext.txt".text = exampleText;
+        systemPackages =
+          with pkgs;
+          [
+            libreoffice # txt -> odf to test LibreOfficeKit integration
+          ]
+          ++ (with pkgs.lomiri; [
+            suru-icon-theme
+            lomiri-docviewer-app
+          ]);
+        variables = {
+          UITK_ICON_THEME = "suru";
+        };
+      };
+
+      i18n.supportedLocales = [ "all" ];
+
+      fonts = {
+        packages = with pkgs; [
+          # Intended font & helps with OCR
+          ubuntu-classic
+        ];
+      };
+    };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.wait_for_x()
+
+    with subtest("lomiri docviewer launches"):
+        machine.succeed("lomiri-docviewer-app >&2 &")
+        machine.wait_for_text("No documents")
+        machine.screenshot("lomiri-docviewer_open")
+
+    machine.succeed("pkill -f lomiri-docviewer-app")
+
+    # Setup different document types
+    machine.succeed("soffice --convert-to odt --outdir /root/ /etc/docviewer-sampletext.txt")
+    machine.succeed("soffice --convert-to pdf --outdir /root/ /etc/docviewer-sampletext.txt")
+
+    with subtest("lomiri docviewer txt works"):
+        machine.succeed("lomiri-docviewer-app /etc/docviewer-sampletext.txt >&2 &")
+        machine.wait_for_text("${exampleText}")
+        machine.screenshot("lomiri-docviewer_txt")
+
+    machine.succeed("pkill -f lomiri-docviewer-app")
+
+    with subtest("lomiri docviewer odt works"):
+        machine.succeed("lomiri-docviewer-app /root/docviewer-sampletext.odt >&2 &")
+        machine.wait_for_text("${exampleText}")
+        machine.screenshot("lomiri-docviewer_odt")
+
+    machine.succeed("pkill -f lomiri-docviewer-app")
+
+    with subtest("lomiri docviewer pdf works"):
+        machine.succeed("lomiri-docviewer-app /root/docviewer-sampletext.pdf >&2 &")
+        machine.wait_for_text("${exampleText}")
+        machine.screenshot("lomiri-docviewer_pdf")
+
+    machine.succeed("pkill -f lomiri-docviewer-app")
+
+    with subtest("lomiri docviewer localisation works"):
+        machine.succeed("env LANG=de_DE.UTF-8 lomiri-docviewer-app >&2 &")
+        machine.wait_for_text("Keine Dokumente")
+        machine.screenshot("lomiri-docviewer_localised")
+  '';
+}
diff --git a/nixos/tests/lomiri-filemanager-app.nix b/nixos/tests/lomiri-filemanager-app.nix
new file mode 100644
index 0000000000000..efde3a01c1145
--- /dev/null
+++ b/nixos/tests/lomiri-filemanager-app.nix
@@ -0,0 +1,48 @@
+{ pkgs, lib, ... }:
+{
+  name = "lomiri-filemanager-app-standalone";
+  meta.maintainers = lib.teams.lomiri.members;
+
+  nodes.machine =
+    { config, pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+
+      services.xserver.enable = true;
+
+      environment = {
+        systemPackages = with pkgs.lomiri; [
+          suru-icon-theme
+          lomiri-filemanager-app
+        ];
+        variables = {
+          UITK_ICON_THEME = "suru";
+        };
+      };
+
+      i18n.supportedLocales = [ "all" ];
+
+      fonts.packages = with pkgs; [
+        # Intended font & helps with OCR
+        ubuntu-classic
+      ];
+    };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.wait_for_x()
+
+    with subtest("lomiri filemanager launches"):
+        machine.execute("lomiri-filemanager-app >&2 &")
+        machine.wait_for_text(r"(filemanager.ubports|alice|items|directories|files|folder)")
+        machine.screenshot("lomiri-filemanager_open")
+
+    machine.succeed("pkill -f lomiri-filemanager-app")
+
+    with subtest("lomiri filemanager localisation works"):
+        machine.execute("env LANG=de_DE.UTF-8 lomiri-filemanager-app >&2 &")
+        machine.wait_for_text(r"(Elemente|Verzeichnisse|Dateien|Ordner)")
+        machine.screenshot("lomiri-filemanager_localised")
+  '';
+}
diff --git a/nixos/tests/lomiri-gallery-app.nix b/nixos/tests/lomiri-gallery-app.nix
new file mode 100644
index 0000000000000..d8cf7466656ab
--- /dev/null
+++ b/nixos/tests/lomiri-gallery-app.nix
@@ -0,0 +1,156 @@
+{ lib, ... }:
+{
+  name = "lomiri-gallery-app-standalone";
+  meta.maintainers = lib.teams.lomiri.members;
+
+  nodes.machine =
+    { config, pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+
+      services.xserver.enable = true;
+
+      environment = {
+        systemPackages =
+          with pkgs;
+          [
+            ffmpeg # make a video from the image
+            (imagemagick.override { ghostscriptSupport = true; }) # example image creation
+            mpv # URI dispatching for video support
+            xdotool # mouse movement
+          ]
+          ++ (with pkgs.lomiri; [
+            suru-icon-theme
+            lomiri-gallery-app
+            lomiri-thumbnailer # finds new images & generates thumbnails
+          ]);
+        variables = {
+          UITK_ICON_THEME = "suru";
+        };
+      };
+
+      i18n.supportedLocales = [ "all" ];
+
+      fonts = {
+        packages = with pkgs; [
+          # Intended font & helps with OCR
+          ubuntu-classic
+        ];
+      };
+    };
+
+  enableOCR = true;
+
+  testScript =
+    let
+      imageLabel = "Image";
+    in
+    ''
+      machine.wait_for_x()
+
+      with subtest("lomiri gallery launches"):
+          machine.succeed("lomiri-gallery-app >&2 &")
+          machine.sleep(2)
+          machine.wait_for_text(r"(Albums|Events|Photos)")
+          machine.screenshot("lomiri-gallery_open")
+
+      machine.succeed("pkill -f lomiri-gallery-app")
+
+      machine.succeed("mkdir /root/Pictures /root/Videos")
+      # Setup example data, OCR-friendly:
+      # - White square, black text
+      # - uppercase extension
+      machine.succeed("magick -size 500x500 -background white -fill black canvas:white -pointsize 70 -annotate +100+300 '${imageLabel}' /root/Pictures/output.PNG")
+
+      # Different image formats
+      machine.succeed("magick /root/Pictures/output.PNG /root/Pictures/output.JPG")
+      machine.succeed("magick /root/Pictures/output.PNG /root/Pictures/output.BMP")
+      machine.succeed("magick /root/Pictures/output.PNG /root/Pictures/output.GIF")
+
+      # Video for dispatching
+      machine.succeed("ffmpeg -loop 1 -r 1 -i /root/Pictures/output.PNG -t 100 -pix_fmt yuv420p /root/Videos/output.MP4")
+
+      with subtest("lomiri gallery handles files"):
+          machine.succeed("lomiri-gallery-app >&2 &")
+          machine.sleep(2)
+          machine.wait_for_text(r"(Albums|Events|Photos|${imageLabel})")
+
+          machine.succeed("xdotool mousemove 30 40 click 1") # burger menu for categories
+          machine.sleep(2)
+          machine.succeed("xdotool mousemove 30 180 click 1") # photos
+          machine.sleep(2)
+          machine.wait_for_text("${imageLabel}") # should see thumbnail of at least one of them
+          machine.screenshot("lomiri-gallery_photos")
+
+          machine.succeed("xdotool mousemove 80 140 click 1") # select newest one
+          machine.sleep(2)
+          machine.succeed("xdotool mousemove 80 140 click 1") # enable top-bar
+          machine.sleep(2)
+
+          with subtest("lomiri gallery handles mp4"):
+              machine.succeed("xdotool mousemove 870 50 click 1") # open media information
+              machine.sleep(2)
+              machine.wait_for_text("MP4") # make sure we're looking at the right file
+              machine.screenshot("lomiri-gallery_mp4_info")
+              machine.send_key("esc")
+
+              machine.wait_for_text("${imageLabel}") # make sure thumbnail rendering worked
+
+              machine.succeed("xdotool mousemove 450 350 click 1") # dispatch to system's video handler
+              machine.wait_until_succeeds("pgrep -u root -f mpv") # wait for video to start
+              machine.sleep(10)
+              machine.succeed("pgrep -u root -f mpv") # should still be playing
+              machine.screenshot("lomiri-gallery_mp4_dispatch")
+
+              machine.send_key("q")
+              machine.wait_until_fails("pgrep mpv") # wait for video to stop
+
+              machine.send_key("right")
+
+          with subtest("lomiri gallery handles gif"):
+              machine.succeed("xdotool mousemove 870 50 click 1") # open media information
+              machine.sleep(2)
+              machine.wait_for_text("GIF") # make sure we're looking at the right file
+              machine.screenshot("lomiri-gallery_gif_info")
+              machine.send_key("esc")
+
+              machine.wait_for_text("${imageLabel}") # make sure media shows fine
+              machine.send_key("right")
+
+          with subtest("lomiri gallery handles bmp"):
+              machine.succeed("xdotool mousemove 840 50 click 1") # open media information (extra icon, different location)
+              machine.sleep(2)
+              machine.wait_for_text("BMP") # make sure we're looking at the right file
+              machine.screenshot("lomiri-gallery_bmp_info")
+              machine.send_key("esc")
+
+              machine.wait_for_text("${imageLabel}") # make sure media shows fine
+              machine.send_key("right")
+
+          with subtest("lomiri gallery handles jpg"):
+              machine.succeed("xdotool mousemove 840 50 click 1") # open media information (extra icon, different location)
+              machine.sleep(2)
+              machine.wait_for_text("JPG") # make sure we're looking at the right file
+              machine.screenshot("lomiri-gallery_jpg_info")
+              machine.send_key("esc")
+
+              machine.wait_for_text("${imageLabel}") # make sure media shows fine
+              machine.send_key("right")
+
+          with subtest("lomiri gallery handles png"):
+              machine.succeed("xdotool mousemove 840 50 click 1") # open media information (extra icon, different location)
+              machine.sleep(2)
+              machine.wait_for_text("PNG") # make sure we're looking at the right file
+              machine.screenshot("lomiri-gallery_png_info")
+              machine.send_key("esc")
+
+              machine.wait_for_text("${imageLabel}") # make sure media shows fine
+
+      machine.succeed("pkill -f lomiri-gallery-app")
+
+      with subtest("lomiri gallery localisation works"):
+          machine.succeed("env LANG=de_DE.UTF-8 lomiri-gallery-app >&2 &")
+          machine.wait_for_text(r"(Alben|Ereignisse|Fotos)")
+          machine.screenshot("lomiri-gallery_localised")
+    '';
+}
diff --git a/nixos/tests/lomiri-system-settings.nix b/nixos/tests/lomiri-system-settings.nix
index 867fc14797e77..fac5184847520 100644
--- a/nixos/tests/lomiri-system-settings.nix
+++ b/nixos/tests/lomiri-system-settings.nix
@@ -23,7 +23,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
 
     fonts.packages = with pkgs; [
       # Intended font & helps with OCR
-      ubuntu_font_family
+      ubuntu-classic
     ];
 
     services.upower.enable = true;
diff --git a/nixos/tests/lomiri.nix b/nixos/tests/lomiri.nix
index 9d6337e9977cb..b146cba93fe62 100644
--- a/nixos/tests/lomiri.nix
+++ b/nixos/tests/lomiri.nix
@@ -1,294 +1,703 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }: let
+let
+  makeTest = import ./make-test-python.nix;
   # Just to make sure everything is the same, need it for OCR & navigating greeter
   user = "alice";
   description = "Alice Foobar";
   password = "foobar";
-in {
-  name = "lomiri";
-
-  meta = {
-    maintainers = lib.teams.lomiri.members;
-  };
-
-  nodes.machine = { config, ... }: {
-    imports = [
-      ./common/user-account.nix
-    ];
-
-    users.users.${user} = {
-      inherit description password;
-    };
-
-    services.desktopManager.lomiri.enable = lib.mkForce true;
-    services.displayManager.defaultSession = lib.mkForce "lomiri";
-
-    fonts.packages = [ pkgs.inconsolata ];
-
-    environment = {
-      # Help with OCR
-      etc."xdg/alacritty/alacritty.yml".text = lib.generators.toYAML { } {
-        font = rec {
-          normal.family = "Inconsolata";
-          bold.family = normal.family;
-          italic.family = normal.family;
-          bold_italic.family = normal.family;
-          size = 16;
+in
+{
+  greeter = makeTest (
+    { pkgs, lib, ... }:
+    {
+      name = "lomiri-greeter";
+
+      meta = {
+        maintainers = lib.teams.lomiri.members;
+      };
+
+      nodes.machine =
+        { config, ... }:
+        {
+          imports = [ ./common/user-account.nix ];
+
+          virtualisation.memorySize = 2047;
+
+          users.users.${user} = {
+            inherit description password;
+          };
+
+          services.desktopManager.lomiri.enable = lib.mkForce true;
+          services.displayManager.defaultSession = lib.mkForce "lomiri";
+
+          # Help with OCR
+          fonts.packages = [ pkgs.inconsolata ];
         };
-        colors = rec {
-          primary = {
-            foreground = "0x000000";
-            background = "0xffffff";
+
+      enableOCR = true;
+
+      testScript =
+        { nodes, ... }:
+        ''
+          def wait_for_text(text):
+              """
+              Wait for on-screen text, sleeping first to avoid wasting OCR retries on slow hardware.
+              """
+              machine.sleep(10)
+              machine.wait_for_text(text)
+
+          start_all()
+          machine.wait_for_unit("multi-user.target")
+
+          # Lomiri in greeter mode should work & be able to start a session
+          with subtest("lomiri greeter works"):
+              machine.wait_for_unit("display-manager.service")
+              machine.wait_until_succeeds("pgrep -u lightdm -f 'lomiri --mode=greeter'")
+
+              # Start page shows current time
+              wait_for_text(r"(AM|PM)")
+              machine.screenshot("lomiri_greeter_launched")
+
+              # Advance to login part
+              machine.send_key("ret")
+              wait_for_text("${description}")
+              machine.screenshot("lomiri_greeter_login")
+
+              # Login
+              machine.send_chars("${password}\n")
+              machine.wait_until_succeeds("pgrep -u ${user} -f 'lomiri --mode=full-shell'")
+
+              # Output rendering from Lomiri has started when it starts printing performance diagnostics
+              machine.wait_for_console_text("Last frame took")
+              # Look for datetime's clock, one of the last elements to load
+              wait_for_text(r"(AM|PM)")
+              machine.screenshot("lomiri_launched")
+        '';
+    }
+  );
+
+  desktop-basics = makeTest (
+    { pkgs, lib, ... }:
+    {
+      name = "lomiri-desktop-basics";
+
+      meta = {
+        maintainers = lib.teams.lomiri.members;
+      };
+
+      nodes.machine =
+        { config, ... }:
+        {
+          imports = [
+            ./common/auto.nix
+            ./common/user-account.nix
+          ];
+
+          virtualisation.memorySize = 2047;
+
+          users.users.${user} = {
+            inherit description password;
           };
-          normal = {
-            green = primary.foreground;
+
+          test-support.displayManager.auto = {
+            enable = true;
+            inherit user;
+          };
+
+          # To control mouse via scripting
+          programs.ydotool.enable = true;
+
+          services.desktopManager.lomiri.enable = lib.mkForce true;
+          services.displayManager.defaultSession = lib.mkForce "lomiri";
+
+          # Help with OCR
+          fonts.packages = [ pkgs.inconsolata ];
+
+          environment = {
+            # Help with OCR
+            etc."xdg/alacritty/alacritty.yml".text = lib.generators.toYAML { } {
+              font = rec {
+                normal.family = "Inconsolata";
+                bold.family = normal.family;
+                italic.family = normal.family;
+                bold_italic.family = normal.family;
+                size = 16;
+              };
+              colors = rec {
+                primary = {
+                  foreground = "0x000000";
+                  background = "0xffffff";
+                };
+                normal = {
+                  green = primary.foreground;
+                };
+              };
+            };
+
+            systemPackages = with pkgs; [
+              # Forcing alacritty to run as an X11 app when opened from the starter menu
+              (symlinkJoin {
+                name = "x11-${alacritty.name}";
+
+                paths = [ alacritty ];
+
+                nativeBuildInputs = [ makeWrapper ];
+
+                postBuild = ''
+                  wrapProgram $out/bin/alacritty \
+                    --set WINIT_UNIX_BACKEND x11 \
+                    --set WAYLAND_DISPLAY ""
+                '';
+
+                inherit (alacritty) meta;
+              })
+            ];
           };
+
+          # Help with OCR
+          systemd.tmpfiles.settings =
+            let
+              white = "255, 255, 255";
+              black = "0, 0, 0";
+              colorSection = color: {
+                Color = color;
+                Bold = true;
+                Transparency = false;
+              };
+              terminalColors = pkgs.writeText "customized.colorscheme" (
+                lib.generators.toINI { } {
+                  Background = colorSection white;
+                  Foreground = colorSection black;
+                  Color2 = colorSection black;
+                  Color2Intense = colorSection black;
+                }
+              );
+              terminalConfig = pkgs.writeText "terminal.ubports.conf" (
+                lib.generators.toINI { } {
+                  General = {
+                    colorScheme = "customized";
+                    fontSize = "16";
+                    fontStyle = "Inconsolata";
+                  };
+                }
+              );
+              confBase = "${config.users.users.${user}.home}/.config";
+              userDirArgs = {
+                mode = "0700";
+                user = user;
+                group = "users";
+              };
+            in
+            {
+              "10-lomiri-test-setup" = {
+                "${confBase}".d = userDirArgs;
+                "${confBase}/terminal.ubports".d = userDirArgs;
+                "${confBase}/terminal.ubports/customized.colorscheme".L.argument = "${terminalColors}";
+                "${confBase}/terminal.ubports/terminal.ubports.conf".L.argument = "${terminalConfig}";
+              };
+            };
         };
-      };
 
-      variables = {
-        # So we can test what content-hub is working behind the scenes
-        CONTENT_HUB_LOGGING_LEVEL = "2";
+      enableOCR = true;
+
+      testScript =
+        { nodes, ... }:
+        ''
+          def wait_for_text(text):
+              """
+              Wait for on-screen text, sleeping first to avoid wasting OCR retries on slow hardware.
+              """
+              machine.sleep(10)
+              machine.wait_for_text(text)
+
+          def mouse_click(xpos, ypos):
+              """
+              Move the mouse to a screen location and hit left-click.
+              """
+
+              # Need to reset to top-left, --absolute doesn't work?
+              machine.execute("ydotool mousemove -- -10000 -10000")
+              machine.sleep(2)
+
+              # Move
+              machine.execute(f"ydotool mousemove -- {xpos} {ypos}")
+              machine.sleep(2)
+
+              # Click (C0 - left button: down & up)
+              machine.execute("ydotool click 0xC0")
+              machine.sleep(2)
+
+          def open_starter():
+              """
+              Open the starter, and ensure it's opened.
+              """
+
+              # Using the keybind has a chance of instantly closing the menu again? Just click the button
+              mouse_click(20, 30)
+
+          start_all()
+          machine.wait_for_unit("multi-user.target")
+
+          # The session should start, and not be stuck in i.e. a crash loop
+          with subtest("lomiri starts"):
+              machine.wait_until_succeeds("pgrep -u ${user} -f 'lomiri --mode=full-shell'")
+              # Output rendering from Lomiri has started when it starts printing performance diagnostics
+              machine.wait_for_console_text("Last frame took")
+              # Look for datetime's clock, one of the last elements to load
+              wait_for_text(r"(AM|PM)")
+              machine.screenshot("lomiri_launched")
+
+          # Working terminal keybind is good
+          with subtest("terminal keybind works"):
+              machine.send_key("ctrl-alt-t")
+              wait_for_text(r"(${user}|machine)")
+              machine.screenshot("terminal_opens")
+
+              # lomiri-terminal-app has a separate VM test to test its basic functionality
+
+              machine.send_key("alt-f4")
+
+          # We want the ability to launch applications
+          with subtest("starter menu works"):
+              open_starter()
+              machine.screenshot("starter_opens")
+
+              # Just try the terminal again, we know that it should work
+              machine.send_chars("Terminal\n")
+              wait_for_text(r"(${user}|machine)")
+              machine.send_key("alt-f4")
+
+          # We want support for X11 apps
+          with subtest("xwayland support works"):
+              open_starter()
+              machine.send_chars("Alacritty\n")
+              wait_for_text(r"(${user}|machine)")
+              machine.screenshot("alacritty_opens")
+              machine.send_key("alt-f4")
+
+          # Morph is how we go online
+          with subtest("morph browser works"):
+              open_starter()
+              machine.send_chars("Morph\n")
+              wait_for_text(r"(Bookmarks|address|site|visited any)")
+              machine.screenshot("morph_open")
+
+              # morph-browser has a separate VM test to test its basic functionalities
+
+              machine.send_key("alt-f4")
+
+          # LSS provides DE settings
+          with subtest("system settings open"):
+              open_starter()
+              machine.send_chars("System Settings\n")
+              wait_for_text("Rotation Lock")
+              machine.screenshot("settings_open")
+
+              # lomiri-system-settings has a separate VM test to test its basic functionalities
+
+              machine.send_key("alt-f4")
+        '';
+    }
+  );
+
+  desktop-appinteractions = makeTest (
+    { pkgs, lib, ... }:
+    {
+      name = "lomiri-desktop-appinteractions";
+
+      meta = {
+        maintainers = lib.teams.lomiri.members;
       };
 
-      systemPackages = with pkgs; [
-        # For a convenient way of kicking off content-hub peer collection
-        lomiri.content-hub.examples
-
-        # Forcing alacritty to run as an X11 app when opened from the starter menu
-        (symlinkJoin {
-          name = "x11-${alacritty.name}";
-
-          paths = [ alacritty ];
-
-          nativeBuildInputs = [ makeWrapper ];
-
-          postBuild = ''
-            wrapProgram $out/bin/alacritty \
-              --set WINIT_UNIX_BACKEND x11 \
-              --set WAYLAND_DISPLAY ""
-          '';
-
-          inherit (alacritty) meta;
-        })
-      ];
-    };
-
-    # Help with OCR
-    systemd.tmpfiles.settings = let
-      white = "255, 255, 255";
-      black = "0, 0, 0";
-      colorSection = color: {
-        Color = color;
-        Bold = true;
-        Transparency = false;
-      };
-      terminalColors = pkgs.writeText "customized.colorscheme" (lib.generators.toINI {} {
-        Background = colorSection white;
-        Foreground = colorSection black;
-        Color2 = colorSection black;
-        Color2Intense = colorSection black;
-      });
-      terminalConfig = pkgs.writeText "terminal.ubports.conf" (lib.generators.toINI {} {
-        General = {
-          colorScheme = "customized";
-          fontSize = "16";
-          fontStyle = "Inconsolata";
+      nodes.machine =
+        { config, ... }:
+        {
+          imports = [
+            ./common/auto.nix
+            ./common/user-account.nix
+          ];
+
+          virtualisation.memorySize = 2047;
+
+          users.users.${user} = {
+            inherit description password;
+            # polkit agent test
+            extraGroups = [ "wheel" ];
+          };
+
+          test-support.displayManager.auto = {
+            enable = true;
+            inherit user;
+          };
+
+          # To control mouse via scripting
+          programs.ydotool.enable = true;
+
+          services.desktopManager.lomiri.enable = lib.mkForce true;
+          services.displayManager.defaultSession = lib.mkForce "lomiri";
+
+          # Help with OCR
+          fonts.packages = [ pkgs.inconsolata ];
+
+          environment = {
+            # Help with OCR
+            etc."xdg/alacritty/alacritty.yml".text = lib.generators.toYAML { } {
+              font = rec {
+                normal.family = "Inconsolata";
+                bold.family = normal.family;
+                italic.family = normal.family;
+                bold_italic.family = normal.family;
+                size = 16;
+              };
+              colors = rec {
+                primary = {
+                  foreground = "0x000000";
+                  background = "0xffffff";
+                };
+                normal = {
+                  green = primary.foreground;
+                };
+              };
+            };
+
+            variables = {
+              # So we can check that content-hub is working behind the scenes
+              CONTENT_HUB_LOGGING_LEVEL = "2";
+            };
+
+            systemPackages = with pkgs; [
+              # For a convenient way of kicking off content-hub peer collection
+              lomiri.content-hub.examples
+            ];
+          };
+
+          # Help with OCR
+          systemd.tmpfiles.settings =
+            let
+              white = "255, 255, 255";
+              black = "0, 0, 0";
+              colorSection = color: {
+                Color = color;
+                Bold = true;
+                Transparency = false;
+              };
+              terminalColors = pkgs.writeText "customized.colorscheme" (
+                lib.generators.toINI { } {
+                  Background = colorSection white;
+                  Foreground = colorSection black;
+                  Color2 = colorSection black;
+                  Color2Intense = colorSection black;
+                }
+              );
+              terminalConfig = pkgs.writeText "terminal.ubports.conf" (
+                lib.generators.toINI { } {
+                  General = {
+                    colorScheme = "customized";
+                    fontSize = "16";
+                    fontStyle = "Inconsolata";
+                  };
+                }
+              );
+              confBase = "${config.users.users.${user}.home}/.config";
+              userDirArgs = {
+                mode = "0700";
+                user = user;
+                group = "users";
+              };
+            in
+            {
+              "10-lomiri-test-setup" = {
+                "${confBase}".d = userDirArgs;
+                "${confBase}/terminal.ubports".d = userDirArgs;
+                "${confBase}/terminal.ubports/customized.colorscheme".L.argument = "${terminalColors}";
+                "${confBase}/terminal.ubports/terminal.ubports.conf".L.argument = "${terminalConfig}";
+              };
+            };
         };
-      });
-      confBase = "${config.users.users.${user}.home}/.config";
-      userDirArgs = {
-        mode = "0700";
-        user = user;
-        group = "users";
-      };
-    in {
-      "10-lomiri-test-setup" = {
-        "${confBase}".d = userDirArgs;
-        "${confBase}/terminal.ubports".d = userDirArgs;
-        "${confBase}/terminal.ubports/customized.colorscheme".L.argument = "${terminalColors}";
-        "${confBase}/terminal.ubports/terminal.ubports.conf".L.argument = "${terminalConfig}";
+
+      enableOCR = true;
+
+      testScript =
+        { nodes, ... }:
+        ''
+          def wait_for_text(text):
+              """
+              Wait for on-screen text, sleeping first to avoid wasting OCR retries on slow hardware.
+              """
+              machine.sleep(10)
+              machine.wait_for_text(text)
+
+          def toggle_maximise():
+              """
+              Maximise the current window.
+              """
+              machine.send_key("ctrl-meta_l-up")
+
+              # For some reason, Lomiri in these VM tests very frequently opens the starter menu a few seconds after sending the above.
+              # Because this isn't reliably reproducible, and there is no command to await when OCR doesn't pick up any text,
+              # the best we can do is send an Escape after waiting some arbitrary time and hope that it works out fine.
+              machine.sleep(5)
+              machine.send_key("esc")
+              machine.sleep(5)
+
+          def mouse_click(xpos, ypos):
+              """
+              Move the mouse to a screen location and hit left-click.
+              """
+
+              # Need to reset to top-left, --absolute doesn't work?
+              machine.execute("ydotool mousemove -- -10000 -10000")
+              machine.sleep(2)
+
+              # Move
+              machine.execute(f"ydotool mousemove -- {xpos} {ypos}")
+              machine.sleep(2)
+
+              # Click (C0 - left button: down & up)
+              machine.execute("ydotool click 0xC0")
+              machine.sleep(2)
+
+          def open_starter():
+              """
+              Open the starter, and ensure it's opened.
+              """
+
+              # Using the keybind has a chance of instantly closing the menu again? Just click the button
+              mouse_click(20, 30)
+
+          start_all()
+          machine.wait_for_unit("multi-user.target")
+
+          # The session should start, and not be stuck in i.e. a crash loop
+          with subtest("lomiri starts"):
+              machine.wait_until_succeeds("pgrep -u ${user} -f 'lomiri --mode=full-shell'")
+              # Output rendering from Lomiri has started when it starts printing performance diagnostics
+              machine.wait_for_console_text("Last frame took")
+              # Look for datetime's clock, one of the last elements to load
+              wait_for_text(r"(AM|PM)")
+              machine.screenshot("lomiri_launched")
+
+          # Working terminal keybind is good
+          with subtest("terminal keybind works"):
+              machine.send_key("ctrl-alt-t")
+              wait_for_text(r"(${user}|machine)")
+              machine.screenshot("terminal_opens")
+
+              # lomiri-terminal-app has a separate VM test to test its basic functionality
+
+              # for the LSS content-hub test to work reliably, we need to kick off peer collecting
+              machine.send_chars("content-hub-test-importer\n")
+              wait_for_text(r"(/build/source|hub.cpp|handler.cpp|void|virtual|const)") # awaiting log messages from content-hub
+              machine.send_key("ctrl-c")
+
+              # Doing this here, since we need an in-session shell & separately starting a terminal again wastes time
+              with subtest("polkit agent works"):
+                  machine.send_chars("pkexec touch /tmp/polkit-test\n")
+                  # There's an authentication notification here that gains focus, but we struggle with OCRing it
+                  # Just hope that it's up after a short wait
+                  machine.sleep(10)
+                  machine.screenshot("polkit_agent")
+                  machine.send_chars("${password}")
+                  machine.sleep(2) # Hopefully enough delay to make sure all the password characters have been registered? Maybe just placebo
+                  machine.send_chars("\n")
+                  machine.wait_for_file("/tmp/polkit-test", 10)
+
+              machine.send_key("alt-f4")
+
+          # LSS provides DE settings
+          with subtest("system settings open"):
+              open_starter()
+              machine.send_chars("System Settings\n")
+              wait_for_text("Rotation Lock")
+              machine.screenshot("settings_open")
+
+              # lomiri-system-settings has a separate VM test; only test Lomiri-specific content-hub functionalities here
+
+              # Make fullscreen, can't navigate to Background plugin via keyboard unless window has non-phone-like aspect ratio
+              toggle_maximise()
+
+              # Load Background plugin
+              machine.send_key("tab")
+              machine.send_key("tab")
+              machine.send_key("tab")
+              machine.send_key("tab")
+              machine.send_key("tab")
+              machine.send_key("tab")
+              machine.send_key("ret")
+              wait_for_text("Background image")
+
+              # Try to load custom background
+              machine.send_key("shift-tab")
+              machine.send_key("shift-tab")
+              machine.send_key("shift-tab")
+              machine.send_key("shift-tab")
+              machine.send_key("shift-tab")
+              machine.send_key("shift-tab")
+              machine.send_key("ret")
+
+              # Peers should be loaded
+              wait_for_text("Morph") # or Gallery, but Morph is already packaged
+              machine.screenshot("settings_content-hub_peers")
+
+              # Select Morph as content source
+              mouse_click(370, 100)
+
+              # Expect Morph to be brought into the foreground, with its Downloads page open
+              wait_for_text("No downloads")
+
+              # If content-hub encounters a problem, it may have crashed the original application issuing the request.
+              # Check that it's still alive
+              machine.succeed("pgrep -u ${user} -f lomiri-system-settings")
+
+              machine.screenshot("content-hub_exchange")
+
+              # Testing any more would require more applications & setup; the fact that it's already being attempted is a good sign
+              machine.send_key("esc")
+
+              machine.sleep(2) # sleep a tiny bit so morph can close & the focus can return to LSS
+              machine.send_key("alt-f4")
+        '';
+    }
+  );
+
+  desktop-ayatana-indicators = makeTest (
+    { pkgs, lib, ... }:
+    {
+      name = "lomiri-desktop-ayatana-indicators";
+
+      meta = {
+        maintainers = lib.teams.lomiri.members;
       };
-    };
-  };
-
-  enableOCR = true;
-
-  testScript = { nodes, ... }: ''
-    def open_starter():
-        """
-        Open the starter, and ensure it's opened.
-        """
-        machine.send_key("meta_l-a")
-        # Look for any of the default apps
-        machine.wait_for_text(r"(Search|System|Settings|Morph|Browser|Terminal|Alacritty)")
-
-    def toggle_maximise():
-        """
-        Send the keybind to maximise the current window.
-        """
-        machine.send_key("ctrl-meta_l-up")
-
-        # For some reason, Lomiri in these VM tests very frequently opens the starter menu a few seconds after sending the above.
-        # Because this isn't 100% reproducible all the time, and there is no command to await when OCR doesn't pick up some text,
-        # the best we can do is send some Escape input after waiting some arbitrary time and hope that it works out fine.
-        machine.sleep(5)
-        machine.send_key("esc")
-        machine.sleep(5)
-
-    start_all()
-    machine.wait_for_unit("multi-user.target")
-
-    # Lomiri in greeter mode should work & be able to start a session
-    with subtest("lomiri greeter works"):
-        machine.wait_for_unit("display-manager.service")
-        # Start page shows current tie
-        machine.wait_for_text(r"(AM|PM)")
-        machine.screenshot("lomiri_greeter_launched")
-
-        # Advance to login part
-        machine.send_key("ret")
-        machine.wait_for_text("${description}")
-        machine.screenshot("lomiri_greeter_login")
-
-        # Login
-        machine.send_chars("${password}\n")
-        # Best way I can think of to differenciate "Lomiri in LightDM greeter mode" from "Lomiri in user shell mode"
-        machine.wait_until_succeeds("pgrep -u ${user} -f 'lomiri --mode=full-shell'")
-
-    # The session should start, and not be stuck in i.e. a crash loop
-    with subtest("lomiri starts"):
-        # Output rendering from Lomiri has started when it starts printing performance diagnostics
-        machine.wait_for_console_text("Last frame took")
-        # Look for datetime's clock, one of the last elements to load
-        machine.wait_for_text(r"(AM|PM)")
-        machine.screenshot("lomiri_launched")
-
-    # Working terminal keybind is good
-    with subtest("terminal keybind works"):
-        machine.send_key("ctrl-alt-t")
-        machine.wait_for_text(r"(${user}|machine)")
-        machine.screenshot("terminal_opens")
-
-        # lomiri-terminal-app has a separate VM test to test its basic functionality
-
-        # for the LSS content-hub test to work reliably, we need to kick off peer collecting
-        machine.send_chars("content-hub-test-importer\n")
-        machine.wait_for_text(r"(/build/source|hub.cpp|handler.cpp|void|virtual|const)") # awaiting log messages from content-hub
-        machine.send_key("ctrl-c")
-
-        machine.send_key("alt-f4")
-
-    # We want the ability to launch applications
-    with subtest("starter menu works"):
-        open_starter()
-        machine.screenshot("starter_opens")
-
-        # Just try the terminal again, we know that it should work
-        machine.send_chars("Terminal\n")
-        machine.wait_for_text(r"(${user}|machine)")
-        machine.send_key("alt-f4")
-
-    # We want support for X11 apps
-    with subtest("xwayland support works"):
-        open_starter()
-        machine.send_chars("Alacritty\n")
-        machine.wait_for_text(r"(${user}|machine)")
-        machine.screenshot("alacritty_opens")
-        machine.send_key("alt-f4")
-
-    # LSS provides DE settings
-    with subtest("system settings open"):
-        open_starter()
-        machine.send_chars("System Settings\n")
-        machine.wait_for_text("Rotation Lock")
-        machine.screenshot("settings_open")
-
-        # lomiri-system-settings has a separate VM test, only test Lomiri-specific content-hub functionalities here
-
-        # Make fullscreen, can't navigate to Background plugin via keyboard unless window has non-phone-like aspect ratio
-        toggle_maximise()
-
-        # Load Background plugin
-        machine.send_key("tab")
-        machine.send_key("tab")
-        machine.send_key("tab")
-        machine.send_key("tab")
-        machine.send_key("tab")
-        machine.send_key("tab")
-        machine.send_key("ret")
-        machine.wait_for_text("Background image")
-
-        # Try to load custom background
-        machine.send_key("shift-tab")
-        machine.send_key("shift-tab")
-        machine.send_key("shift-tab")
-        machine.send_key("shift-tab")
-        machine.send_key("shift-tab")
-        machine.send_key("shift-tab")
-        machine.send_key("ret")
-
-        # Peers should be loaded
-        machine.wait_for_text("Morph") # or Gallery, but Morph is already packaged
-        machine.screenshot("settings_content-hub_peers")
-
-        # Sadly, it doesn't seem possible to actually select a peer and attempt a content-hub data exchange with just the keyboard
-
-        machine.send_key("alt-f4")
-
-    # Morph is how we go online
-    with subtest("morph browser works"):
-        open_starter()
-        machine.send_chars("Morph\n")
-        machine.wait_for_text(r"(Bookmarks|address|site|visited any)")
-        machine.screenshot("morph_open")
-
-        # morph-browser has a separate VM test, there isn't anything new we could test here
-
-        machine.send_key("alt-f4")
-
-    # The ayatana indicators are an important part of the experience, and they hold the only graphical way of exiting the session.
-    # Reaching them via the intended way requires wayland mouse control, but ydotool lacks a module for its daemon:
-    # https://github.com/NixOS/nixpkgs/issues/183659
-    # Luckily, there's a test app that also displays their contents, but it's abit inconsistent. Hopefully this is *good-enough*.
-    with subtest("ayatana indicators work"):
-        open_starter()
-        machine.send_chars("Indicators\n")
-        machine.wait_for_text(r"(Indicators|Client|List|network|datetime|session)")
-        machine.screenshot("indicators_open")
-
-        # Element tab order within the indicator menus is not fully deterministic
-        # Only check that the indicators are listed & their items load
-
-        with subtest("lomiri indicator network works"):
-            # Select indicator-network
-            machine.send_key("tab")
-            # Don't go further down, first entry
-            machine.send_key("ret")
-            machine.wait_for_text(r"(Flight|Wi-Fi)")
-            machine.screenshot("indicators_network")
-
-        machine.send_key("shift-tab")
-        machine.send_key("ret")
-        machine.wait_for_text(r"(Indicators|Client|List|network|datetime|session)")
-
-        with subtest("ayatana indicator datetime works"):
-            # Select ayatana-indicator-datetime
-            machine.send_key("tab")
-            machine.send_key("down")
-            machine.send_key("ret")
-            machine.wait_for_text("Time and Date Settings")
-            machine.screenshot("indicators_timedate")
-
-        machine.send_key("shift-tab")
-        machine.send_key("ret")
-        machine.wait_for_text(r"(Indicators|Client|List|network|datetime|session)")
-
-        with subtest("ayatana indicator session works"):
-            # Select ayatana-indicator-session
-            machine.send_key("tab")
-            machine.send_key("down")
-            machine.send_key("ret")
-            machine.wait_for_text("Log Out")
-            machine.screenshot("indicators_session")
-  '';
-})
+
+      nodes.machine =
+        { config, ... }:
+        {
+          imports = [
+            ./common/auto.nix
+            ./common/user-account.nix
+          ];
+
+          virtualisation.memorySize = 2047;
+
+          users.users.${user} = {
+            inherit description password;
+          };
+
+          test-support.displayManager.auto = {
+            enable = true;
+            inherit user;
+          };
+
+          # To control mouse via scripting
+          programs.ydotool.enable = true;
+
+          services.desktopManager.lomiri.enable = lib.mkForce true;
+          services.displayManager.defaultSession = lib.mkForce "lomiri";
+
+          # Help with OCR
+          fonts.packages = [ pkgs.inconsolata ];
+
+          environment.systemPackages = with pkgs; [ qt5.qttools ];
+        };
+
+      enableOCR = true;
+
+      testScript =
+        { nodes, ... }:
+        ''
+          def wait_for_text(text):
+              """
+              Wait for on-screen text, sleeping first to avoid wasting OCR retries on slow hardware.
+              """
+              machine.sleep(10)
+              machine.wait_for_text(text)
+
+          def mouse_click(xpos, ypos):
+              """
+              Move the mouse to a screen location and hit left-click.
+              """
+
+              # Need to reset to top-left, --absolute doesn't work?
+              machine.execute("ydotool mousemove -- -10000 -10000")
+              machine.sleep(2)
+
+              # Move
+              machine.execute(f"ydotool mousemove -- {xpos} {ypos}")
+              machine.sleep(2)
+
+              # Click (C0 - left button: down & up)
+              machine.execute("ydotool click 0xC0")
+              machine.sleep(2)
+
+          start_all()
+          machine.wait_for_unit("multi-user.target")
+
+          # The session should start, and not be stuck in i.e. a crash loop
+          with subtest("lomiri starts"):
+              machine.wait_until_succeeds("pgrep -u ${user} -f 'lomiri --mode=full-shell'")
+              # Output rendering from Lomiri has started when it starts printing performance diagnostics
+              machine.wait_for_console_text("Last frame took")
+              # Look for datetime's clock, one of the last elements to load
+              wait_for_text(r"(AM|PM)")
+              machine.screenshot("lomiri_launched")
+
+          # The ayatana indicators are an important part of the experience, and they hold the only graphical way of exiting the session.
+          # There's a test app we could use that also displays their contents, but it's a bit inconsistent.
+          with subtest("ayatana indicators work"):
+              mouse_click(735, 0) # the cog in the top-right, for the session indicator
+              wait_for_text(r"(Notifications|Rotation|Battery|Sound|Time|Date|System)")
+              machine.screenshot("indicators_open")
+
+              # Indicator order within the menus *should* be fixed based on per-indicator order setting
+              # Session is the one we clicked, but it's the one we want to test last (logout). Go as far left as we can first.
+              machine.send_key("left")
+              machine.send_key("left")
+              machine.send_key("left")
+              machine.send_key("left")
+              machine.send_key("left")
+              machine.send_key("left")
+              # Notifications are usually empty; nothing to check there
+
+              with subtest("ayatana indicator display works"):
+                  # We start on this, don't go right
+                  wait_for_text("Lock")
+                  machine.screenshot("indicators_display")
+
+              with subtest("ayatana indicator bluetooth works"):
+                  machine.send_key("right")
+                  wait_for_text("Bluetooth settings")
+                  machine.screenshot("indicators_bluetooth")
+
+              with subtest("lomiri indicator network works"):
+                  machine.send_key("right")
+                  wait_for_text(r"(Flight|Wi-Fi)")
+                  machine.screenshot("indicators_network")
+
+              with subtest("ayatana indicator sound works"):
+                  machine.send_key("right")
+                  wait_for_text(r"(Silent|Volume)")
+                  machine.screenshot("indicators_sound")
+
+              with subtest("ayatana indicator power works"):
+                  machine.send_key("right")
+                  wait_for_text(r"(Charge|Battery settings)")
+                  machine.screenshot("indicators_power")
+
+              with subtest("ayatana indicator datetime works"):
+                  machine.send_key("right")
+                  wait_for_text("Time and Date Settings")
+                  machine.screenshot("indicators_timedate")
+
+              with subtest("ayatana indicator session works"):
+                  machine.send_key("right")
+                  wait_for_text("Log Out")
+                  machine.screenshot("indicators_session")
+
+                  # We should be able to log out and return to the greeter
+                  mouse_click(720, 280) # "Log Out"
+                  mouse_click(400, 240) # confirm logout
+                  machine.wait_until_fails("pgrep -u ${user} -f 'lomiri --mode=full-shell'")
+        '';
+    }
+  );
+
+}
diff --git a/nixos/tests/lorri/default.nix b/nixos/tests/lorri/default.nix
index a4bdc92490ce1..e9e26c03f6ca1 100644
--- a/nixos/tests/lorri/default.nix
+++ b/nixos/tests/lorri/default.nix
@@ -17,12 +17,12 @@ import ../make-test-python.nix {
 
     # Start the daemon and wait until it is ready
     machine.execute("lorri daemon > lorri.stdout 2> lorri.stderr &")
-    machine.wait_until_succeeds("grep --fixed-strings 'ready' lorri.stdout")
+    machine.wait_until_succeeds("grep --fixed-strings 'ready' lorri.stderr")
 
     # Ping the daemon
-    machine.succeed("lorri internal ping shell.nix")
+    machine.succeed("lorri internal ping --shell-file shell.nix")
 
     # Wait for the daemon to finish the build
-    machine.wait_until_succeeds("grep --fixed-strings 'Completed' lorri.stdout")
+    machine.wait_until_succeeds("grep --fixed-strings 'Completed' lorri.stderr")
   '';
 }
diff --git a/nixos/tests/lvm2/default.nix b/nixos/tests/lvm2/default.nix
index 84f24cbc38593..346ec6739501d 100644
--- a/nixos/tests/lvm2/default.nix
+++ b/nixos/tests/lvm2/default.nix
@@ -10,7 +10,7 @@ let
   tests = let callTest = p: lib.flip (import p) { inherit system pkgs; }; in {
     thinpool = { test = callTest ./thinpool.nix; kernelFilter = lib.id; };
     # we would like to test all versions, but the kernel module currently does not compile against the other versions
-    vdo = { test = callTest ./vdo.nix; kernelFilter = lib.filter (v: v == "5.15"); };
+    vdo = { test = callTest ./vdo.nix; kernelFilter = lib.filter (v: v == "latest"); };
 
 
     # systemd in stage 1
@@ -26,7 +26,7 @@ let
     };
     vdo-sd-stage-1 = {
       test = callTest ./systemd-stage-1.nix;
-      kernelFilter = lib.filter (v: v == "5.15");
+      kernelFilter = lib.filter (v: v == "latest");
       flavour = "vdo";
     };
   };
diff --git a/nixos/tests/lvm2/systemd-stage-1.nix b/nixos/tests/lvm2/systemd-stage-1.nix
index 7f106e1b0dd64..fe57a615a9555 100644
--- a/nixos/tests/lvm2/systemd-stage-1.nix
+++ b/nixos/tests/lvm2/systemd-stage-1.nix
@@ -81,7 +81,17 @@ in import ../make-test-python.nix ({ pkgs, lib, ... }: {
       kernelPackages = lib.mkIf (kernelPackages != null) kernelPackages;
     };
 
-    specialisation.boot-lvm.configuration.virtualisation.rootDevice = "/dev/test_vg/test_lv";
+    specialisation.boot-lvm.configuration.virtualisation = {
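+      # Boot this specialisation directly from the LVM volume: drop the default test
+      # filesystem layout and mount the LV as an XFS root (matching the mount assertion below)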
+      useDefaultFilesystems = false;
+      fileSystems = {
+        "/" = {
+          device = "/dev/test_vg/test_lv";
+          fsType = "xfs";
+        };
+      };
+
+      rootDevice = "/dev/test_vg/test_lv";
+    };
   };
 
   testScript = ''
@@ -99,7 +109,7 @@ in import ../make-test-python.nix ({ pkgs, lib, ... }: {
 
     # Ensure we have successfully booted from LVM
     assert "(initrd)" in machine.succeed("systemd-analyze")  # booted with systemd in stage 1
-    assert "/dev/mapper/test_vg-test_lv on / type ext4" in machine.succeed("mount")
+    assert "/dev/mapper/test_vg-test_lv on / type xfs" in machine.succeed("mount")
     assert "hello" in machine.succeed("cat /test")
     ${extraCheck}
   '';
diff --git a/nixos/tests/lxc/default.nix b/nixos/tests/lxc/default.nix
new file mode 100644
index 0000000000000..0f67010863efc
--- /dev/null
+++ b/nixos/tests/lxc/default.nix
@@ -0,0 +1,124 @@
+import ../make-test-python.nix (
+  { pkgs, lib, ... }:
+
+  let
+    releases = import ../../release.nix {
+      configuration = {
+        # Building documentation makes the test unnecessarily take a longer time:
+        documentation.enable = lib.mkForce false;
+      };
+    };
+
+    lxc-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
+    lxc-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+
+  in
+  {
+    name = "lxc-container-unprivileged";
+
+    meta = {
+      maintainers = lib.teams.lxc.members;
+    };
+
+    nodes.machine =
+      { lib, pkgs, ... }:
+      {
+        virtualisation = {
+          diskSize = 6144;
+          cores = 2;
+          memorySize = 512;
+          writableStore = true;
+
+          lxc = {
+            enable = true;
+            unprivilegedContainers = true;
+            systemConfig = ''
+              lxc.lxcpath = /tmp/lxc
+            '';
+            defaultConfig = ''
+              lxc.net.0.type = veth
+              lxc.net.0.link = lxcbr0
+              lxc.net.0.flags = up
+              lxc.net.0.hwaddr = 00:16:3e:xx:xx:xx
+              lxc.idmap = u 0 100000 65536
+              lxc.idmap = g 0 100000 65536
+            '';
+            # Permit members of the lxc-user group (i.e. alice) to attach up to 10 veth devices to lxcbr0
+            usernetConfig = ''
+              @lxc-user veth lxcbr0 10
+            '';
+            bridgeConfig = ''
+              LXC_IPV6_ADDR=""
+              LXC_IPV6_MASK=""
+              LXC_IPV6_NETWORK=""
+              LXC_IPV6_NAT="false"
+            '';
+          };
+        };
+
+        # Needed for LXC
+        environment.systemPackages = with pkgs; [
+          wget
+          dnsmasq
+        ];
+
+        # Create user for test
+        users.users.alice = {
+          isNormalUser = true;
+          password = "test";
+          description = "Lxc unprivileged user with access to lxcbr0";
+          extraGroups = [ "lxc-user" ];
+          subGidRanges = [
+            {
+              startGid = 100000;
+              count = 65536;
+            }
+          ];
+          subUidRanges = [
+            {
+              startUid = 100000;
+              count = 65536;
+            }
+          ];
+        };
+
+        users.users.bob = {
+          isNormalUser = true;
+          password = "test";
+          description = "Lxc unprivileged user without access to lxcbr0";
+          subGidRanges = [
+            {
+              startGid = 100000;
+              count = 65536;
+            }
+          ];
+          subUidRanges = [
+            {
+              startUid = 100000;
+              count = 65536;
+            }
+          ];
+        };
+      };
+
+    testScript = ''
+      machine.wait_for_unit("lxc-net.service")
+
+      # Copy config files for alice
+      machine.execute("su -- alice -c 'mkdir -p ~/.config/lxc'")
+      machine.execute("su -- alice -c 'cp /etc/lxc/default.conf ~/.config/lxc/'")
+      machine.execute("su -- alice -c 'cp /etc/lxc/lxc.conf ~/.config/lxc/'")
+
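+      # Build an unprivileged container from the prebuilt NixOS LXC image
+      # (metadata + root filesystem tarballs), then start and stop it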
+      machine.succeed("su -- alice -c 'lxc-create -t local -n test -- --metadata ${lxc-image-metadata}/*/*.tar.xz --fstree ${lxc-image-rootfs}/*/*.tar.xz'")
+      machine.succeed("su -- alice -c 'lxc-start test'")
+      machine.succeed("su -- alice -c 'lxc-stop test'")
+
+      # Copy config files for bob
+      machine.execute("su -- bob -c 'mkdir -p ~/.config/lxc'")
+      machine.execute("su -- bob -c 'cp /etc/lxc/default.conf ~/.config/lxc/'")
+      machine.execute("su -- bob -c 'cp /etc/lxc/lxc.conf ~/.config/lxc/'")
+
+      machine.fail("su -- bob -c 'lxc-start test'")
+    '';
+  }
+)
diff --git a/nixos/tests/lxd/container.nix b/nixos/tests/lxd/container.nix
index ef9c3f4bbee7e..c04ae42afb8c2 100644
--- a/nixos/tests/lxd/container.nix
+++ b/nixos/tests/lxd/container.nix
@@ -18,10 +18,6 @@ let
 in {
   name = "lxd-container";
 
-  meta = {
-    maintainers = lib.teams.lxc.members;
-  };
-
   nodes.machine = { lib, ... }: {
     virtualisation = {
       diskSize = 6144;
@@ -68,7 +64,7 @@ in {
 
     with subtest("Squashfs image is functional"):
         machine.succeed(
-            "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs-squashfs} --alias nixos-squashfs"
+            "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs-squashfs}/nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}.squashfs --alias nixos-squashfs"
         )
         machine.succeed("lxc launch nixos-squashfs container")
         with machine.nested("Waiting for instance to start and be usable"):
diff --git a/nixos/tests/lxd/nftables.nix b/nixos/tests/lxd/nftables.nix
index e6ce4089d719d..d419f9b66af13 100644
--- a/nixos/tests/lxd/nftables.nix
+++ b/nixos/tests/lxd/nftables.nix
@@ -8,10 +8,6 @@
 import ../make-test-python.nix ({ pkgs, lib, ...} : {
   name = "lxd-nftables";
 
-  meta = {
-    maintainers = lib.teams.lxc.members;
-  };
-
   nodes.machine = { lib, ... }: {
     virtualisation = {
       lxd.enable = true;
diff --git a/nixos/tests/lxd/preseed.nix b/nixos/tests/lxd/preseed.nix
index fb80dcf3893e4..2e0ff33d521fe 100644
--- a/nixos/tests/lxd/preseed.nix
+++ b/nixos/tests/lxd/preseed.nix
@@ -3,10 +3,6 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
 {
   name = "lxd-preseed";
 
-  meta = {
-    maintainers = lib.teams.lxc.members;
-  };
-
   nodes.machine = { lib, ... }: {
     virtualisation = {
       diskSize = 4096;
diff --git a/nixos/tests/lxd/ui.nix b/nixos/tests/lxd/ui.nix
index c442f44ab81cd..f96c3d74d93cd 100644
--- a/nixos/tests/lxd/ui.nix
+++ b/nixos/tests/lxd/ui.nix
@@ -1,10 +1,6 @@
-import ../make-test-python.nix ({ pkgs, lib, ... }: {
+import ../make-test-python.nix ({ pkgs, ... }: {
   name = "lxd-ui";
 
-  meta = {
-    maintainers = lib.teams.lxc.members;
-  };
-
   nodes.machine = { lib, ... }: {
     virtualisation = {
       lxd.enable = true;
diff --git a/nixos/tests/lxd/virtual-machine.nix b/nixos/tests/lxd/virtual-machine.nix
index 2a9dd8fcdbf61..14c5e8a82aa8f 100644
--- a/nixos/tests/lxd/virtual-machine.nix
+++ b/nixos/tests/lxd/virtual-machine.nix
@@ -18,10 +18,6 @@ let
 in {
   name = "lxd-virtual-machine";
 
-  meta = {
-    maintainers = lib.teams.lxc.members;
-  };
-
   nodes.machine = {lib, ...}: {
     virtualisation = {
       diskSize = 4096;
diff --git a/nixos/tests/ly.nix b/nixos/tests/ly.nix
new file mode 100644
index 0000000000000..04c6ed9c7774b
--- /dev/null
+++ b/nixos/tests/ly.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix (
+  { ... }:
+
+  {
+    name = "ly";
+
+    nodes.machine =
+      { ... }:
+      {
+        imports = [ ./common/user-account.nix ];
+        services.displayManager.ly = {
+          enable = true;
+          settings = {
+            load = false;
+            save = false;
+          };
+        };
+        services.xserver.enable = true;
+        services.displayManager.defaultSession = "none+icewm";
+        services.xserver.windowManager.icewm.enable = true;
+      };
+
+    testScript =
+      { nodes, ... }:
+      let
+        user = nodes.machine.users.users.alice;
+      in
+      ''
+        start_all()
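+        # Wait for ly's password prompt on tty2, then switch to that tty and log in as alice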
+        machine.wait_until_tty_matches("2", "password:")
+        machine.send_key("ctrl-alt-f2")
+        machine.sleep(1)
+        machine.screenshot("ly")
+        machine.send_chars("alice")
+        machine.send_key("tab")
+        machine.send_chars("${user.password}")
+        machine.send_key("ret")
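+        # ly writes the session's Xauthority cookie to the user's runtime dir; merge it so the
+        # test's X queries below (wait_for_window) can authenticate against the session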
+        machine.wait_for_file("/run/user/${toString user.uid}/lyxauth")
+        machine.succeed("xauth merge /run/user/${toString user.uid}/lyxauth")
+        machine.wait_for_window("^IceWM ")
+        machine.screenshot("icewm")
+      '';
+  }
+)
diff --git a/nixos/tests/mailpit.nix b/nixos/tests/mailpit.nix
new file mode 100644
index 0000000000000..887f700ae6843
--- /dev/null
+++ b/nixos/tests/mailpit.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+  {
+    name = "mailpit";
+    meta.maintainers = lib.teams.flyingcircus.members;
+
+    nodes.machine =
+      { pkgs, ... }:
+      {
+        services.mailpit.instances.default = { };
+
+        environment.systemPackages = with pkgs; [ swaks ];
+      };
+
+    testScript = ''
+      start_all()
+
+      from json import loads
+
+      machine.wait_for_unit("mailpit-default.service")
+      machine.wait_for_open_port(1025)
+      machine.wait_for_open_port(8025)
+      machine.succeed(
+          'echo "this is the body of the email" | swaks --to root@example.org --body - --server localhost:1025'
+      )
+
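+      # Query Mailpit's HTTP API and check that the captured message has the expected recipient and body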
+      received = loads(machine.succeed("curl http://localhost:8025/api/v1/messages"))
+      assert received["total"] == 1
+      message = received["messages"][0]
+      assert len(message["To"]) == 1
+      assert message["To"][0]["Address"] == "root@example.org"
+      assert "this is the body of the email" in message["Snippet"]
+    '';
+  }
+)
diff --git a/nixos/tests/mate-wayland.nix b/nixos/tests/mate-wayland.nix
index e5c96d2af7470..73f94ababc493 100644
--- a/nixos/tests/mate-wayland.nix
+++ b/nixos/tests/mate-wayland.nix
@@ -20,8 +20,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     };
     services.xserver.desktopManager.mate.enableWaylandSession = true;
 
-    hardware.pulseaudio.enable = true;
-
     # Need to switch to a different GPU driver than the default one (-vga std) so that wayfire can launch:
     virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
   };
@@ -41,7 +39,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       with subtest("Check if MATE session components actually start"):
           for i in ["wayfire", "mate-panel", "mate-wayland.sh", "mate-wayland-components.sh"]:
               machine.wait_until_succeeds(f"pgrep -f {i}")
-          machine.wait_for_text('(Applications|Places|System)')
           # It is expected that this applet doesn't work in Wayland
           machine.wait_for_text('WorkspaceSwitcherApplet')
 
diff --git a/nixos/tests/mate.nix b/nixos/tests/mate.nix
index 1252ec43cf3d5..0cadd6d0d5bff 100644
--- a/nixos/tests/mate.nix
+++ b/nixos/tests/mate.nix
@@ -21,10 +21,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     };
 
     services.xserver.desktopManager.mate.enable = true;
-
-    # Silence log spam due to no sound drivers loaded:
-    # ALSA lib confmisc.c:855:(parse_card) cannot find card '0'
-    hardware.pulseaudio.enable = true;
   };
 
   enableOCR = true;
diff --git a/nixos/tests/matomo.nix b/nixos/tests/matomo.nix
index 130f3dd8485a3..cf54f71b738fc 100644
--- a/nixos/tests/matomo.nix
+++ b/nixos/tests/matomo.nix
@@ -41,14 +41,14 @@ let
 in {
   matomo = matomoTest pkgs.matomo // {
     name = "matomo";
-    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ];
+    meta.maintainers = with maintainers; [ florianjacob mmilata twey boozedog ];
   };
   matomo-beta = matomoTest pkgs.matomo-beta // {
     name = "matomo-beta";
-    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ];
+    meta.maintainers = with maintainers; [ florianjacob mmilata twey boozedog ];
   };
   matomo_5 = matomoTest pkgs.matomo_5 // {
     name = "matomo-5";
-    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ] ++ lib.teams.flyingcircus.members;
+    meta.maintainers = with maintainers; [ florianjacob mmilata twey boozedog ] ++ lib.teams.flyingcircus.members;
   };
 }
diff --git a/nixos/tests/matrix/appservice-irc.nix b/nixos/tests/matrix/appservice-irc.nix
index 78c53024ca6c4..41a6b005064fd 100644
--- a/nixos/tests/matrix/appservice-irc.nix
+++ b/nixos/tests/matrix/appservice-irc.nix
@@ -1,4 +1,4 @@
-import ../make-test-python.nix ({ pkgs, ... }:
+{ pkgs, ... }:
   let
     homeserverUrl = "http://homeserver:8008";
   in
@@ -9,7 +9,7 @@ import ../make-test-python.nix ({ pkgs, ... }:
     };
 
     nodes = {
-      homeserver = { pkgs, ... }: {
+      homeserver = {
         # We'll switch to this once the config is copied into place
         specialisation.running.configuration = {
           services.matrix-synapse = {
@@ -46,7 +46,7 @@ import ../make-test-python.nix ({ pkgs, ... }:
         };
       };
 
-      ircd = { pkgs, ... }: {
+      ircd = {
         services.ngircd = {
           enable = true;
           config = ''
@@ -75,12 +75,18 @@ import ../make-test-python.nix ({ pkgs, ... }:
             homeserver.url = homeserverUrl;
             homeserver.domain = "homeserver";
 
-            ircService.servers."ircd" = {
-              name = "IRCd";
-              port = 6667;
-              dynamicChannels = {
-                enabled = true;
-                aliasTemplate = "#irc_$CHANNEL";
+            ircService = {
+              servers."ircd" = {
+                name = "IRCd";
+                port = 6667;
+                dynamicChannels = {
+                  enabled = true;
+                  aliasTemplate = "#irc_$CHANNEL";
+                };
+              };
+              mediaProxy = {
+                publicUrl = "http://localhost:11111/media";
+                ttl = 0;
               };
             };
           };
@@ -203,6 +209,8 @@ import ../make-test-python.nix ({ pkgs, ... }:
       with subtest("start the appservice"):
           appservice.wait_for_unit("matrix-appservice-irc.service")
           appservice.wait_for_open_port(8009)
+          appservice.wait_for_file("/var/lib/matrix-appservice-irc/media-signingkey.jwk")
+          appservice.wait_for_open_port(11111)
 
       with subtest("copy the registration file"):
           appservice.copy_from_vm("/var/lib/matrix-appservice-irc/registration.yml")
@@ -222,4 +230,4 @@ import ../make-test-python.nix ({ pkgs, ... }:
       with subtest("ensure messages can be exchanged"):
           client.succeed("do_test ${homeserverUrl} >&2")
     '';
-  })
+  }
diff --git a/nixos/tests/matrix/mjolnir.nix b/nixos/tests/matrix/mjolnir.nix
index 8a888b17a3d7c..265b13821ebd8 100644
--- a/nixos/tests/matrix/mjolnir.nix
+++ b/nixos/tests/matrix/mjolnir.nix
@@ -109,8 +109,8 @@ import ../make-test-python.nix (
         environment.systemPackages = [
           (pkgs.writers.writePython3Bin "create_management_room_and_invite_mjolnir"
             { libraries = with pkgs.python3Packages; [
-                matrix-nio
-              ] ++ matrix-nio.optional-dependencies.e2e;
+                (matrix-nio.override { withOlm = true; })
+              ];
             } ''
             import asyncio
 
diff --git a/nixos/tests/mealie.nix b/nixos/tests/mealie.nix
index 88f749c712948..810d47ecd2ec4 100644
--- a/nixos/tests/mealie.nix
+++ b/nixos/tests/mealie.nix
@@ -3,7 +3,7 @@ import ./make-test-python.nix ({ pkgs, ...} :
 {
   name = "mealie";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ litchipi ];
+    maintainers = [ litchipi anoa ];
   };
 
   nodes = {
diff --git a/nixos/tests/mediamtx.nix b/nixos/tests/mediamtx.nix
index 8cacd02631d95..f40c4a8cb5832 100644
--- a/nixos/tests/mediamtx.nix
+++ b/nixos/tests/mediamtx.nix
@@ -1,57 +1,60 @@
-import ./make-test-python.nix ({ pkgs, lib, ...} :
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
 
-{
-  name = "mediamtx";
-  meta.maintainers = with lib.maintainers; [ fpletz ];
+  {
+    name = "mediamtx";
+    meta.maintainers = with lib.maintainers; [ fpletz ];
 
-  nodes = {
-    machine = { config, ... }: {
-      services.mediamtx = {
-        enable = true;
-        settings = {
-          metrics = true;
-          paths.all.source = "publisher";
+    nodes = {
+      machine = {
+        services.mediamtx = {
+          enable = true;
+          settings = {
+            metrics = true;
+            paths.all.source = "publisher";
+          };
         };
-      };
 
-      systemd.services.rtmp-publish = {
-        description = "Publish an RTMP stream to mediamtx";
-        after = [ "mediamtx.service" ];
-        bindsTo = [ "mediamtx.service" ];
-        wantedBy = [ "multi-user.target" ];
-        serviceConfig = {
-          DynamicUser = true;
-          Restart = "on-failure";
-          RestartSec = "1s";
-          TimeoutStartSec = "10s";
-          ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -re -f lavfi -i smptebars=size=800x600:rate=10 -c libx264 -f flv rtmp://localhost:1935/test";
+        systemd.services.rtmp-publish = {
+          description = "Publish an RTMP stream to mediamtx";
+          after = [ "mediamtx.service" ];
+          bindsTo = [ "mediamtx.service" ];
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            Restart = "on-failure";
+            RestartSec = "1s";
+            TimeoutStartSec = "10s";
+            ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -re -f lavfi -i smptebars=size=800x600:rate=10 -c libx264 -f flv rtmp://localhost:1935/test";
+          };
         };
-      };
 
-      systemd.services.rtmp-receive = {
-        description = "Receive an RTMP stream from mediamtx";
-        after = [ "rtmp-publish.service" ];
-        bindsTo = [ "rtmp-publish.service" ];
-        wantedBy = [ "multi-user.target" ];
-        serviceConfig = {
-          DynamicUser = true;
-          Restart = "on-failure";
-          RestartSec = "1s";
-          TimeoutStartSec = "10s";
-          ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -y -re -i rtmp://localhost:1935/test -f flv /dev/null";
+        systemd.services.rtmp-receive = {
+          description = "Receive an RTMP stream from mediamtx";
+          after = [ "rtmp-publish.service" ];
+          bindsTo = [ "rtmp-publish.service" ];
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            Restart = "on-failure";
+            RestartSec = "1s";
+            TimeoutStartSec = "10s";
+            ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -y -re -i rtmp://localhost:1935/test -f flv /dev/null";
+          };
         };
       };
     };
-  };
 
-  testScript = ''
-    start_all()
+    testScript = ''
+      start_all()
 
-    machine.wait_for_unit("mediamtx.service")
-    machine.wait_for_unit("rtmp-publish.service")
-    machine.wait_for_unit("rtmp-receive.service")
-    machine.wait_for_open_port(9998)
-    machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"publish\".*1$'")
-    machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"read\".*1$'")
-  '';
-})
+      machine.wait_for_unit("mediamtx.service")
+      machine.wait_for_unit("rtmp-publish.service")
+      machine.sleep(10)
+      machine.wait_for_unit("rtmp-receive.service")
+      machine.wait_for_open_port(9998)
+      machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"publish\".*1$'")
+      machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"read\".*1$'")
+    '';
+  }
+)
diff --git a/nixos/tests/mediatomb.nix b/nixos/tests/mediatomb.nix
index 9c84aa3e92a5d..5718a9a4a2992 100644
--- a/nixos/tests/mediatomb.nix
+++ b/nixos/tests/mediatomb.nix
@@ -30,15 +30,22 @@ import ./make-test-python.nix {
     client = {};
   };
 
-  testScript = ''
-    start_all()
+  testScript = { nodes, ... }:
+    let
+      serverIP = nodes.server.networking.primaryIPAddress;
+      serverIPv6 = nodes.server.networking.primaryIPv6Address;
+    in
+    ''
+      start_all()
 
-    server.wait_for_unit("mediatomb")
-    server.wait_until_succeeds("nc -z 192.168.1.2 49152")
-    server.succeed("curl -v --fail http://server:49152/")
+      server.wait_for_unit("mediatomb")
+      server.wait_until_succeeds("nc -z ${serverIP} 49152")
+      server.succeed("curl -v --fail http://${serverIP}:49152/")
+      server.succeed("curl -v --fail http://[${serverIPv6}]:49152/")
 
-    client.wait_for_unit("multi-user.target")
-    page = client.succeed("curl -v --fail http://server:49152/")
-    assert "Gerbera" in page and "MediaTomb" not in page
-  '';
+      client.wait_for_unit("multi-user.target")
+      page = client.succeed("curl -v --fail http://${serverIP}:49152/")
+      assert "Gerbera" in page and "MediaTomb" not in page
+      page = client.succeed("curl -v --fail http://[${serverIPv6}]:49152/")
+      assert "Gerbera" in page and "MediaTomb" not in page
+    '';
 }
diff --git a/nixos/tests/miracle-wm.nix b/nixos/tests/miracle-wm.nix
new file mode 100644
index 0000000000000..2bb62222b22a1
--- /dev/null
+++ b/nixos/tests/miracle-wm.nix
@@ -0,0 +1,131 @@
+{ pkgs, lib, ... }:
+{
+  name = "miracle-wm";
+
+  meta = {
+    maintainers = with lib.maintainers; [ OPNA2608 ];
+  };
+
+  nodes.machine =
+    { config, ... }:
+    {
+      imports = [
+        ./common/auto.nix
+        ./common/user-account.nix
+      ];
+
+      # The test very rarely seems to get interrupted by the oom-killer, so give it extra memory
+      virtualisation.memorySize = 2047;
+
+      test-support.displayManager.auto = {
+        enable = true;
+        user = "alice";
+      };
+
+      services.xserver.enable = true;
+      services.displayManager.defaultSession = lib.mkForce "miracle-wm";
+
+      programs.wayland.miracle-wm.enable = true;
+
+      # To ensure a specific config for the tests
+      systemd.tmpfiles.rules =
+        let
+          testConfig = (pkgs.formats.yaml { }).generate "miracle-wm.yaml" {
+            terminal = "env WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY= alacritty";
+            startup_apps = [
+              {
+                command = "foot";
+                restart_on_death = false;
+              }
+            ];
+          };
+        in
+        [
+          "d ${config.users.users.alice.home}/.config 0700 alice users - -"
+          "L ${config.users.users.alice.home}/.config/miracle-wm.yaml - - - - ${testConfig}"
+        ];
+
+      environment = {
+        shellAliases = {
+          test-wayland = "wayland-info | tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok";
+          test-x11 = "glinfo | tee /tmp/test-x11.out && touch /tmp/test-x11-exit-ok";
+        };
+
+        systemPackages = with pkgs; [
+          mesa-demos
+          wayland-utils
+          foot
+          alacritty
+        ];
+
+        # To help with OCR
+        etc."xdg/foot/foot.ini".text = lib.generators.toINI { } {
+          main = {
+            font = "inconsolata:size=16";
+          };
+          colors = rec {
+            foreground = "000000";
+            background = "ffffff";
+            regular2 = foreground;
+          };
+        };
+        etc."xdg/alacritty/alacritty.yml".text = lib.generators.toYAML { } {
+          font = rec {
+            normal.family = "Inconsolata";
+            bold.family = normal.family;
+            italic.family = normal.family;
+            bold_italic.family = normal.family;
+            size = 16;
+          };
+          colors = rec {
+            primary = {
+              foreground = "0x000000";
+              background = "0xffffff";
+            };
+            normal = {
+              green = primary.foreground;
+            };
+          };
+        };
+      };
+
+      fonts.packages = [ pkgs.inconsolata ];
+    };
+
+  enableOCR = true;
+
+  testScript =
+    { ... }:
+    ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+
+      # Wait for miracle-wm to complete startup
+      machine.wait_for_file("/run/user/1000/wayland-0")
+      machine.succeed("pgrep miracle-wm")
+      machine.screenshot("miracle-wm_launched")
+
+      # Test Wayland
+      with subtest("wayland client works"):
+          # We let miracle-wm start the first terminal, as we might get stuck if it's not ready to process the first keybind
+          # machine.send_key("ctrl-alt-t")
+          machine.wait_for_text("alice@machine")
+          machine.send_chars("test-wayland\n")
+          machine.wait_for_file("/tmp/test-wayland-exit-ok")
+          machine.copy_from_vm("/tmp/test-wayland.out")
+          machine.screenshot("foot_wayland_info")
+          machine.send_chars("exit\n")
+          machine.wait_until_fails("pgrep foot")
+
+      # Test XWayland
+      with subtest("x11 client works"):
+          machine.send_key("meta_l-ret")
+          machine.wait_for_text("alice@machine")
+          machine.send_chars("test-x11\n")
+          machine.wait_for_file("/tmp/test-x11-exit-ok")
+          machine.copy_from_vm("/tmp/test-x11.out")
+          machine.screenshot("alacritty_glinfo")
+          machine.send_chars("exit\n")
+          machine.wait_until_fails("pgrep alacritty")
+    '';
+}
diff --git a/nixos/tests/misc.nix b/nixos/tests/misc.nix
index e7842debba7a2..1d296accf121f 100644
--- a/nixos/tests/misc.nix
+++ b/nixos/tests/misc.nix
@@ -44,28 +44,6 @@ in {
 
   testScript =
     ''
-      import json
-
-
-      def get_path_info(path):
-          result = machine.succeed(f"nix --option experimental-features nix-command path-info --json {path}")
-          parsed = json.loads(result)
-          return parsed
-
-
-      with subtest("nix-db"):
-          info = get_path_info("${foo}")
-          print(info)
-
-          if (
-              info[0]["narHash"]
-              != "sha256-BdMdnb/0eWy3EddjE83rdgzWWpQjfWPAj3zDIFMD3Ck="
-          ):
-              raise Exception("narHash not set")
-
-          if info[0]["narSize"] != 128:
-              raise Exception("narSize not set")
-
       with subtest("nixos-version"):
           machine.succeed("[ `nixos-version | wc -w` = 2 ]")
 
@@ -121,6 +99,9 @@ in {
       with subtest("whether systemd-tmpfiles settings works"):
           machine.succeed("[ -e /tmp/somefile ]")
 
+      with subtest("/etc/mtab"):
+          assert "/proc/mounts" == machine.succeed("readlink --no-newline /etc/mtab")
+
       with subtest("whether automounting works"):
           machine.fail("grep '/tmp2 tmpfs' /proc/mounts")
           machine.succeed("touch /tmp2/x")
@@ -149,9 +130,6 @@ in {
       with subtest("shell-vars"):
           machine.succeed('[ -n "$NIX_PATH" ]')
 
-      with subtest("nix-db"):
-          machine.succeed("nix-store -qR /run/current-system | grep nixos-")
-
       with subtest("Test sysctl"):
           machine.wait_for_unit("systemd-sysctl.service")
           assert "1" == machine.succeed("sysctl -ne vm.swappiness").strip()
diff --git a/nixos/tests/misskey.nix b/nixos/tests/misskey.nix
new file mode 100644
index 0000000000000..1a450c518aaeb
--- /dev/null
+++ b/nixos/tests/misskey.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+  let
+    port = 61812;
+  in
+  {
+    name = "misskey";
+
+    meta.maintainers = [ lib.maintainers.feathecutie ];
+
+    nodes.machine = {
+      services.misskey = {
+        enable = true;
+        settings = {
+          url = "http://misskey.local";
+          inherit port;
+        };
+        database.createLocally = true;
+        redis.createLocally = true;
+      };
+    };
+
+    testScript = ''
+      machine.wait_for_unit("misskey.service")
+      machine.wait_for_open_port(${toString port})
+      machine.succeed("curl --fail http://localhost:${toString port}/")
+    '';
+  }
+)
diff --git a/nixos/tests/monado.nix b/nixos/tests/monado.nix
index 8368950951e73..6f0d27ee42454 100644
--- a/nixos/tests/monado.nix
+++ b/nixos/tests/monado.nix
@@ -5,7 +5,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     { pkgs, ... }:
 
     {
-      hardware.opengl.enable = true;
+      hardware.graphics.enable = true;
       users.users.alice = {
         isNormalUser = true;
         uid = 1000;
diff --git a/nixos/tests/mongodb.nix b/nixos/tests/mongodb.nix
index 97729e38864c4..1a260814f8b87 100644
--- a/nixos/tests/mongodb.nix
+++ b/nixos/tests/mongodb.nix
@@ -33,6 +33,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
     nodes = {
       node = {...}: {
         environment.systemPackages = with pkgs; [
+          # remember to update mongodb.passthru.tests if you change this
           mongodb-5_0
         ];
       };
diff --git a/nixos/tests/morph-browser.nix b/nixos/tests/morph-browser.nix
index 859e6bb47646a..65ad4d85cc12e 100644
--- a/nixos/tests/morph-browser.nix
+++ b/nixos/tests/morph-browser.nix
@@ -23,7 +23,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
 
     fonts.packages = with pkgs; [
       # Intended font & helps with OCR
-      ubuntu_font_family
+      ubuntu-classic
     ];
   };
 
diff --git a/nixos/tests/mosquitto.nix b/nixos/tests/mosquitto.nix
index c0980b23e78fd..eca29292721fd 100644
--- a/nixos/tests/mosquitto.nix
+++ b/nixos/tests/mosquitto.nix
@@ -55,7 +55,7 @@ let
 in {
   name = "mosquitto";
   meta = with pkgs.lib; {
-    maintainers = with maintainers; [ pennae peterhoeg ];
+    maintainers = with maintainers; [ peterhoeg ];
   };
 
   nodes = let
diff --git a/nixos/tests/mpd.nix b/nixos/tests/mpd.nix
index 52d9c7fd33a1b..0772c05d12ac4 100644
--- a/nixos/tests/mpd.nix
+++ b/nixos/tests/mpd.nix
@@ -37,7 +37,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
 
     mkServer = { mpd, musicService, }:
       { boot.kernelModules = [ "snd-dummy" ];
-        sound.enable = true;
         services.mpd = mpd;
         systemd.services.musicService = musicService;
       };
diff --git a/nixos/tests/mpv.nix b/nixos/tests/mpv.nix
index 32a81cbe2495e..c2e151c224760 100644
--- a/nixos/tests/mpv.nix
+++ b/nixos/tests/mpv.nix
@@ -12,7 +12,7 @@ in
     {
       environment.systemPackages = [
         pkgs.curl
-        (pkgs.wrapMpv pkgs.mpv-unwrapped {
+        (pkgs.mpv.override {
           scripts = [ pkgs.mpvScripts.simple-mpv-webui ];
         })
       ];
diff --git a/nixos/tests/mumble.nix b/nixos/tests/mumble.nix
index 8eee454721a13..12fa00b79bbf8 100644
--- a/nixos/tests/mumble.nix
+++ b/nixos/tests/mumble.nix
@@ -15,7 +15,7 @@ in
 {
   name = "mumble";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ thoughtpolice eelco ];
+    maintainers = [ thoughtpolice ];
   };
 
   nodes = {
diff --git a/nixos/tests/munin.nix b/nixos/tests/munin.nix
index e371b2dffa6b8..7b7bf6f41c046 100644
--- a/nixos/tests/munin.nix
+++ b/nixos/tests/munin.nix
@@ -4,7 +4,7 @@
 import ./make-test-python.nix ({ pkgs, ...} : {
   name = "munin";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ domenkozar eelco ];
+    maintainers = [ domenkozar ];
   };
 
   nodes = {
diff --git a/nixos/tests/musescore.nix b/nixos/tests/musescore.nix
index 0720631ed284b..559c91ed8e550 100644
--- a/nixos/tests/musescore.nix
+++ b/nixos/tests/musescore.nix
@@ -43,13 +43,10 @@ in
     )
 
     # Start MuseScore window
-    machine.execute("DISPLAY=:0.0 mscore >&2 &")
+    machine.execute("env XDG_RUNTIME_DIR=$PWD DISPLAY=:0.0 mscore >&2 &")
 
     # Wait until MuseScore has launched
-    machine.wait_for_window("MuseScore 4")
-
-    # Wait until the window has completely initialised
-    machine.wait_for_text("MuseScore 4")
+    machine.wait_for_window("MuseScore Studio")
 
     machine.screenshot("MuseScore0")
 
@@ -75,29 +72,22 @@ in
     machine.screenshot("MuseScore2")
 
     # Go to the export dialogue and create a PDF
-    machine.send_key("alt-f")
-    machine.sleep(1)
-    machine.send_key("e")
-
-    # Wait until the export dialogue appears.
-    machine.wait_for_text("Export")
+    machine.send_key("ctrl-p")
 
-    machine.screenshot("MuseScore3")
+    # Wait until the Print dialogue appears.
+    machine.wait_for_window("Print")
 
-    machine.send_key("shift-tab")
-    machine.sleep(1)
-    machine.send_key("ret")
+    machine.screenshot("MuseScore4")
+    machine.send_key("alt-p")
     machine.sleep(1)
-    machine.send_key("ret")
 
-    machine.screenshot("MuseScore4")
+    machine.screenshot("MuseScore5")
 
     # Wait until PDF is exported
-    machine.wait_for_file('"/root/Documents/MuseScore4/Scores/Untitled score.pdf"')
+    machine.wait_for_file('"/root/Untitled score.pdf"')
 
-    # Check that it contains the title of the score
-    machine.succeed('pdfgrep "Untitled score" "/root/Documents/MuseScore4/Scores/Untitled score.pdf"')
-
-    machine.screenshot("MuseScore5")
+    # Check that it contains the title of the score
+    machine.succeed('pdfgrep "Untitled score" "/root/Untitled score.pdf"')
+    machine.copy_from_vm("/root/Untitled score.pdf")
   '';
 })
diff --git a/nixos/tests/music-assistant.nix b/nixos/tests/music-assistant.nix
new file mode 100644
index 0000000000000..ac667ee953035
--- /dev/null
+++ b/nixos/tests/music-assistant.nix
@@ -0,0 +1,21 @@
+{
+  lib,
+  ...
+}:
+
+{
+  name = "music-assistant";
+  meta.maintainers = with lib.maintainers; [ hexa ];
+
+  nodes.machine = {
+    services.music-assistant = {
+      enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("music-assistant.service")
+    machine.wait_until_succeeds("curl --fail http://localhost:8095")
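+    # Log the hardening settings that systemd-analyze still flags (lines marked ✓ are filtered out)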
+    machine.log(machine.succeed("systemd-analyze security music-assistant.service | grep -v ✓"))
+  '';
+}
diff --git a/nixos/tests/mutable-users.nix b/nixos/tests/mutable-users.nix
index ebe32e6487ef4..ef83923f3e232 100644
--- a/nixos/tests/mutable-users.nix
+++ b/nixos/tests/mutable-users.nix
@@ -7,19 +7,19 @@ import ./make-test-python.nix ({ pkgs, ...} : {
   };
 
   nodes = {
-    machine = { ... }: {
-      users.mutableUsers = false;
-    };
-    mutable = { ... }: {
-      users.mutableUsers = true;
-      users.users.dry-test.isNormalUser = true;
+    machine = {
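+      # Both user setups are built as specialisations of a single machine so the test can switch between them at runtime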
+      specialisation.immutable.configuration = {
+        users.mutableUsers = false;
+      };
+
+      specialisation.mutable.configuration = {
+        users.mutableUsers = true;
+        users.users.dry-test.isNormalUser = true;
+      };
     };
   };
 
-  testScript = {nodes, ...}: let
-    immutableSystem = nodes.machine.config.system.build.toplevel;
-    mutableSystem = nodes.mutable.config.system.build.toplevel;
-  in ''
+  testScript = ''
     machine.start()
     machine.wait_for_unit("default.target")
 
@@ -30,7 +30,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
         machine.succeed("sudo useradd foobar")
         assert "foobar" in machine.succeed("cat /etc/passwd")
         machine.succeed(
-            "${immutableSystem}/bin/switch-to-configuration test"
+            "/run/booted-system/specialisation/immutable/bin/switch-to-configuration test"
         )
         assert "foobar" not in machine.succeed("cat /etc/passwd")
 
@@ -39,7 +39,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     with subtest("Password is wrapped in mutable mode"):
         assert "/run/current-system/" in machine.succeed("which passwd")
         machine.succeed(
-            "${mutableSystem}/bin/switch-to-configuration test"
+            "/run/booted-system/specialisation/mutable/bin/switch-to-configuration test"
         )
         assert "/run/wrappers/" in machine.succeed("which passwd")
 
@@ -63,7 +63,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
             expected_hashes[file] = machine.succeed(f"sha256sum {file}")
             expected_stats[file] = machine.succeed(f"stat {file}")
 
-        machine.succeed("/run/current-system/bin/switch-to-configuration dry-activate")
+        machine.succeed("/run/booted-system/specialisation/mutable/bin/switch-to-configuration dry-activate")
 
         machine.fail('test -e /home/dry-test')  # home was not recreated
         for file in files_to_check:
diff --git a/nixos/tests/mxisd.nix b/nixos/tests/mxisd.nix
deleted file mode 100644
index 354612a8a53d0..0000000000000
--- a/nixos/tests/mxisd.nix
+++ /dev/null
@@ -1,21 +0,0 @@
-import ./make-test-python.nix ({ pkgs, ... } : {
-
-  name = "mxisd";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ mguentner ];
-  };
-
-  nodes = {
-    server = args : {
-      services.mxisd.enable = true;
-      services.mxisd.matrix.domain = "example.org";
-    };
-  };
-
-  testScript = ''
-    start_all()
-    server.wait_for_unit("mxisd.service")
-    server.wait_for_open_port(8090)
-    server.succeed("curl -Ssf 'http://127.0.0.1:8090/_matrix/identity/api/v1'")
-  '';
-})
diff --git a/nixos/tests/mycelium/default.nix b/nixos/tests/mycelium/default.nix
index 9174c49d70869..956a822a21860 100644
--- a/nixos/tests/mycelium/default.nix
+++ b/nixos/tests/mycelium/default.nix
@@ -51,6 +51,9 @@ in
       peer1.wait_for_unit("mycelium.service")
       peer2.wait_for_unit("mycelium.service")
 
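+      # Check that each peer has discovered the other before exercising connectivity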
+      peer1.succeed("mycelium peers list | grep 192.168.1.12")
+      peer2.succeed("mycelium peers list | grep 192.168.1.11")
+
       peer1.succeed("ping -c5 ${peer2-ip}")
       peer2.succeed("ping -c5 ${peer1-ip}")
     '';
diff --git a/nixos/tests/mysql/common.nix b/nixos/tests/mysql/common.nix
index 1cf52347f4c74..ad54b0e00c1b3 100644
--- a/nixos/tests/mysql/common.nix
+++ b/nixos/tests/mysql/common.nix
@@ -4,7 +4,7 @@
     inherit (pkgs) mysql80;
   };
   perconaPackages = {
-    inherit (pkgs) percona-server_8_0;
+    inherit (pkgs) percona-server_lts percona-server_innovation;
   };
   mkTestName = pkg: "mariadb_${builtins.replaceStrings ["."] [""] (lib.versions.majorMinor pkg.version)}";
 }
diff --git a/nixos/tests/mysql/mariadb-galera.nix b/nixos/tests/mysql/mariadb-galera.nix
index 7455abbce5fb0..65705afbf82cd 100644
--- a/nixos/tests/mysql/mariadb-galera.nix
+++ b/nixos/tests/mysql/mariadb-galera.nix
@@ -1,8 +1,8 @@
 {
   system ? builtins.currentSystem,
-  config ? {},
+  config ? { },
   pkgs ? import ../../.. { inherit system config; },
-  lib ? pkgs.lib
+  lib ? pkgs.lib,
 }:
 
 let
@@ -11,240 +11,263 @@ let
   makeTest = import ./../make-test-python.nix;
 
   # Common user configuration
-  makeGaleraTest = {
-    mariadbPackage,
-    name ? mkTestName mariadbPackage,
-    galeraPackage ? pkgs.mariadb-galera
-  }: makeTest {
-    name = "${name}-galera-mariabackup";
-    meta = {
-      maintainers = with lib.maintainers; [ izorkin ] ++ lib.teams.helsinki-systems.members;
-    };
+  makeGaleraTest =
+    {
+      mariadbPackage,
+      name ? mkTestName mariadbPackage,
+      galeraPackage ? pkgs.mariadb-galera,
+    }:
+    makeTest {
+      name = "${name}-galera-mariabackup";
+      meta = {
+        maintainers = with lib.maintainers; [ izorkin ] ++ lib.teams.helsinki-systems.members;
+      };
 
-    # The test creates a Galera cluster with 3 nodes and is checking if mariabackup-based SST works. The cluster is tested by creating a DB and an empty table on one node,
-    # and checking the table's presence on the other node.
-    nodes = let
-      mkGaleraNode = {
-        id,
-        method
-      }: let
-        address = "192.168.1.${toString id}";
-        isFirstClusterNode = id == 1 || id == 4;
-      in {
-        users = {
-          users.testuser = {
-            isSystemUser = true;
-            group = "testusers";
-          };
-          groups.testusers = { };
-        };
+      # The test creates a Galera cluster with 3 nodes and checks whether mariabackup-based SST works. The cluster is tested by creating a DB and an empty table on one node,
+      # and checking the table's presence on another node.
+      nodes =
+        let
+          mkGaleraNode =
+            {
+              id,
+              method,
+            }:
+            let
+              address = "192.168.1.${toString id}";
+              isFirstClusterNode = id == 1 || id == 4;
+            in
+            {
+              users = {
+                users.testuser = {
+                  isSystemUser = true;
+                  group = "testusers";
+                };
+                groups.testusers = { };
+              };
 
-        networking = {
-          interfaces.eth1 = {
-            ipv4.addresses = [
-              { inherit address; prefixLength = 24; }
-            ];
-          };
-          extraHosts = lib.concatMapStringsSep "\n" (i: "192.168.1.${toString i} galera_0${toString i}") (lib.range 1 6);
-          firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
-          firewall.allowedUDPPorts = [ 4567 ];
-        };
-        systemd.services.mysql = with pkgs; {
-          path = with pkgs; [
-            bash
-            gawk
-            gnutar
-            gzip
-            inetutils
-            iproute2
-            netcat
-            procps
-            pv
-            rsync
-            socat
-            stunnel
-            which
-          ];
-        };
-        services.mysql = {
-          enable = true;
-          package = mariadbPackage;
-          ensureDatabases = lib.mkIf isFirstClusterNode [ "testdb" ];
-          ensureUsers = lib.mkIf isFirstClusterNode [{
-            name = "testuser";
-            ensurePermissions = {
-              "testdb.*" = "ALL PRIVILEGES";
-            };
-          }];
-          initialScript = lib.mkIf isFirstClusterNode (pkgs.writeText "mariadb-init.sql" ''
-            GRANT ALL PRIVILEGES ON *.* TO 'check_repl'@'localhost' IDENTIFIED BY 'check_pass' WITH GRANT OPTION;
-            FLUSH PRIVILEGES;
-          '');
-          settings = {
-            mysqld = {
-              bind_address = "0.0.0.0";
-            };
-            galera = {
-              wsrep_on = "ON";
-              wsrep_debug = "NONE";
-              wsrep_retry_autocommit = "3";
-              wsrep_provider = "${galeraPackage}/lib/galera/libgalera_smm.so";
-              wsrep_cluster_address = "gcomm://"
-                + lib.optionalString (id == 2 || id == 3) "galera_01,galera_02,galera_03"
-                + lib.optionalString (id == 5 || id == 6) "galera_04,galera_05,galera_06";
-              wsrep_cluster_name = "galera";
-              wsrep_node_address = address;
-              wsrep_node_name = "galera_0${toString id}";
-              wsrep_sst_method = method;
-              wsrep_sst_auth = "check_repl:check_pass";
-              binlog_format = "ROW";
-              enforce_storage_engine = "InnoDB";
-              innodb_autoinc_lock_mode = "2";
+              networking = {
+                interfaces.eth1 = {
+                  ipv4.addresses = [
+                    {
+                      inherit address;
+                      prefixLength = 24;
+                    }
+                  ];
+                  ipv6.addresses = lib.mkForce [ ];
+                };
+                extraHosts = lib.concatMapStringsSep "\n" (i: "192.168.1.${toString i} galera_0${toString i}") (
+                  lib.range 1 6
+                );
+                firewall.allowedTCPPorts = [
+                  3306
+                  4444
+                  4567
+                  4568
+                ];
+                firewall.allowedUDPPorts = [ 4567 ];
+              };
+              systemd.services.mysql = with pkgs; {
+                path = with pkgs; [
+                  bash
+                  gawk
+                  gnutar
+                  gzip
+                  inetutils
+                  iproute2
+                  netcat
+                  procps
+                  pv
+                  rsync
+                  socat
+                  stunnel
+                  which
+                ];
+              };
+              services.mysql = {
+                enable = true;
+                package = mariadbPackage;
+                ensureDatabases = lib.mkIf isFirstClusterNode [ "testdb" ];
+                ensureUsers = lib.mkIf isFirstClusterNode [
+                  {
+                    name = "testuser";
+                    ensurePermissions = {
+                      "testdb.*" = "ALL PRIVILEGES";
+                    };
+                  }
+                ];
+                initialScript = lib.mkIf isFirstClusterNode (
+                  pkgs.writeText "mariadb-init.sql" ''
+                    GRANT ALL PRIVILEGES ON *.* TO 'check_repl'@'localhost' IDENTIFIED BY 'check_pass' WITH GRANT OPTION;
+                    FLUSH PRIVILEGES;
+                  ''
+                );
+                settings = {
+                  mysqld = {
+                    bind_address = "0.0.0.0";
+                  };
+                  galera = {
+                    wsrep_on = "ON";
+                    wsrep_debug = "NONE";
+                    wsrep_retry_autocommit = "3";
+                    wsrep_provider = "${galeraPackage}/lib/galera/libgalera_smm.so";
+                    wsrep_cluster_address =
+                      "gcomm://"
+                      + lib.optionalString (id == 2 || id == 3) "galera_01,galera_02,galera_03"
+                      + lib.optionalString (id == 5 || id == 6) "galera_04,galera_05,galera_06";
+                    wsrep_cluster_name = "galera";
+                    wsrep_node_address = address;
+                    wsrep_node_name = "galera_0${toString id}";
+                    wsrep_sst_method = method;
+                    wsrep_sst_auth = "check_repl:check_pass";
+                    binlog_format = "ROW";
+                    enforce_storage_engine = "InnoDB";
+                    innodb_autoinc_lock_mode = "2";
+                  };
+                };
+              };
             };
+        in
+        {
+          galera_01 = mkGaleraNode {
+            id = 1;
+            method = "mariabackup";
           };
-        };
-      };
-    in {
-      galera_01 = mkGaleraNode {
-        id = 1;
-        method = "mariabackup";
-      };
 
-      galera_02 = mkGaleraNode {
-        id = 2;
-        method = "mariabackup";
-      };
+          galera_02 = mkGaleraNode {
+            id = 2;
+            method = "mariabackup";
+          };
 
-      galera_03 = mkGaleraNode {
-        id = 3;
-        method = "mariabackup";
-      };
+          galera_03 = mkGaleraNode {
+            id = 3;
+            method = "mariabackup";
+          };
 
-      galera_04 = mkGaleraNode {
-        id = 4;
-        method = "rsync";
-      };
+          galera_04 = mkGaleraNode {
+            id = 4;
+            method = "rsync";
+          };
 
-      galera_05 = mkGaleraNode {
-        id = 5;
-        method = "rsync";
-      };
+          galera_05 = mkGaleraNode {
+            id = 5;
+            method = "rsync";
+          };
 
-      galera_06 = mkGaleraNode {
-        id = 6;
-        method = "rsync";
-      };
+          galera_06 = mkGaleraNode {
+            id = 6;
+            method = "rsync";
+          };
 
-    };
+        };
 
-    testScript = ''
-      galera_01.start()
-      galera_01.wait_for_unit("mysql")
-      galera_01.wait_for_open_port(3306)
-      galera_01.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
-      )
-      galera_01.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (37);'"
-      )
-      galera_02.start()
-      galera_02.wait_for_unit("mysql")
-      galera_02.wait_for_open_port(3306)
-      galera_03.start()
-      galera_03.wait_for_unit("mysql")
-      galera_03.wait_for_open_port(3306)
-      galera_02.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 37"
-      )
-      galera_02.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
-      )
-      galera_02.succeed("systemctl stop mysql")
-      galera_01.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (38);'"
-      )
-      galera_03.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
-      )
-      galera_01.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (39);'"
-      )
-      galera_02.succeed("systemctl start mysql")
-      galera_02.wait_for_open_port(3306)
-      galera_02.succeed(
-          "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
-      )
-      galera_03.succeed(
-          "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
-      )
-      galera_01.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db3;' -N | grep 39"
-      )
-      galera_02.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db2;' -N | grep 38"
-      )
-      galera_03.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 37"
-      )
-      galera_01.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
-      galera_02.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db2;'")
-      galera_03.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db1;'")
-      galera_01.crash()
-      galera_02.crash()
-      galera_03.crash()
+      testScript = ''
+        galera_01.start()
+        galera_01.wait_for_unit("mysql")
+        galera_01.wait_for_open_port(3306)
+        galera_01.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        )
+        galera_01.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (37);'"
+        )
+        galera_02.start()
+        galera_02.wait_for_unit("mysql")
+        galera_02.wait_for_open_port(3306)
+        galera_03.start()
+        galera_03.wait_for_unit("mysql")
+        galera_03.wait_for_open_port(3306)
+        galera_02.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 37"
+        )
+        galera_02.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        )
+        galera_02.succeed("systemctl stop mysql")
+        galera_01.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (38);'"
+        )
+        galera_03.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        )
+        galera_01.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (39);'"
+        )
+        galera_02.succeed("systemctl start mysql")
+        galera_02.wait_for_open_port(3306)
+        galera_02.succeed(
+            "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+        )
+        galera_03.succeed(
+            "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+        )
+        galera_01.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db3;' -N | grep 39"
+        )
+        galera_02.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db2;' -N | grep 38"
+        )
+        galera_03.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 37"
+        )
+        galera_01.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
+        galera_02.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db2;'")
+        galera_03.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db1;'")
+        galera_01.crash()
+        galera_02.crash()
+        galera_03.crash()
 
-      galera_04.start()
-      galera_04.wait_for_unit("mysql")
-      galera_04.wait_for_open_port(3306)
-      galera_04.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
-      )
-      galera_04.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (41);'"
-      )
-      galera_05.start()
-      galera_05.wait_for_unit("mysql")
-      galera_05.wait_for_open_port(3306)
-      galera_06.start()
-      galera_06.wait_for_unit("mysql")
-      galera_06.wait_for_open_port(3306)
-      galera_05.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 41"
-      )
-      galera_05.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
-      )
-      galera_05.succeed("systemctl stop mysql")
-      galera_04.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (42);'"
-      )
-      galera_06.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
-      )
-      galera_04.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (43);'"
-      )
-      galera_05.succeed("systemctl start mysql")
-      galera_05.wait_for_open_port(3306)
-      galera_05.succeed(
-          "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
-      )
-      galera_06.succeed(
-          "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
-      )
-      galera_04.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db3;' -N | grep 43"
-      )
-      galera_05.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db2;' -N | grep 42"
-      )
-      galera_06.succeed(
-          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 41"
-      )
-      galera_04.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
-      galera_05.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db2;'")
-      galera_06.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db1;'")
-    '';
-  };
+        galera_04.start()
+        galera_04.wait_for_unit("mysql")
+        galera_04.wait_for_open_port(3306)
+        galera_04.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        )
+        galera_04.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (41);'"
+        )
+        galera_05.start()
+        galera_05.wait_for_unit("mysql")
+        galera_05.wait_for_open_port(3306)
+        galera_06.start()
+        galera_06.wait_for_unit("mysql")
+        galera_06.wait_for_open_port(3306)
+        galera_05.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 41"
+        )
+        galera_05.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        )
+        galera_05.succeed("systemctl stop mysql")
+        galera_04.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (42);'"
+        )
+        galera_06.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+        )
+        galera_04.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (43);'"
+        )
+        galera_05.succeed("systemctl start mysql")
+        galera_05.wait_for_open_port(3306)
+        galera_05.succeed(
+            "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+        )
+        galera_06.succeed(
+            "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+        )
+        galera_04.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db3;' -N | grep 43"
+        )
+        galera_05.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db2;' -N | grep 42"
+        )
+        galera_06.succeed(
+            "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 41"
+        )
+        galera_04.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
+        galera_05.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db2;'")
+        galera_06.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db1;'")
+      '';
+    };
 in
-  lib.mapAttrs (_: mariadbPackage: makeGaleraTest { inherit mariadbPackage; }) mariadbPackages
+lib.mapAttrs (_: mariadbPackage: makeGaleraTest { inherit mariadbPackage; }) mariadbPackages
diff --git a/nixos/tests/mysql/mysql-autobackup.nix b/nixos/tests/mysql/mysql-autobackup.nix
index b49466db0a9ce..80dbb47adcd29 100644
--- a/nixos/tests/mysql/mysql-autobackup.nix
+++ b/nixos/tests/mysql/mysql-autobackup.nix
@@ -1,8 +1,8 @@
 {
   system ? builtins.currentSystem,
-  config ? {},
+  config ? { },
   pkgs ? import ../../.. { inherit system config; },
-  lib ? pkgs.lib
+  lib ? pkgs.lib,
 }:
 
 let
@@ -10,44 +10,52 @@ let
 
   makeTest = import ./../make-test-python.nix;
 
-  makeAutobackupTest = {
-    package,
-    name ? mkTestName package,
-  }: makeTest {
-    name = "${name}-automysqlbackup";
-    meta.maintainers = [ lib.maintainers.aanderse ];
-
-    nodes.machine = {
-      services.mysql = {
-        inherit package;
-        enable = true;
-        initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
+  makeAutobackupTest =
+    {
+      package,
+      name ? mkTestName package,
+    }:
+    makeTest {
+      name = "${name}-automysqlbackup";
+      meta.maintainers = [ lib.maintainers.aanderse ];
+
+      nodes.machine = {
+        services.mysql = {
+          inherit package;
+          enable = true;
+          initialDatabases = [
+            {
+              name = "testdb";
+              schema = ./testdb.sql;
+            }
+          ];
+        };
+
+        services.automysqlbackup.enable = true;
+        services.automysqlbackup.settings.mysql_dump_port = "";
       };
 
-      services.automysqlbackup.enable = true;
-    };
-
-    testScript = ''
-      start_all()
+      testScript = ''
+        start_all()
 
-      # Need to have mysql started so that it can be populated with data.
-      machine.wait_for_unit("mysql.service")
+        # Need to have mysql started so that it can be populated with data.
+        machine.wait_for_unit("mysql.service")
 
-      with subtest("Wait for testdb to be fully populated (5 rows)."):
-          machine.wait_until_succeeds(
-              "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
-          )
+        with subtest("Wait for testdb to be fully populated (5 rows)."):
+            machine.wait_until_succeeds(
+                "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+            )
 
-      with subtest("Do a backup and wait for it to start"):
-          machine.start_job("automysqlbackup.service")
-          machine.wait_for_job("automysqlbackup.service")
+        with subtest("Do a backup and wait for it to start"):
+            machine.start_job("automysqlbackup.service")
+            machine.wait_for_job("automysqlbackup.service")
 
-      with subtest("wait for backup file and check that data appears in backup"):
-          machine.wait_for_file("/var/backup/mysql/daily/testdb")
-          machine.succeed(
-              "${pkgs.gzip}/bin/zcat /var/backup/mysql/daily/testdb/daily_testdb_*.sql.gz | grep hello"
-          )
+        with subtest("wait for backup file and check that data appears in backup"):
+            machine.wait_for_file("/var/backup/mysql/daily/testdb")
+            machine.succeed(
+                "${pkgs.gzip}/bin/zcat /var/backup/mysql/daily/testdb/daily_testdb_*.sql.gz | grep hello"
+            )
       '';
-  };
+    };
 in
-  lib.mapAttrs (_: package: makeAutobackupTest { inherit package; }) mariadbPackages
+lib.mapAttrs (_: package: makeAutobackupTest { inherit package; }) mariadbPackages
diff --git a/nixos/tests/mysql/mysql.nix b/nixos/tests/mysql/mysql.nix
index 0a61f9d38fe2e..093da4f46aa10 100644
--- a/nixos/tests/mysql/mysql.nix
+++ b/nixos/tests/mysql/mysql.nix
@@ -146,6 +146,6 @@ in
   }) mariadbPackages)
   // (lib.mapAttrs (_: package: makeMySQLTest {
     inherit package;
-    name = "percona_8_0";
+    name = builtins.replaceStrings ["-"] ["_"] package.pname;
     hasMroonga = false; useSocketAuth = false;
   }) perconaPackages)
diff --git a/nixos/tests/nat.nix b/nixos/tests/nat.nix
index 0b617cea7774c..507e0fd72e2ac 100644
--- a/nixos/tests/nat.nix
+++ b/nixos/tests/nat.nix
@@ -6,47 +6,36 @@
 import ./make-test-python.nix ({ pkgs, lib, withFirewall, nftables ? false, ... }:
   let
     unit = if nftables then "nftables" else (if withFirewall then "firewall" else "nat");
-
-    routerBase =
-      lib.mkMerge [
-        { virtualisation.vlans = [ 2 1 ];
-          networking.firewall.enable = withFirewall;
-          networking.firewall.filterForward = nftables;
-          networking.nftables.enable = nftables;
-          networking.nat.internalIPs = [ "192.168.1.0/24" ];
-          networking.nat.externalInterface = "eth1";
-        }
-      ];
   in
   {
     name = "nat" + (lib.optionalString nftables "Nftables")
                  + (if withFirewall then "WithFirewall" else "Standalone");
     meta = with pkgs.lib.maintainers; {
-      maintainers = [ eelco rob ];
+      maintainers = [ rob ];
     };
 
     nodes =
-      { client =
-          { pkgs, nodes, ... }:
-          lib.mkMerge [
-            { virtualisation.vlans = [ 1 ];
-              networking.defaultGateway =
-                (pkgs.lib.head nodes.router.config.networking.interfaces.eth2.ipv4.addresses).address;
-              networking.nftables.enable = nftables;
-            }
-          ];
+      {
+        client = { lib, nodes, ... }: {
+          virtualisation.vlans = [ 1 ];
+          networking.defaultGateway =
+            (lib.head nodes.router.networking.interfaces.eth2.ipv4.addresses).address;
+          networking.nftables.enable = nftables;
+        };
 
-        router =
-        { ... }: lib.mkMerge [
-          routerBase
-          { networking.nat.enable = true; }
-        ];
+        router = { lib, ... }: {
+          virtualisation.vlans = [ 2 1 ];
+          networking.firewall.enable = withFirewall;
+          networking.firewall.filterForward = nftables;
+          networking.nftables.enable = nftables;
+          networking.nat.enable = true;
+          networking.nat.internalIPs = [ "192.168.1.0/24" ];
+          networking.nat.externalInterface = "eth1";
 
-        routerDummyNoNat =
-        { ... }: lib.mkMerge [
-          routerBase
-          { networking.nat.enable = false; }
-        ];
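+          # The no-nat specialisation replaces the former routerDummyNoNat node; the test switches into it at runtime to disable NAT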
+          specialisation.no-nat.configuration = {
+            networking.nat.enable = lib.mkForce false;
+          };
+        };
 
         server =
           { ... }:
@@ -59,11 +48,7 @@ import ./make-test-python.nix ({ pkgs, lib, withFirewall, nftables ? false, ...
           };
       };
 
-    testScript =
-      { nodes, ... }: let
-        routerDummyNoNatClosure = nodes.routerDummyNoNat.config.system.build.toplevel;
-        routerClosure = nodes.router.config.system.build.toplevel;
-      in ''
+    testScript = ''
         client.start()
         router.start()
         server.start()
@@ -72,13 +57,13 @@ import ./make-test-python.nix ({ pkgs, lib, withFirewall, nftables ? false, ...
         server.wait_for_unit("network.target")
         server.wait_for_unit("httpd")
         router.wait_for_unit("network.target")
-        router.succeed("curl --fail http://server/ >&2")
+        router.succeed("curl -4 --fail http://server/ >&2")
 
         # The client should be also able to connect via the NAT router.
         router.wait_for_unit("${unit}")
         client.wait_for_unit("network.target")
         client.succeed("curl --fail http://server/ >&2")
-        client.succeed("ping -c 1 server >&2")
+        client.succeed("ping -4 -c 1 server >&2")
 
         # Test whether passive FTP works.
         server.wait_for_unit("vsftpd")
@@ -89,19 +74,19 @@ import ./make-test-python.nix ({ pkgs, lib, withFirewall, nftables ? false, ...
         client.fail("curl -v -P - ftp://server/foo.txt >&2")
 
         # Test ICMP.
-        client.succeed("ping -c 1 router >&2")
-        router.succeed("ping -c 1 client >&2")
+        client.succeed("ping -4 -c 1 router >&2")
+        router.succeed("ping -4 -c 1 client >&2")
 
         # If we turn off NAT, the client shouldn't be able to reach the server.
         router.succeed(
-            "${routerDummyNoNatClosure}/bin/switch-to-configuration test 2>&1"
+            "/run/booted-system/specialisation/no-nat/bin/switch-to-configuration test 2>&1"
         )
-        client.fail("curl --fail --connect-timeout 5 http://server/ >&2")
-        client.fail("ping -c 1 server >&2")
+        client.fail("curl -4 --fail --connect-timeout 5 http://server/ >&2")
+        client.fail("ping -4 -c 1 server >&2")
 
         # And make sure that reloading the NAT job works.
         router.succeed(
-            "${routerClosure}/bin/switch-to-configuration test 2>&1"
+            "/run/booted-system/bin/switch-to-configuration test 2>&1"
         )
         # FIXME: this should not be necessary, but nat.service is not started because
         #        network.target is not triggered
@@ -109,7 +94,7 @@ import ./make-test-python.nix ({ pkgs, lib, withFirewall, nftables ? false, ...
         ${lib.optionalString (!withFirewall && !nftables) ''
           router.succeed("systemctl start nat.service")
         ''}
-        client.succeed("curl --fail http://server/ >&2")
-        client.succeed("ping -c 1 server >&2")
+        client.succeed("curl -4 --fail http://server/ >&2")
+        client.succeed("ping -4 -c 1 server >&2")
       '';
-  })
+})
diff --git a/nixos/tests/netbird.nix b/nixos/tests/netbird.nix
index 7342e8d04a39c..887747437c22c 100644
--- a/nixos/tests/netbird.nix
+++ b/nixos/tests/netbird.nix
@@ -2,9 +2,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
 {
   name = "netbird";
 
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ misuzu ];
-  };
+  meta.maintainers = with pkgs.lib.maintainers; [ ];
 
   nodes = {
     node = { ... }: {
diff --git a/nixos/tests/netdata.nix b/nixos/tests/netdata.nix
index e3438f63404e7..df4d342905c68 100644
--- a/nixos/tests/netdata.nix
+++ b/nixos/tests/netdata.nix
@@ -11,7 +11,10 @@ import ./make-test-python.nix ({ pkgs, ...} : {
       { pkgs, ... }:
         {
           environment.systemPackages = with pkgs; [ curl jq netdata ];
-          services.netdata.enable = true;
+          services.netdata = {
+            enable = true;
+            python.recommendedPythonPackages = true;
+          };
         };
     };
 
diff --git a/nixos/tests/networking-proxy.nix b/nixos/tests/networking-proxy.nix
index 330bac2588a5a..72f33c78bd0ec 100644
--- a/nixos/tests/networking-proxy.nix
+++ b/nixos/tests/networking-proxy.nix
@@ -12,7 +12,7 @@ let default-config = {
 in import ./make-test-python.nix ({ pkgs, ...} : {
   name = "networking-proxy";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [  ];
+    maintainers = [ ];
   };
 
   nodes = {
diff --git a/nixos/tests/networking/networkmanager.nix b/nixos/tests/networking/networkmanager.nix
index e654e37d7efb7..bd989408df8a1 100644
--- a/nixos/tests/networking/networkmanager.nix
+++ b/nixos/tests/networking/networkmanager.nix
@@ -166,7 +166,7 @@ let
 in lib.mapAttrs (lib.const (attrs: makeTest (attrs // {
   name = "${attrs.name}-Networking-NetworkManager";
   meta = {
-    maintainers = with lib.maintainers; [ janik ];
+    maintainers = [ ];
   };
 
 }))) testCases
diff --git a/nixos/tests/nextcloud/basic.nix b/nixos/tests/nextcloud/basic.nix
index 428fe0aa10db9..bea08e3231104 100644
--- a/nixos/tests/nextcloud/basic.nix
+++ b/nixos/tests/nextcloud/basic.nix
@@ -1,27 +1,27 @@
-args@{ pkgs, nextcloudVersion ? 22, ... }:
+{ name, pkgs, testBase, system, ... }:
 
-(import ../make-test-python.nix ({ pkgs, ...}: let
-  adminpass = "notproduction";
-  adminuser = "root";
-in {
-  name = "nextcloud-basic";
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+runTest ({ config, ... }: {
+  inherit name;
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ globin eqyiel ];
+    maintainers = [ globin eqyiel ma27 ];
   };
 
-  nodes = rec {
+  imports = [ testBase ];
+
+  nodes = {
     # The only thing the client needs to do is download a file.
     client = { ... }: {
       services.davfs2.enable = true;
       systemd.tmpfiles.settings.nextcloud = {
         "/tmp/davfs2-secrets"."f+" = {
           mode = "0600";
-          argument = "http://nextcloud/remote.php/dav/files/${adminuser} ${adminuser} ${adminpass}";
+          argument = "http://nextcloud/remote.php/dav/files/${config.adminuser} ${config.adminuser} ${config.adminpass}";
         };
       };
       virtualisation.fileSystems = {
         "/mnt/dav" = {
-          device = "http://nextcloud/remote.php/dav/files/${adminuser}";
+          device = "http://nextcloud/remote.php/dav/files/${config.adminuser}";
           fsType = "davfs";
           options = let
             davfs2Conf = (pkgs.writeText "davfs2.conf" "secrets /tmp/davfs2-secrets");
@@ -30,11 +30,7 @@ in {
       };
     };
 
-    nextcloud = { config, pkgs, ... }: let
-      cfg = config;
-    in {
-      networking.firewall.allowedTCPPorts = [ 80 ];
-
+    nextcloud = { config, pkgs, ... }: {
       systemd.tmpfiles.rules = [
         "d /var/lib/nextcloud-data 0750 nextcloud nginx - -"
       ];
@@ -42,14 +38,6 @@ in {
       services.nextcloud = {
         enable = true;
         datadir = "/var/lib/nextcloud-data";
-        hostName = "nextcloud";
-        database.createLocally = true;
-        config = {
-          # Don't inherit adminuser since "root" is supposed to be the default
-          adminpassFile = "${pkgs.writeText "adminpass" adminpass}"; # Don't try this at home!
-          dbtableprefix = "nixos_";
-        };
-        package = pkgs.${"nextcloud" + (toString nextcloudVersion)};
         autoUpdateApps = {
           enable = true;
           startAt = "20:00";
@@ -57,64 +45,31 @@ in {
         phpExtraExtensions = all: [ all.bz2 ];
       };
 
-      environment.systemPackages = [ cfg.services.nextcloud.occ ];
+      specialisation.withoutMagick.configuration = {
+        services.nextcloud.enableImagemagick = false;
+      };
     };
-
-    nextcloudWithoutMagick = args@{ config, pkgs, lib, ... }:
-      lib.mkMerge
-      [ (nextcloud args)
-        { services.nextcloud.enableImagemagick = false; } ];
   };
 
-  testScript = { nodes, ... }: let
-    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
-      #!${pkgs.runtimeShell}
-      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
-      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${adminuser}"
-      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
-      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
-      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
-      "''${@}"
-    '';
-    copySharedFile = pkgs.writeScript "copy-shared-file" ''
-      #!${pkgs.runtimeShell}
-      echo 'hi' | ${withRcloneEnv} ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
-    '';
-
-    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
-      #!${pkgs.runtimeShell}
-      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
-    '';
-
+  test-helpers.extraTests = { nodes, ... }: let
     findInClosure = what: drv: pkgs.runCommand "find-in-closure" { exportReferencesGraph = [ "graph" drv ]; inherit what; } ''
       test -e graph
       grep "$what" graph >$out || true
     '';
-    nextcloudUsesImagick = findInClosure "imagick" nodes.nextcloud.system.build.vm;
-    nextcloudWithoutDoesntUseIt = findInClosure "imagick" nodes.nextcloudWithoutMagick.system.build.vm;
+    nextcloudWithImagick = findInClosure "imagick" nodes.nextcloud.system.build.vm;
+    nextcloudWithoutImagick = findInClosure "imagick" nodes.nextcloud.specialisation.withoutMagick.configuration.system.build.vm;
   in ''
-    assert open("${nextcloudUsesImagick}").read() != ""
-    assert open("${nextcloudWithoutDoesntUseIt}").read() == ""
+    with subtest("File is in proper nextcloud home"):
+        nextcloud.succeed("test -f ${nodes.nextcloud.services.nextcloud.datadir}/data/root/files/test-shared-file")
+
+    with subtest("Closure checks"):
+        assert open("${nexcloudWithImagick}").read() != ""
+        assert open("${nextcloudWithoutImagick}").read() == ""
+
+    with subtest("Davfs2"):
+        assert "hi" in client.succeed("cat /mnt/dav/test-shared-file")
 
-    nextcloud.start()
-    client.start()
-    nextcloud.wait_for_unit("multi-user.target")
-    # This is just to ensure the nextcloud-occ program is working
-    nextcloud.succeed("nextcloud-occ status")
-    nextcloud.succeed("curl -sSf http://nextcloud/login")
-    # Ensure that no OpenSSL 1.1 is used.
-    nextcloud.succeed(
-        "${nodes.nextcloud.services.phpfpm.pools.nextcloud.phpPackage}/bin/php -i | grep 'OpenSSL Library Version' | awk -F'=>' '{ print $2 }' | awk '{ print $2 }' | grep -v 1.1"
-    )
-    nextcloud.succeed(
-        "${withRcloneEnv} ${copySharedFile}"
-    )
-    client.wait_for_unit("multi-user.target")
-    nextcloud.succeed("test -f /var/lib/nextcloud-data/data/root/files/test-shared-file")
-    client.succeed(
-        "${withRcloneEnv} ${diffSharedFile}"
-    )
-    assert "hi" in client.succeed("cat /mnt/dav/test-shared-file")
-    nextcloud.succeed("grep -vE '^HBEGIN:oc_encryption_module' /var/lib/nextcloud-data/data/root/files/test-shared-file")
+    with subtest("Ensure SSE is disabled by default"):
+        nextcloud.succeed("grep -vE '^HBEGIN:oc_encryption_module' /var/lib/nextcloud-data/data/root/files/test-shared-file")
   '';
-})) args
+})
diff --git a/nixos/tests/nextcloud/default.nix b/nixos/tests/nextcloud/default.nix
index d024adffd9f06..9f8b06561b074 100644
--- a/nixos/tests/nextcloud/default.nix
+++ b/nixos/tests/nextcloud/default.nix
@@ -5,21 +5,108 @@
 
 with pkgs.lib;
 
-foldl
-  (matrix: ver: matrix // {
-    "basic${toString ver}" = import ./basic.nix { inherit system pkgs; nextcloudVersion = ver; };
-    "with-postgresql-and-redis${toString ver}" = import ./with-postgresql-and-redis.nix {
-      inherit system pkgs;
-      nextcloudVersion = ver;
-    };
-    "with-mysql-and-memcached${toString ver}" = import ./with-mysql-and-memcached.nix {
-      inherit system pkgs;
-      nextcloudVersion = ver;
-    };
-    "with-declarative-redis-and-secrets${toString ver}" = import ./with-declarative-redis-and-secrets.nix {
-      inherit system pkgs;
-      nextcloudVersion = ver;
+let
+  baseModule = { config, ... }: {
+    imports = [
+      {
+        options.test-helpers = {
+          rclone = mkOption { type = types.str; };
+          upload-sample = mkOption { type = types.str; };
+          check-sample = mkOption { type = types.str; };
+          init = mkOption { type = types.str; default = ""; };
+          extraTests = mkOption { type = types.either types.str (types.functionTo types.str); default = ""; };
+        };
+        options.adminuser = mkOption { type = types.str; };
+        options.adminpass = mkOption { type = types.str; };
+      }
+    ];
+
+    adminuser = "root";
+    adminpass = "hunter2";
+
+    test-helpers.rclone = "${pkgs.writeShellScript "rclone" ''
+      set -euo pipefail
+      export PATH="${pkgs.rclone}/bin:$PATH"
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${config.adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${config.adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(rclone obscure ${config.adminpass})"
+      exec "$@"
+    ''}";
+    test-helpers.upload-sample = "${pkgs.writeShellScript "rclone-upload" ''
+      <<<'hi' rclone rcat nextcloud:test-shared-file
+    ''}";
+    test-helpers.check-sample = "${pkgs.writeShellScript "check-sample" ''
+      set -e
+      diff <(echo 'hi') <(rclone cat nextcloud:test-shared-file)
+    ''}";
+
+    nodes = {
+      client = { ... }: {};
+      nextcloud = {
+        networking.firewall.allowedTCPPorts = [ 80 ];
+        services.nextcloud = {
+          enable = true;
+          hostName = "nextcloud";
+          https = false;
+          database.createLocally = true;
+          config = {
+            adminpassFile = "${pkgs.writeText "adminpass" config.adminpass}"; # Don't try this at home!
+          };
+        };
+      };
     };
-  })
-{ }
-  [ 27 28 29 ]
+
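+    # Shared scaffolding for every Nextcloud scenario: boot both nodes, run the
+    # per-scenario init hook, sanity-check occ and the login page, then do an
+    # upload/download round trip over WebDAV before running any extraTests.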
+    testScript = args@{ nodes, ... }: let
+      inherit (config) test-helpers;
+    in mkBefore ''
+      nextcloud.start()
+      client.start()
+      nextcloud.wait_for_unit("multi-user.target")
+
+      ${test-helpers.init}
+
+      with subtest("Ensure nextcloud-occ is working"):
+          nextcloud.succeed("nextcloud-occ status")
+          nextcloud.succeed("curl -sSf http://nextcloud/login")
+
+      with subtest("Upload/Download test"):
+          nextcloud.succeed(
+              "${test-helpers.rclone} ${test-helpers.upload-sample}"
+          )
+          client.wait_for_unit("multi-user.target")
+          client.succeed(
+              "${test-helpers.rclone} ${test-helpers.check-sample}"
+          )
+
+      ${if builtins.isFunction test-helpers.extraTests then test-helpers.extraTests args else test-helpers.extraTests}
+    '';
+  };
+
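+  # Instantiate every scenario below once per supported Nextcloud major version.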
+  genTests = version:
+    let
+      testBase.imports = [
+        baseModule
+        {
+          nodes.nextcloud = { pkgs, ... }: {
+            services.nextcloud.package = pkgs.${"nextcloud${toString version}"};
+          };
+        }
+      ];
+
+      callNextcloudTest = path:
+        let
+          name = "${removeSuffix ".nix" (baseNameOf path)}${toString version}";
+        in nameValuePair name (import path {
+          inherit system pkgs testBase;
+          name = "nextcloud-${name}";
+        });
+    in map callNextcloudTest [
+      ./basic.nix
+      ./with-mysql-and-memcached.nix
+      ./with-postgresql-and-redis.nix
+      ./with-objectstore.nix
+    ];
+in
+listToAttrs (concatMap genTests [ 28 29 ])
diff --git a/nixos/tests/nextcloud/with-mysql-and-memcached.nix b/nixos/tests/nextcloud/with-mysql-and-memcached.nix
index 035a7fdcb0c80..07a3e56fae4af 100644
--- a/nixos/tests/nextcloud/with-mysql-and-memcached.nix
+++ b/nixos/tests/nextcloud/with-mysql-and-memcached.nix
@@ -1,79 +1,37 @@
-args@{ pkgs, nextcloudVersion ? 22, ... }:
+{ pkgs, testBase, system, ... }:
 
-(import ../make-test-python.nix ({ pkgs, ...}: let
-  adminpass = "hunter2";
-  adminuser = "root";
-in {
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+runTest ({ config, ... }: {
   name = "nextcloud-with-mysql-and-memcached";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ eqyiel ];
   };
 
-  nodes = {
-    # The only thing the client needs to do is download a file.
-    client = { ... }: {};
+  imports = [ testBase ];
 
+  nodes = {
     nextcloud = { config, pkgs, ... }: {
-      networking.firewall.allowedTCPPorts = [ 80 ];
-
       services.nextcloud = {
-        enable = true;
-        hostName = "nextcloud";
-        https = true;
-        package = pkgs.${"nextcloud" + (toString nextcloudVersion)};
         caching = {
           apcu = true;
           redis = false;
           memcached = true;
         };
-        database.createLocally = true;
-        config = {
-          dbtype = "mysql";
-          # Don't inherit adminuser since "root" is supposed to be the default
-          adminpassFile = "${pkgs.writeText "adminpass" adminpass}"; # Don't try this at home!
-        };
+        config.dbtype = "mysql";
       };
 
       services.memcached.enable = true;
     };
   };
 
-  testScript = let
+  test-helpers.init = let
     configureMemcached = pkgs.writeScript "configure-memcached" ''
-      #!${pkgs.runtimeShell}
       nextcloud-occ config:system:set memcached_servers 0 0 --value 127.0.0.1 --type string
       nextcloud-occ config:system:set memcached_servers 0 1 --value 11211 --type integer
       nextcloud-occ config:system:set memcache.local --value '\OC\Memcache\APCu' --type string
       nextcloud-occ config:system:set memcache.distributed --value '\OC\Memcache\Memcached' --type string
     '';
-    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
-      #!${pkgs.runtimeShell}
-      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
-      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${adminuser}"
-      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
-      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
-      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
-    '';
-    copySharedFile = pkgs.writeScript "copy-shared-file" ''
-      #!${pkgs.runtimeShell}
-      echo 'hi' | ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
-    '';
-
-    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
-      #!${pkgs.runtimeShell}
-      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
-    '';
   in ''
-    start_all()
-    nextcloud.wait_for_unit("multi-user.target")
     nextcloud.succeed("${configureMemcached}")
-    nextcloud.succeed("curl -sSf http://nextcloud/login")
-    nextcloud.succeed(
-        "${withRcloneEnv} ${copySharedFile}"
-    )
-    client.wait_for_unit("multi-user.target")
-    client.succeed(
-        "${withRcloneEnv} ${diffSharedFile}"
-    )
   '';
-})) args
+})
diff --git a/nixos/tests/nextcloud/with-objectstore.nix b/nixos/tests/nextcloud/with-objectstore.nix
new file mode 100644
index 0000000000000..fc26760b8babd
--- /dev/null
+++ b/nixos/tests/nextcloud/with-objectstore.nix
@@ -0,0 +1,96 @@
+{ name, pkgs, testBase, system, ... }:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+runTest ({ config, lib, ... }: let
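+  # Static MinIO credentials, used both as the server's root user and as
+  # Nextcloud's S3 access key; test-only values.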
+  accessKey = "BKIKJAA5BMMU2RHO6IBB";
+  secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
+
+  rootCredentialsFile = pkgs.writeText "minio-credentials-full" ''
+    MINIO_ROOT_USER=${accessKey}
+    MINIO_ROOT_PASSWORD=${secretKey}
+  '';
+in {
+  inherit name;
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ onny ma27 ];
+  };
+
+  imports = [ testBase ];
+
+  nodes = {
+    nextcloud = { config, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 9000 ];
+      environment.systemPackages = [ pkgs.minio-client ];
+
+      services.nextcloud.config.objectstore.s3 = {
+        enable = true;
+        bucket = "nextcloud";
+        autocreate = true;
+        key = accessKey;
+        secretFile = "${pkgs.writeText "secretKey" secretKey}";
+        hostname = "nextcloud";
+        useSsl = false;
+        port = 9000;
+        usePathStyle = true;
+        region = "us-east-1";
+      };
+
+      services.minio = {
+        enable = true;
+        listenAddress = "0.0.0.0:9000";
+        consoleAddress = "0.0.0.0:9001";
+        inherit rootCredentialsFile;
+      };
+    };
+  };
+
+  test-helpers.init = ''
+    nextcloud.wait_for_open_port(9000)
+  '';
+
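+  # The uploaded sample must end up in the S3 bucket rather than in the
+  # local data directory.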
+  test-helpers.extraTests = { nodes, ... }: ''
+    with subtest("File is not on the filesystem"):
+        nextcloud.succeed("test ! -e ${nodes.nextcloud.services.nextcloud.home}/data/root/files/test-shared-file")
+
+    with subtest("Check if file is in S3"):
+        nextcloud.succeed(
+            "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4"
+        )
+        files = nextcloud.succeed('mc ls minio/nextcloud|sort').strip().split('\n')
+
+        # Cannot assert an exact number here: nc27 writes more objects into S3 initially.
+        # For now, assume the uploaded sample is always the most recently added entry.
+        assert len(files) > 0, f"""
+          Expected to have at least one object in minio/nextcloud. But `mc ls` gave output:
+
+          '{files}'
+        """
+
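+        # `mc ls` prints entries like "[<timestamp>] 3B STANDARD urn:oid:<id>";
+        # pull size, type and object name out of the most recent entry.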
+        import re
+        ptrn = re.compile(r"^\[[A-Z0-9 :-]+\] +(?P<details>[A-Za-z0-9 :]+)$")
+        match = ptrn.match(files[-1].strip())
+        assert match, "Cannot match mc client output!"
+        size, type_, file = tuple(match.group('details').split(' '))
+
+        assert size == "3B", f"""
+          Expected size of uploaded file to be 3 bytes, got {size}
+        """
+
+        assert type_ == 'STANDARD', f"""
+          Expected type of bucket entry to be a file, i.e. 'STANDARD'. Got {type_}
+        """
+
+        assert file.startswith('urn:oid'), f"""
+          Expected filename to start with 'urn:oid', instead got '{file}'.
+        """
+
+    with subtest("Test download from S3"):
+        client.succeed(
+            "env AWS_ACCESS_KEY_ID=${accessKey} AWS_SECRET_ACCESS_KEY=${secretKey} "
+            + f"${lib.getExe pkgs.awscli2} s3 cp s3://nextcloud/{file} test --endpoint-url http://nextcloud:9000 "
+            + "--region us-east-1"
+        )
+
+        client.succeed("test hi = $(cat test)")
+  '';
+})
diff --git a/nixos/tests/nextcloud/with-postgresql-and-redis.nix b/nixos/tests/nextcloud/with-postgresql-and-redis.nix
index 06afc589403dd..24c17f70932d3 100644
--- a/nixos/tests/nextcloud/with-postgresql-and-redis.nix
+++ b/nixos/tests/nextcloud/with-postgresql-and-redis.nix
@@ -1,45 +1,30 @@
-args@{ pkgs, nextcloudVersion ? 22, ... }:
+{ name, pkgs, testBase, system, ... }:
 
-(import ../make-test-python.nix ({ pkgs, ...}: let
-  adminpass = "hunter2";
-  adminuser = "custom-admin-username";
-in {
-  name = "nextcloud-with-postgresql-and-redis";
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+runTest ({ config, ... }: {
+  inherit name;
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eqyiel ];
+    maintainers = [ eqyiel ma27 ];
   };
 
-  nodes = {
-    # The only thing the client needs to do is download a file.
-    client = { ... }: {};
+  imports = [ testBase ];
 
+  nodes = {
     nextcloud = { config, pkgs, lib, ... }: {
-      networking.firewall.allowedTCPPorts = [ 80 ];
-
       services.nextcloud = {
-        enable = true;
-        hostName = "nextcloud";
-        package = pkgs.${"nextcloud" + (toString nextcloudVersion)};
         caching = {
           apcu = false;
           redis = true;
           memcached = false;
         };
-        database.createLocally = true;
-        config = {
-          dbtype = "pgsql";
-          inherit adminuser;
-          adminpassFile = toString (pkgs.writeText "admin-pass-file" ''
-            ${adminpass}
-          '');
-        };
+        config.dbtype = "pgsql";
         notify_push = {
           enable = true;
           logLevel = "debug";
         };
         extraAppsEnable = true;
-        extraApps = {
-          inherit (pkgs."nextcloud${lib.versions.major config.services.nextcloud.package.version}Packages".apps) notify_push notes;
+        extraApps = with config.services.nextcloud.package.packages.apps; {
+          inherit notify_push notes;
         };
         settings.trusted_proxies = [ "::1" ];
       };
@@ -49,50 +34,27 @@ in {
     };
   };
 
-  testScript = let
+  test-helpers.init = let
     configureRedis = pkgs.writeScript "configure-redis" ''
-      #!${pkgs.runtimeShell}
       nextcloud-occ config:system:set redis 'host' --value 'localhost' --type string
       nextcloud-occ config:system:set redis 'port' --value 6379 --type integer
       nextcloud-occ config:system:set memcache.local --value '\OC\Memcache\Redis' --type string
       nextcloud-occ config:system:set memcache.locking --value '\OC\Memcache\Redis' --type string
     '';
-    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
-      #!${pkgs.runtimeShell}
-      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
-      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${adminuser}"
-      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
-      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
-      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
-      "''${@}"
-    '';
-    copySharedFile = pkgs.writeScript "copy-shared-file" ''
-      #!${pkgs.runtimeShell}
-      echo 'hi' | ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
-    '';
-
-    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
-      #!${pkgs.runtimeShell}
-      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
-    '';
   in ''
-    start_all()
-    nextcloud.wait_for_unit("multi-user.target")
     nextcloud.succeed("${configureRedis}")
-    nextcloud.succeed("curl -sSf http://nextcloud/login")
-    nextcloud.succeed(
-        "${withRcloneEnv} ${copySharedFile}"
-    )
-    client.wait_for_unit("multi-user.target")
-    client.execute("${pkgs.lib.getExe pkgs.nextcloud-notify_push.passthru.test_client} http://nextcloud ${adminuser} ${adminpass} >&2 &")
-    client.succeed(
-        "${withRcloneEnv} ${diffSharedFile}"
-    )
-    nextcloud.wait_until_succeeds("journalctl -u nextcloud-notify_push | grep -q \"Sending ping to ${adminuser}\"")
+  '';
+
+  test-helpers.extraTests = ''
+    with subtest("notify-push"):
+        client.execute("${pkgs.lib.getExe pkgs.nextcloud-notify_push.passthru.test_client} http://nextcloud ${config.adminuser} ${config.adminpass} >&2 &")
+        nextcloud.wait_until_succeeds("journalctl -u nextcloud-notify_push | grep -q \"Sending ping to ${config.adminuser}\"")
 
-    # redis cache should not be empty
-    nextcloud.fail('test "[]" = "$(redis-cli --json KEYS "*")"')
+    with subtest("Redis is used for caching"):
+        # redis cache should not be empty
+        nextcloud.fail('test "[]" = "$(redis-cli --json KEYS "*")"')
 
-    nextcloud.fail("curl -f http://nextcloud/nix-apps/notes/lib/AppInfo/Application.php")
+    with subtest("No code is returned when requesting PHP files (regression test)"):
+        nextcloud.fail("curl -f http://nextcloud/nix-apps/notes/lib/AppInfo/Application.php")
   '';
-})) args
+})
diff --git a/nixos/tests/nfs/simple.nix b/nixos/tests/nfs/simple.nix
index 026da9563bc03..077c1d4109356 100644
--- a/nixos/tests/nfs/simple.nix
+++ b/nixos/tests/nfs/simple.nix
@@ -20,7 +20,7 @@ in
 {
   name = "nfs";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco ];
+    maintainers = [ ];
   };
 
   nodes =
diff --git a/nixos/tests/nix-required-mounts/default.nix b/nixos/tests/nix-required-mounts/default.nix
new file mode 100644
index 0000000000000..60f894ce0bcc6
--- /dev/null
+++ b/nixos/tests/nix-required-mounts/default.nix
@@ -0,0 +1,58 @@
+{ pkgs, ... }:
+
+let
+  inherit (pkgs) lib;
+in
+
+{
+  name = "nix-required-mounts";
+  meta.maintainers = with lib.maintainers; [ SomeoneSerge ];
+  nodes.machine =
+    { config, pkgs, ... }:
+    {
+      virtualisation.writableStore = true;
+      system.extraDependencies = [ (pkgs.runCommand "deps" { } "mkdir $out").inputDerivation ];
+      nix.nixPath = [ "nixpkgs=${../../..}" ];
+      nix.settings.substituters = lib.mkForce [ ];
+      nix.settings.system-features = [ "supported-feature" ];
+      nix.settings.experimental-features = [ "nix-command" ];
+      programs.nix-required-mounts.enable = true;
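+      # Expose these host paths inside the build sandbox of any derivation
+      # that declares "supported-feature" in its requiredSystemFeatures.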
+      programs.nix-required-mounts.allowedPatterns.supported-feature = {
+        onFeatures = [ "supported-feature" ];
+        paths = [
+          "/supported-feature-files"
+          {
+            host = "/usr/lib/imaginary-fhs-drivers";
+            guest = "/run/opengl-driver/lib";
+          }
+        ];
+        unsafeFollowSymlinks = true;
+      };
+      users.users.person.isNormalUser = true;
+      systemd.tmpfiles.rules = [
+        "d /supported-feature-files 0755 person users -"
+        "f /usr/lib/libcuda.so 0444 root root - fakeContent"
+        "L /usr/lib/imaginary-fhs-drivers/libcuda.so 0444 root root - /usr/lib/libcuda.so"
+      ];
+    };
+  testScript = ''
+    import shlex
+
+    def person_do(cmd, succeed=True):
+        cmd = shlex.quote(cmd)
+        cmd = f"su person -l -c {cmd} &>/dev/console"
+
+        if succeed:
+            return machine.succeed(cmd)
+        else:
+            return machine.fail(cmd)
+
+    start_all()
+
+    person_do("nix-build ${./ensure-path-not-present.nix} --argstr feature supported-feature")
+    person_do("nix-build ${./test-require-feature.nix} --argstr feature supported-feature")
+    person_do("nix-build ${./test-require-feature.nix} --argstr feature unsupported-feature", succeed=False)
+    person_do("nix-build ${./test-structured-attrs.nix} --argstr feature supported-feature")
+    person_do("nix-build ${./test-structured-attrs-empty.nix}")
+  '';
+}
diff --git a/nixos/tests/nix-required-mounts/ensure-path-not-present.nix b/nixos/tests/nix-required-mounts/ensure-path-not-present.nix
new file mode 100644
index 0000000000000..270c268fcbd9e
--- /dev/null
+++ b/nixos/tests/nix-required-mounts/ensure-path-not-present.nix
@@ -0,0 +1,13 @@
+{
+  pkgs ? import <nixpkgs> { },
+  feature,
+}:
+
+pkgs.runCommandNoCC "${feature}-not-present" { } ''
+  if [[ -e /${feature}-files ]]; then
+    echo "No ${feature} in requiredSystemFeatures, but /${feature}-files was mounted anyway"
+    exit 1
+  else
+    touch $out
+  fi
+''
diff --git a/nixos/tests/nix-required-mounts/test-require-feature.nix b/nixos/tests/nix-required-mounts/test-require-feature.nix
new file mode 100644
index 0000000000000..447fd49a300aa
--- /dev/null
+++ b/nixos/tests/nix-required-mounts/test-require-feature.nix
@@ -0,0 +1,26 @@
+{
+  pkgs ? import <nixpkgs> { },
+  feature,
+}:
+
+pkgs.runCommandNoCC "${feature}-present" { requiredSystemFeatures = [ feature ]; } ''
+  if [[ ! -e /${feature}-files ]]; then
+    echo "The host declares ${feature} support, but doesn't expose /${feature}-files" >&2
+    exit 1
+  fi
+  libcudaLocation=/run/opengl-driver/lib/libcuda.so
+  if [[ -e "$libcudaLocation" || -h "$libcudaLocation" ]] ; then
+    true # we're good
+  else
+    echo "The host declares ${feature} support, but it the hook fails to handle the hostPath != guestPath cases" >&2
+    exit 1
+  fi
+  if cat "$libcudaLocation" | xargs test fakeContent = ; then
+    true # we're good
+  else
+    echo "The host declares ${feature} support, but it seems to fail to follow symlinks" >&2
+    echo "The content of /run/opengl-driver/lib/libcuda.so is: $(cat /run/opengl-driver/lib/libcuda.so)" >&2
+    exit 1
+  fi
+  touch $out
+''
diff --git a/nixos/tests/nix-required-mounts/test-structured-attrs-empty.nix b/nixos/tests/nix-required-mounts/test-structured-attrs-empty.nix
new file mode 100644
index 0000000000000..86f2753309368
--- /dev/null
+++ b/nixos/tests/nix-required-mounts/test-structured-attrs-empty.nix
@@ -0,0 +1,8 @@
+{
+  pkgs ? import <nixpkgs> { },
+}:
+
+pkgs.runCommandNoCC "nix-required-mounts-structured-attrs-no-features" { __structuredAttrs = true; }
+  ''
+    touch $out
+  ''
diff --git a/nixos/tests/nix-required-mounts/test-structured-attrs.nix b/nixos/tests/nix-required-mounts/test-structured-attrs.nix
new file mode 100644
index 0000000000000..874910eee7bb3
--- /dev/null
+++ b/nixos/tests/nix-required-mounts/test-structured-attrs.nix
@@ -0,0 +1,18 @@
+{
+  pkgs ? import <nixpkgs> { },
+  feature,
+}:
+
+pkgs.runCommandNoCC "${feature}-present-structured"
+  {
+    __structuredAttrs = true;
+    requiredSystemFeatures = [ feature ];
+  }
+  ''
+    if [[ -e /${feature}-files ]]; then
+      touch $out
+    else
+      echo "The host declares ${feature} support, but doesn't expose /${feature}-files" >&2
+      echo "Do we fail to parse __structuredAttrs=true derivations?" >&2
+    fi
+  ''
diff --git a/nixos/tests/nix-serve.nix b/nixos/tests/nix-serve.nix
index 3aa913f81107a..c41bc505e9635 100644
--- a/nixos/tests/nix-serve.nix
+++ b/nixos/tests/nix-serve.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ... }:
+{ config, ... }:
 {
   name = "nix-serve";
   nodes.machine = { pkgs, ... }: {
@@ -9,7 +9,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
   };
   testScript = let
     pkgHash = builtins.head (
-      builtins.match "${builtins.storeDir}/([^-]+).+" (toString pkgs.hello)
+      builtins.match "${builtins.storeDir}/([^-]+).+" (toString config.node.pkgs.hello)
     );
   in ''
     start_all()
@@ -19,4 +19,4 @@ import ./make-test-python.nix ({ pkgs, ... }:
         "curl --fail -g http://0.0.0.0:5000/nar/${pkgHash}.nar -o /tmp/hello.nar"
     )
   '';
-})
+}
diff --git a/nixos/tests/nix/misc.nix b/nixos/tests/nix/misc.nix
new file mode 100644
index 0000000000000..6a22ffe0d901f
--- /dev/null
+++ b/nixos/tests/nix/misc.nix
@@ -0,0 +1,64 @@
+# Miscellaneous small tests that don't warrant their own VM run.
+{ pkgs, ... }:
+
+let
+  inherit (pkgs) lib;
+  tests = {
+    default = testsForPackage { nixPackage = pkgs.nix; };
+    lix = testsForPackage { nixPackage = pkgs.lix; };
+  };
+
+  testsForPackage = args: lib.recurseIntoAttrs {
+    # This attribute must be named 'test', otherwise things break on the
+    # release-*.nix side: `discoverTests` relies on a `test` attribute
+    # being present to perform a `callTest`.
+    test = testMiscFeatures args;
+    passthru.override = args': testsForPackage (args // args');
+  };
+
+  testMiscFeatures = { nixPackage, ... }: pkgs.testers.nixosTest (
+  let
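+    # Fixed-content store path whose NAR hash and size are asserted below.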
+    foo = pkgs.writeText "foo" "Hello World";
+  in {
+    name = "${nixPackage.pname}-misc";
+    meta.maintainers = with lib.maintainers; [ raitobezarius artturin ];
+
+    nodes.machine =
+      { lib, ... }:
+      {
+        system.extraDependencies = [ foo ];
+
+        nix.package = nixPackage;
+      };
+
+    testScript =
+      ''
+        import json
+
+        def get_path_info(path):
+            result = machine.succeed(f"nix --option experimental-features nix-command path-info --json {path}")
+            parsed = json.loads(result)
+            return parsed
+
+        with subtest("nix-db"):
+            out = "${foo}"
+            info = get_path_info(out)
+            print(info)
+
+            pathinfo = info[0] if isinstance(info, list) else info[out]
+
+            if (
+                pathinfo["narHash"]
+                != "sha256-BdMdnb/0eWy3EddjE83rdgzWWpQjfWPAj3zDIFMD3Ck="
+            ):
+                raise Exception("narHash not set")
+
+            if pathinfo["narSize"] != 128:
+                raise Exception("narSize not set")
+
+        with subtest("nix-db"):
+            machine.succeed("nix-store -qR /run/current-system | grep nixos-")
+      '';
+  });
+in
+tests
diff --git a/nixos/tests/nix/upgrade.nix b/nixos/tests/nix/upgrade.nix
new file mode 100644
index 0000000000000..c55441586b322
--- /dev/null
+++ b/nixos/tests/nix/upgrade.nix
@@ -0,0 +1,108 @@
+{ pkgs, nixVersions, ... }:
+let
+  lib = pkgs.lib;
+
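+  # Local stand-in for the fallback-paths.nix file that `nix upgrade-nix`
+  # normally fetches over the network; it points at the newer nix package.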
+  fallback-paths-external = pkgs.writeTextDir "fallback-paths.nix" ''
+    {
+      ${pkgs.system} = "${nixVersions.latest}";
+    }'';
+
+  inputDrv = import ../.. {
+    configuration = {
+      imports = [ nixos-module ];
+      nix.package = nixVersions.latest;
+      boot.isContainer = true;
+
+      users.users.alice.isNormalUser = true;
+    };
+    system = pkgs.system;
+  };
+
+  nixos-module = builtins.toFile "nixos-module.nix" ''
+    { lib, pkgs, modulesPath, ... }:
+    {
+      imports = [
+        (modulesPath + "/profiles/minimal.nix")
+        (modulesPath + "/testing/test-instrumentation.nix")
+      ];
+
+      hardware.enableAllFirmware = lib.mkForce false;
+
+      nix.settings.substituters = lib.mkForce [];
+      nix.settings.hashed-mirrors = null;
+      nix.settings.connect-timeout = 1;
+      nix.extraOptions = "experimental-features = nix-command";
+
+      environment.localBinInPath = true;
+      users.users.alice = {
+        isNormalUser = true;
+        packages = [ pkgs.nixVersions.latest ];
+      };
+      documentation.enable = false;
+    }
+  '';
+in
+
+pkgs.testers.nixosTest {
+  name = "nix-upgrade-${nixVersions.stable.version}-${nixVersions.latest.version}";
+  meta.maintainers = with lib.maintainers; [ tomberek ];
+
+  nodes.machine = {
+    imports = [ nixos-module ];
+
+    nix.package = nixVersions.stable;
+    system.extraDependencies = [
+      fallback-paths-external
+      inputDrv.system
+    ];
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("multi-user.target")
+
+    with subtest("nix-current"):
+        # Create a profile to pretend we are on non-NixOS
+
+        print(machine.succeed("nix --version"))
+        print(machine.succeed("nix-env -i /run/current-system/sw/bin/nix -p /root/.local"))
+
+    with subtest("nix-upgrade"):
+        print(machine.succeed("nix upgrade-nix --nix-store-paths-url file://${fallback-paths-external}/fallback-paths.nix --profile /root/.local"))
+        result = machine.succeed("nix --version")
+        print(result)
+
+        import re
+        match = re.match(r".*${nixVersions.latest.version}$",result)
+        if not match: raise Exception("Couldn't find new version in output: " + result)
+
+    with subtest("nix-build-with-mismatch-daemon"):
+        machine.succeed("runuser -u alice -- nix build --expr 'derivation {name =\"test\"; system = \"${pkgs.system}\";builder = \"/bin/sh\"; args = [\"-c\" \"echo test > $out\"];}' --print-out-paths")
+
+
+    with subtest("remove-new-nix"):
+        machine.succeed("rm -rf /root/.local")
+
+        result = machine.succeed("nix --version")
+        print(result)
+
+        import re
+        match = re.match(r".*${nixVersions.stable.version}$",result)
+        if not match: raise Exception("Couldn't find old version in output: " + result)
+
+    with subtest("upgrade-via-switch-to-configuration"):
+        # not using nixos-rebuild due to nix-instantiate being called and forcing all drv's to be rebuilt
+        print(machine.succeed("${inputDrv.system.outPath}/bin/switch-to-configuration switch"))
+        result = machine.succeed("nix --version")
+        print(result)
+
+        import re
+        match = re.match(r".*${nixVersions.latest.version}$",result)
+        if not match: raise Exception("Couldn't find new version in output: " + result)
+
+    with subtest("nix-build-with-new-daemon"):
+        machine.succeed("runuser -u alice -- nix build --expr 'derivation {name =\"test-new\"; system = \"${pkgs.system}\";builder = \"/bin/sh\"; args = [\"-c\" \"echo test > $out\"];}' --print-out-paths")
+
+    with subtest("nix-collect-garbage-with-old-nix"):
+        machine.succeed("${nixVersions.stable}/bin/nix-collect-garbage")
+  '';
+}
diff --git a/nixos/tests/nixos-rebuild-specialisations.nix b/nixos/tests/nixos-rebuild-specialisations.nix
index 9192b8a8a030b..ab67bbaba6762 100644
--- a/nixos/tests/nixos-rebuild-specialisations.nix
+++ b/nixos/tests/nixos-rebuild-specialisations.nix
@@ -21,6 +21,8 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         pkgs.grub2
       ];
 
+      system.switch.enable = true;
+
       virtualisation = {
         cores = 2;
         memorySize = 4096;
diff --git a/nixos/tests/nvidia-container-toolkit.nix b/nixos/tests/nvidia-container-toolkit.nix
new file mode 100644
index 0000000000000..b22b989c0814f
--- /dev/null
+++ b/nixos/tests/nvidia-container-toolkit.nix
@@ -0,0 +1,149 @@
+{ pkgs, lib, ... }:
+let
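+  # Script run inside the test container: check that every ELF file under the
+  # mounted driver paths resolves all of its dynamic library dependencies.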
+  testCDIScript = pkgs.writeShellScriptBin "test-cdi" ''
+    die() {
+      echo "$1"
+      exit 1
+    }
+
+    check_file_referential_integrity() {
+      echo "checking $1 referential integrity"
+      ( ${pkgs.glibc.bin}/bin/ldd "$1" | ${lib.getExe pkgs.gnugrep} "not found" &> /dev/null ) && return 1
+      return 0
+    }
+
+    check_directory_referential_integrity() {
+      ${lib.getExe pkgs.findutils} "$1" -type f -print0 | while read -d $'\0' file; do
+        if [[ $(${lib.getExe pkgs.file} "$file" | ${lib.getExe pkgs.gnugrep} ELF) ]]; then
+          check_file_referential_integrity "$file" || exit 1
+        else
+          echo "skipping $file: not an ELF file"
+        fi
+      done
+    }
+
+    check_directory_referential_integrity "/usr/bin" || exit 1
+    check_directory_referential_integrity "${pkgs.addDriverRunpath.driverLink}" || exit 1
+    check_directory_referential_integrity "/usr/local/nvidia" || exit 1
+  '';
+  testContainerImage = pkgs.dockerTools.buildImage {
+    name = "cdi-test";
+    tag = "latest";
+    config = {
+      Cmd = [ (lib.getExe testCDIScript) ];
+    };
+    copyToRoot = with pkgs.dockerTools; [
+      usrBinEnv
+      binSh
+    ];
+  };
+  emptyCDISpec = ''
+    {
+      "cdiVersion": "0.5.0",
+      "kind": "nvidia.com/gpu",
+      "devices": [
+        {
+          "name": "all",
+          "containerEdits": {
+            "deviceNodes": [
+              {
+                "path": "/dev/urandom"
+              }
+            ],
+            "hooks": [],
+            "mounts": []
+          }
+        }
+      ],
+      "containerEdits": {
+        "deviceNodes": [],
+        "hooks": [],
+        "mounts": []
+      }
+    }
+  '';
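+  # Dummy toolkit package whose nvidia-ctk simply prints the static CDI spec
+  # above, so the CDI generator service can run without real NVIDIA hardware.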
+  nvidia-container-toolkit = {
+    enable = true;
+    package = pkgs.stdenv.mkDerivation {
+      pname = "nvidia-ctk-dummy";
+      version = "1.0.0";
+      dontUnpack = true;
+      dontBuild = true;
+
+      inherit emptyCDISpec;
+      passAsFile = [ "emptyCDISpec" ];
+
+      installPhase = ''
+        mkdir -p $out/bin $out/share/nvidia-container-toolkit
+        cp "$emptyCDISpecPath" "$out/share/nvidia-container-toolkit/spec.json"
+        echo -n "$emptyCDISpec" > "$out/bin/nvidia-ctk";
+        cat << EOF > "$out/bin/nvidia-ctk"
+        #!${pkgs.runtimeShell}
+        cat "$out/share/nvidia-container-toolkit/spec.json"
+        EOF
+        chmod +x $out/bin/nvidia-ctk
+      '';
+      meta.mainProgram = "nvidia-ctk";
+    };
+  };
+in
+{
+  name = "nvidia-container-toolkit";
+  meta = with lib.maintainers; {
+    maintainers = [ ereslibre ];
+  };
+  defaults =
+    { config, ... }:
+    {
+      environment.systemPackages = with pkgs; [ jq ];
+      virtualisation.diskSize = lib.mkDefault 10240;
+      virtualisation.containers.enable = lib.mkDefault true;
+      hardware = {
+        inherit nvidia-container-toolkit;
+        nvidia = {
+          open = true;
+          package = config.boot.kernelPackages.nvidiaPackages.stable.open;
+        };
+        graphics.enable = lib.mkDefault true;
+      };
+    };
+  nodes = {
+    no-gpus = {
+      virtualisation.containers.enable = false;
+      hardware.graphics.enable = false;
+    };
+    one-gpu =
+      { pkgs, ... }:
+      {
+        environment.systemPackages = with pkgs; [ podman ];
+        hardware.graphics.enable = true;
+      };
+
+    one-gpu-invalid-host-paths = {
+      hardware.nvidia-container-toolkit.mounts = [
+        {
+          hostPath = "/non-existant-path";
+          containerPath = "/some/path";
+        }
+      ];
+    };
+  };
+  testScript = ''
+    start_all()
+
+    with subtest("Generate an empty CDI spec for a machine with no Nvidia GPUs"):
+      no_gpus.wait_for_unit("nvidia-container-toolkit-cdi-generator.service")
+      no_gpus.succeed("cat /var/run/cdi/nvidia-container-toolkit.json | jq")
+
+    with subtest("Podman loads the generated CDI spec for a machine with an Nvidia GPU"):
+      one_gpu.wait_for_unit("nvidia-container-toolkit-cdi-generator.service")
+      one_gpu.succeed("cat /var/run/cdi/nvidia-container-toolkit.json | jq")
+      one_gpu.succeed("podman load < ${testContainerImage}")
+      print(one_gpu.succeed("podman run --pull=never --device=nvidia.com/gpu=all -v /run/opengl-driver:/run/opengl-driver:ro cdi-test:latest"))
+
+    # Issue: https://github.com/NixOS/nixpkgs/issues/319201
+    with subtest("The generated CDI spec skips specified non-existant paths in the host"):
+      one_gpu_invalid_host_paths.wait_for_unit("nvidia-container-toolkit-cdi-generator.service")
+      one_gpu_invalid_host_paths.fail("grep 'non-existent-path' /var/run/cdi/nvidia-container-toolkit.json")
+  '';
+}
diff --git a/nixos/tests/nvmetcfg.nix b/nixos/tests/nvmetcfg.nix
index a4c459a343cfd..169e5e9d7b0c9 100644
--- a/nixos/tests/nvmetcfg.nix
+++ b/nixos/tests/nvmetcfg.nix
@@ -27,7 +27,7 @@ import ./make-test-python.nix ({ lib, ... }: {
 
     with subtest("Bind subsystem to port"):
       server.wait_for_unit("network-online.target")
-      server.succeed("nvmet port add 1 tcp 0.0.0.0:4420")
+      server.succeed("nvmet port add 1 tcp [::]:4420")
       server.succeed("nvmet port add-subsystem 1 ${subsystem}")
 
     with subtest("Discover and connect to available subsystems"):
diff --git a/nixos/tests/nzbhydra2.nix b/nixos/tests/nzbhydra2.nix
index e1d528cd9520e..6262a50b4be0e 100644
--- a/nixos/tests/nzbhydra2.nix
+++ b/nixos/tests/nzbhydra2.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ lib, ... }:
   {
     name = "nzbhydra2";
-    meta.maintainers = with lib.maintainers; [ jamiemagee ];
+    meta.maintainers = with lib.maintainers; [ matteopacini ];
 
     nodes.machine = { pkgs, ... }: { services.nzbhydra2.enable = true; };
 
diff --git a/nixos/tests/oci-containers.nix b/nixos/tests/oci-containers.nix
index 1f8e276204a82..9adceb11f18c1 100644
--- a/nixos/tests/oci-containers.nix
+++ b/nixos/tests/oci-containers.nix
@@ -20,7 +20,7 @@ let
           inherit backend;
           containers.nginx = {
             image = "nginx-container";
-            imageFile = pkgs.dockerTools.examples.nginx;
+            imageStream = pkgs.dockerTools.examples.nginxStream;
             ports = ["8181:80"];
           };
         };
diff --git a/nixos/tests/odoo.nix b/nixos/tests/odoo.nix
index 00ae4a2137d10..d3764cbc9f0b9 100644
--- a/nixos/tests/odoo.nix
+++ b/nixos/tests/odoo.nix
@@ -12,6 +12,8 @@ import ./make-test-python.nix ({ pkgs, lib, package ? pkgs.odoo, ...} : {
       services.odoo = {
         enable = true;
         package = package;
+        autoInit = true;
+        autoInitExtraFlags = [ "--without-demo=all" ];
         domain = "localhost";
       };
     };
diff --git a/nixos/tests/ollama-cuda.nix b/nixos/tests/ollama-cuda.nix
new file mode 100644
index 0000000000000..bbab7e24d35c7
--- /dev/null
+++ b/nixos/tests/ollama-cuda.nix
@@ -0,0 +1,17 @@
+{ lib, ... }:
+{
+  name = "ollama-cuda";
+  meta.maintainers = with lib.maintainers; [ abysssol ];
+
+  nodes.cuda =
+    { ... }:
+    {
+      services.ollama.enable = true;
+      services.ollama.acceleration = "cuda";
+    };
+
+  testScript = ''
+    cuda.wait_for_unit("multi-user.target")
+    cuda.wait_for_open_port(11434)
+  '';
+}
diff --git a/nixos/tests/ollama-rocm.nix b/nixos/tests/ollama-rocm.nix
new file mode 100644
index 0000000000000..81915630d950b
--- /dev/null
+++ b/nixos/tests/ollama-rocm.nix
@@ -0,0 +1,17 @@
+{ lib, ... }:
+{
+  name = "ollama-rocm";
+  meta.maintainers = with lib.maintainers; [ abysssol ];
+
+  nodes.rocm =
+    { ... }:
+    {
+      services.ollama.enable = true;
+      services.ollama.acceleration = "rocm";
+    };
+
+  testScript = ''
+    rocm.wait_for_unit("multi-user.target")
+    rocm.wait_for_open_port(11434)
+  '';
+}
diff --git a/nixos/tests/ollama.nix b/nixos/tests/ollama.nix
index 4b21f445cdbd3..34347716af726 100644
--- a/nixos/tests/ollama.nix
+++ b/nixos/tests/ollama.nix
@@ -1,56 +1,53 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }:
+{ lib, ... }:
 let
-  mainPort = "11434";
-  altPort = "11435";
-
-  curlRequest = port: request:
-    "curl http://127.0.0.1:${port}/api/generate -d '${builtins.toJSON request}'";
-
-  prompt = {
-    model = "tinydolphin";
-    prompt = "lorem ipsum";
-    options = {
-      seed = 69;
-      temperature = 0;
-    };
-  };
+  mainPort = 11434;
+  altPort = 11435;
 in
 {
   name = "ollama";
-  meta = with lib.maintainers; {
-    maintainers = [ abysssol ];
-  };
+  meta.maintainers = with lib.maintainers; [ abysssol ];
 
   nodes = {
-    cpu = { ... }: {
-      services.ollama.enable = true;
-    };
-
-    rocm = { ... }: {
-      services.ollama.enable = true;
-      services.ollama.acceleration = "rocm";
-    };
-
-    cuda = { ... }: {
-      services.ollama.enable = true;
-      services.ollama.acceleration = "cuda";
-    };
-
-    altAddress = { ... }: {
-      services.ollama.enable = true;
-      services.ollama.listenAddress = "127.0.0.1:${altPort}";
-    };
+    cpu =
+      { ... }:
+      {
+        services.ollama.enable = true;
+      };
+
+    altAddress =
+      { ... }:
+      {
+        services.ollama.enable = true;
+        services.ollama.port = altPort;
+      };
   };
 
   testScript = ''
-    vms = [ cpu, rocm, cuda, altAddress ];
+    import json
 
-    start_all()
-    for vm in vms:
-        vm.wait_for_unit("multi-user.target")
+    def curl_request_ollama(prompt, port):
+      json_prompt = json.dumps(prompt)
+      return f"""curl http://127.0.0.1:{port}/api/generate -d '{json_prompt}'"""
 
-    stdout = cpu.succeed("""${curlRequest mainPort prompt}""", timeout=100)
+    prompt = {
+      "model": "tinydolphin",
+      "prompt": "lorem ipsum",
+      "options": {
+        "seed": 69,
+        "temperature": 0,
+      },
+    }
 
-    stdout = altAddress.succeed("""${curlRequest altPort prompt}""", timeout=100)
+
+    vms = [
+      (cpu, ${toString mainPort}),
+      (altAddress, ${toString altPort}),
+    ]
+
+    start_all()
+    for (vm, port) in vms:
+      vm.wait_for_unit("multi-user.target")
+      vm.wait_for_open_port(port)
+      stdout = vm.succeed(curl_request_ollama(prompt, port), timeout = 100)
   '';
-})
+}
diff --git a/nixos/tests/open-webui.nix b/nixos/tests/open-webui.nix
new file mode 100644
index 0000000000000..faf4dae671d04
--- /dev/null
+++ b/nixos/tests/open-webui.nix
@@ -0,0 +1,49 @@
+{ config, lib, ... }:
+let
+  mainPort = "8080";
+  webuiName = "NixOS Test";
+in
+{
+  name = "open-webui";
+  meta = with lib.maintainers; {
+    maintainers = [ shivaraj-bh ];
+  };
+
+  nodes = {
+    machine =
+      { ... }:
+      {
+        services.open-webui = {
+          enable = true;
+          environment = {
+            # Requires network connection
+            RAG_EMBEDDING_MODEL = "";
+          };
+
+          # Test that environment variables can be
+          # overridden through a file.
+          environmentFile = config.node.pkgs.writeText "test.env" ''
+            WEBUI_NAME="${webuiName}"
+          '';
+        };
+      };
+  };
+
+  testScript = ''
+    import json
+
+    machine.start()
+
+    machine.wait_for_unit("open-webui.service")
+    machine.wait_for_open_port(${mainPort})
+
+    machine.succeed("curl http://127.0.0.1:${mainPort}")
+
+    # Load the Web UI config JSON and parse it.
+    webui_config_json = machine.succeed("curl http://127.0.0.1:${mainPort}/api/config")
+    webui_config = json.loads(webui_config_json)
+
+    # Check that the name was overridden via the environmentFile option.
+    assert webui_config["name"] == "${webuiName} (Open WebUI)"
+  '';
+}
diff --git a/nixos/tests/openarena.nix b/nixos/tests/openarena.nix
index 63dc1b9a68570..4dfe71a9a1e95 100644
--- a/nixos/tests/openarena.nix
+++ b/nixos/tests/openarena.nix
@@ -5,7 +5,7 @@ let
     { pkgs, ... }:
 
     { imports = [ ./common/x11.nix ];
-      hardware.opengl.driSupport = true;
+      hardware.graphics.enable = true;
       environment.systemPackages = [ pkgs.openarena ];
     };
 
diff --git a/nixos/tests/openssh.nix b/nixos/tests/openssh.nix
index 2684b6f45e84e..d420c482ca7f2 100644
--- a/nixos/tests/openssh.nix
+++ b/nixos/tests/openssh.nix
@@ -5,7 +5,7 @@ let inherit (import ./ssh-keys.nix pkgs)
 in {
   name = "openssh";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ aszlig eelco ];
+    maintainers = [ aszlig ];
   };
 
   nodes = {
@@ -111,21 +111,23 @@ in {
     server-no-openssl =
       { ... }:
       {
-        programs.ssh.package = pkgs.opensshPackages.openssh.override {
-          linkOpenssl = false;
-        };
         services.openssh = {
           enable = true;
+          package = pkgs.opensshPackages.openssh.override {
+            linkOpenssl = false;
+          };
           hostKeys = [
             { type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; }
           ];
           settings = {
-            # Must not specify the OpenSSL provided algorithms.
-            Ciphers = [ "chacha20-poly1305@openssh.com" ];
-            KexAlgorithms = [
-              "curve25519-sha256"
-              "curve25519-sha256@libssh.org"
-            ];
+            # Since this test runs against an OpenSSH built without OpenSSL,
+            # we have to override NixOS's default ciphers (which require OpenSSL)
+            # and set these options to null, so that OpenSSH uses its own defaults.
+            # As expected, those defaults don't require OpenSSL when OpenSSH is
+            # compiled without it.
+            Ciphers = null;
+            KexAlgorithms = null;
+            Macs = null;
           };
         };
         users.users.root.openssh.authorizedKeys.keys = [
@@ -136,11 +138,11 @@ in {
     server-no-pam =
       { pkgs, ... }:
       {
-        programs.ssh.package = pkgs.opensshPackages.openssh.override {
-          withPAM = false;
-        };
         services.openssh = {
           enable = true;
+          package = pkgs.opensshPackages.openssh.override {
+            withPAM = false;
+          };
           settings = {
             UsePAM = false;
           };
diff --git a/nixos/tests/opentelemetry-collector.nix b/nixos/tests/opentelemetry-collector.nix
index 9a56a22ca47eb..98e597cf807b5 100644
--- a/nixos/tests/opentelemetry-collector.nix
+++ b/nixos/tests/opentelemetry-collector.nix
@@ -12,7 +12,9 @@ in {
       enable = true;
       settings = {
         exporters.logging.verbosity = "detailed";
-        receivers.otlp.protocols.http = {};
+        receivers.otlp.protocols = {
+          http.endpoint = "0.0.0.0:${toString port}";
+        };
         service = {
           pipelines.logs = {
             receivers = [ "otlp" ];
diff --git a/nixos/tests/outline.nix b/nixos/tests/outline.nix
index e45be37f5d3b0..c7a34c5d6f85e 100644
--- a/nixos/tests/outline.nix
+++ b/nixos/tests/outline.nix
@@ -13,7 +13,7 @@ in
 {
   name = "outline";
 
-  meta.maintainers = with lib.maintainers; [ xanderio ];
+  meta.maintainers = lib.teams.cyberus.members;
 
   nodes = {
     outline = { pkgs, config, ... }: {
diff --git a/nixos/tests/pam/pam-u2f.nix b/nixos/tests/pam/pam-u2f.nix
index 46e307a3f125a..caa56c30bbce9 100644
--- a/nixos/tests/pam/pam-u2f.nix
+++ b/nixos/tests/pam/pam-u2f.nix
@@ -7,12 +7,16 @@ import ../make-test-python.nix ({ ... }:
     { ... }:
     {
       security.pam.u2f = {
-        control = "required";
-        cue = true;
-        debug = true;
         enable = true;
-        interactive = true;
-        origin = "nixos-test";
+        control = "required";
+        settings = {
+          cue = true;
+          debug = true;
+          interactive = true;
+          origin = "nixos-test";
+          # Freeform option
+          userpresence = 1;
+        };
       };
     };
 
@@ -20,7 +24,7 @@ import ../make-test-python.nix ({ ... }:
     ''
       machine.wait_for_unit("multi-user.target")
       machine.succeed(
-          'egrep "auth required .*/lib/security/pam_u2f.so.*cue.*debug.*interactive.*origin=nixos-test" /etc/pam.d/ -R'
+          'egrep "auth required .*/lib/security/pam_u2f.so.*cue.*debug.*interactive.*origin=nixos-test.*userpresence=1" /etc/pam.d/ -R'
       )
     '';
 })
diff --git a/nixos/tests/pantheon.nix b/nixos/tests/pantheon.nix
index d2a4a009af53d..0387a0359ab08 100644
--- a/nixos/tests/pantheon.nix
+++ b/nixos/tests/pantheon.nix
@@ -10,6 +10,9 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
   {
     imports = [ ./common/user-account.nix ];
 
+    # Workaround ".gala-wrapped invoked oom-killer"
+    virtualisation.memorySize = 2047;
+
     services.xserver.enable = true;
     services.xserver.desktopManager.pantheon.enable = true;
 
@@ -83,10 +86,10 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
         machine.wait_for_window("io.elementary.calendar")
 
     with subtest("Open system settings"):
-        machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.switchboard >&2 &'")
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.settings >&2 &'")
         # Wait for all plugins to be loaded before we check if the window is still there.
         machine.sleep(5)
-        machine.wait_for_window("io.elementary.switchboard")
+        machine.wait_for_window("io.elementary.settings")
 
     with subtest("Open elementary terminal"):
         machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.terminal >&2 &'")
@@ -96,9 +99,8 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
         cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1"
         env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"
         machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
-        machine.sleep(3)
+        machine.sleep(5)
         machine.screenshot("multitasking")
-        machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
 
     with subtest("Check if gala has ever coredumped"):
         machine.fail("coredumpctl --json=short | grep gala")
diff --git a/nixos/tests/paperless.nix b/nixos/tests/paperless.nix
index 3ef291ba7e06f..c895780669140 100644
--- a/nixos/tests/paperless.nix
+++ b/nixos/tests/paperless.nix
@@ -1,6 +1,6 @@
 import ./make-test-python.nix ({ lib, ... }: {
   name = "paperless";
-  meta.maintainers = with lib.maintainers; [ erikarvstedt Flakebi ];
+  meta.maintainers = with lib.maintainers; [ leona SuperSandro2000 erikarvstedt ];
 
   nodes = let self = {
     simple = { pkgs, ... }: {
diff --git a/nixos/tests/patroni.nix b/nixos/tests/patroni.nix
index 1f15cd59677ad..68fce4051553e 100644
--- a/nixos/tests/patroni.nix
+++ b/nixos/tests/patroni.nix
@@ -155,7 +155,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
               print(node.succeed("patronictl list cluster1"))
               node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'length') == {expected_replicas + 1} ]")
               node.wait_until_succeeds("[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Leader$\"))) | map(select(.State | test(\"^running$\"))) | length') == 1 ]")
-              node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^running$\"))) | length') == {expected_replicas} ]")
+              node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^streaming$\"))) | length') == {expected_replicas} ]")
               print(node.succeed("patronictl list cluster1"))
           client.wait_until_succeeds("psql -h 127.0.0.1 -U postgres --command='select 1;'")
 
diff --git a/nixos/tests/pgbouncer.nix b/nixos/tests/pgbouncer.nix
index bb5afd35ee28f..8d11c4b3f4bf5 100644
--- a/nixos/tests/pgbouncer.nix
+++ b/nixos/tests/pgbouncer.nix
@@ -1,20 +1,12 @@
-import ./make-test-python.nix ({ pkgs, ... } :
-let
-  testAuthFile = pkgs.writeTextFile {
-    name = "authFile";
-    text = ''
-      "testuser" "testpass"
-    '';
-  };
-in
-{
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
   name = "pgbouncer";
-  meta = with pkgs.lib.maintainers; {
+
+  meta = with lib.maintainers; {
     maintainers = [ _1000101 ];
   };
-  nodes = {
-    one = { config, pkgs, ... }: {
 
+  nodes = {
+    one = { pkgs, ... }: {
       systemd.services.postgresql = {
         postStart = ''
           ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER ROLE testuser WITH LOGIN PASSWORD 'testpass'";
@@ -26,10 +18,7 @@ in
         postgresql = {
           enable = true;
           ensureDatabases = [ "testdb" ];
-          ensureUsers = [
-          {
-            name = "testuser";
-          }];
+          ensureUsers = [{ name = "testuser"; }];
           authentication = ''
             local testdb testuser scram-sha-256
           '';
@@ -37,10 +26,19 @@ in
 
         pgbouncer = {
           enable = true;
-          listenAddress = "localhost";
-          databases = { test = "host=/run/postgresql/ port=5432 auth_user=testuser dbname=testdb"; };
-          authType = "scram-sha-256";
-          authFile = testAuthFile;
+          openFirewall = true;
+          settings = {
+            pgbouncer = {
+              listen_addr = "localhost";
+              auth_type = "scram-sha-256";
+              auth_file = builtins.toFile "pgbouncer-users.txt" ''
+                "testuser" "testpass"
+              '';
+            };
+            databases = {
+              test = "host=/run/postgresql port=5432 auth_user=testuser dbname=testdb";
+            };
+          };
         };
       };
     };
diff --git a/nixos/tests/pghero.nix b/nixos/tests/pghero.nix
new file mode 100644
index 0000000000000..bce32da008862
--- /dev/null
+++ b/nixos/tests/pghero.nix
@@ -0,0 +1,63 @@
+let
+  pgheroPort = 1337;
+  pgheroUser = "pghero";
+  pgheroPass = "pghero";
+in
+{ lib, ... }: {
+  name = "pghero";
+  meta.maintainers = [ lib.maintainers.tie ];
+
+  nodes.machine = { config, ... }: {
+    services.postgresql = {
+      enable = true;
+      # This test uses the default peer authentication (the socket and its
+      # directory are world-readable by default), so we essentially test that
+      # we can connect with DynamicUser= set.
+      ensureUsers = [{
+        name = "pghero";
+        ensureClauses.superuser = true;
+      }];
+    };
+    services.pghero = {
+      enable = true;
+      listenAddress = "[::]:${toString pgheroPort}";
+      settings = {
+        databases = {
+          postgres.url = "<%= ENV['POSTGRES_DATABASE_URL'] %>";
+          nulldb.url = "nulldb:///";
+        };
+      };
+      environment = {
+        PGHERO_USERNAME = pgheroUser;
+        PGHERO_PASSWORD = pgheroPass;
+        POSTGRES_DATABASE_URL = "postgresql:///postgres?host=/run/postgresql";
+      };
+    };
+  };
+
+  testScript = ''
+    pgheroPort = ${toString pgheroPort}
+    pgheroUser = "${pgheroUser}"
+    pgheroPass = "${pgheroPass}"
+
+    pgheroUnauthorizedURL = f"http://localhost:{pgheroPort}"
+    pgheroBaseURL = f"http://{pgheroUser}:{pgheroPass}@localhost:{pgheroPort}"
+
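+    # Fetch the URL with curl and compare only the reported HTTP status code.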
+    def expect_http_code(node, code, url):
+        http_code = node.succeed(f"curl -s -o /dev/null -w '%{{http_code}}' '{url}'")
+        assert http_code.split("\n")[-1].strip() == code, \
+          f"expected HTTP status code {code} but got {http_code}"
+
+    machine.wait_for_unit("postgresql.service")
+    machine.wait_for_unit("pghero.service")
+
+    with subtest("requires HTTP Basic Auth credentials"):
+      expect_http_code(machine, "401", pgheroUnauthorizedURL)
+
+    with subtest("works with some databases being unavailable"):
+      expect_http_code(machine, "500", pgheroBaseURL + "/nulldb")
+
+    with subtest("connects to the PostgreSQL database"):
+      expect_http_code(machine, "200", pgheroBaseURL + "/postgres")
+  '';
+}
diff --git a/nixos/tests/pgvecto-rs.nix b/nixos/tests/pgvecto-rs.nix
index cd871dab6a0f1..8d9d6c0b88f51 100644
--- a/nixos/tests/pgvecto-rs.nix
+++ b/nixos/tests/pgvecto-rs.nix
@@ -66,7 +66,7 @@ let
     '';
 
   };
-  applicablePostgresqlVersions = filterAttrs (_: value: versionAtLeast value.version "12") postgresql-versions;
+  applicablePostgresqlVersions = filterAttrs (_: value: versionAtLeast value.version "14") postgresql-versions;
 in
 mapAttrs'
   (name: package: {
diff --git a/nixos/tests/phosh.nix b/nixos/tests/phosh.nix
index d505f0ffc5245..64d6889aaf741 100644
--- a/nixos/tests/phosh.nix
+++ b/nixos/tests/phosh.nix
@@ -3,7 +3,7 @@ import ./make-test-python.nix ({ pkgs, ...}: let
 in {
   name = "phosh";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ tomfitzhenry zhaofengli ];
+    maintainers = [ zhaofengli ];
   };
 
   nodes = {
diff --git a/nixos/tests/pingvin-share.nix b/nixos/tests/pingvin-share.nix
new file mode 100644
index 0000000000000..5b1ec55add206
--- /dev/null
+++ b/nixos/tests/pingvin-share.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+  {
+    name = "pingvin-share";
+    meta.maintainers = with lib.maintainers; [ ratcornu ];
+
+    nodes.machine =
+      { ... }:
+      {
+        services.pingvin-share = {
+          enable = true;
+
+          backend.port = 9010;
+          frontend.port = 9011;
+        };
+      };
+
+    testScript = ''
+      machine.wait_for_unit("pingvin-share-frontend.service")
+      machine.wait_for_open_port(9010)
+      machine.wait_for_open_port(9011)
+      machine.succeed("curl --fail http://127.0.0.1:9010/api/configs")
+      machine.succeed("curl --fail http://127.0.0.1:9011/")
+    '';
+  }
+)
diff --git a/nixos/tests/plasma5.nix b/nixos/tests/plasma5.nix
index 1bff37981da3f..b4e10581219cd 100644
--- a/nixos/tests/plasma5.nix
+++ b/nixos/tests/plasma5.nix
@@ -19,7 +19,6 @@ import ./make-test-python.nix ({ pkgs, ...} :
       enable = true;
       user = "alice";
     };
-    hardware.pulseaudio.enable = true; # needed for the factl test, /dev/snd/* exists without them but udev doesn't care then
   };
 
   testScript = { nodes, ... }: let
diff --git a/nixos/tests/playwright-python.nix b/nixos/tests/playwright-python.nix
new file mode 100644
index 0000000000000..0a5deecbb508b
--- /dev/null
+++ b/nixos/tests/playwright-python.nix
@@ -0,0 +1,58 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+  {
+    name = "playwright-python";
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ phaer ];
+    };
+
+    nodes.machine =
+      { pkgs, ... }:
+      {
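+        # Point Playwright at the browsers packaged in nixpkgs so it does not try to
+        # download them at runtime, and use the local Nix manual as an offline test page.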
+        environment.variables = {
+          NIX_MANUAL_DOCROOT = "file://${pkgs.nix.doc}/share/doc/nix/manual/index.html";
+          PLAYWRIGHT_BROWSERS_PATH = pkgs.playwright-driver.browsers;
+        };
+        environment.systemPackages = [
+          (pkgs.writers.writePython3Bin "test_playwright"
+            {
+              libraries = [ pkgs.python3Packages.playwright ];
+            }
+            ''
+              import sys
+              from playwright.sync_api import sync_playwright
+              from playwright.sync_api import expect
+
+              browsers = {
+                "chromium": ["--headless", "--disable-gpu"],
+                "firefox": [],
+                "webkit": []
+              }
+              if len(sys.argv) != 3 or sys.argv[1] not in browsers.keys():
+                  print(f"usage: {sys.argv[0]} [{'|'.join(browsers.keys())}] <url>")
+                  sys.exit(1)
+              browser_name = sys.argv[1]
+              url = sys.argv[2]
+              browser_args = browsers.get(browser_name)
+              print(f"Running test on {browser_name} {' '.join(browser_args)}")
+              with sync_playwright() as p:
+                  browser = getattr(p, browser_name).launch(args=browser_args)
+                  context = browser.new_context()
+                  page = context.new_page()
+                  page.goto(url)
+                  expect(page.get_by_text("Nix Reference Manual")).to_be_visible()
+            ''
+          )
+        ];
+      };
+
+    testScript = ''
+      # FIXME: Webkit segfaults
+      for browser in ["firefox", "chromium"]:
+          with subtest(f"Render Nix Manual in {browser}"):
+              machine.succeed(f"test_playwright {browser} $NIX_MANUAL_DOCROOT")
+    '';
+
+  }
+)
diff --git a/nixos/tests/pleroma.nix b/nixos/tests/pleroma.nix
index 08a01585f8778..721f27e8f8c6a 100644
--- a/nixos/tests/pleroma.nix
+++ b/nixos/tests/pleroma.nix
@@ -32,7 +32,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
     # system one. Overriding this pretty bad default behaviour.
     export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
 
-    echo "jamy-password" | toot login_cli -i "pleroma.nixos.test" -e "jamy@nixos.test"
+    toot --debug login_cli -i "pleroma.nixos.test" -e "jamy@nixos.test" -p "jamy-password"
     echo "Login OK"
 
     # Send a toot then verify it's part of the public timeline
@@ -182,7 +182,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
     client = { nodes, pkgs, config, ... }: {
       security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
       networking.extraHosts = hosts nodes;
-      environment.systemPackages = with pkgs; [
+      environment.systemPackages = [
         pkgs.toot
         send-toot
       ];
@@ -191,7 +191,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
       security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
       networking.extraHosts = hosts nodes;
       networking.firewall.enable = false;
-      environment.systemPackages = with pkgs; [
+      environment.systemPackages = [
         provision-db
         provision-secrets
         provision-user
@@ -245,10 +245,13 @@ import ./make-test-python.nix ({ pkgs, ... }:
   testScript = { nodes, ... }: ''
     pleroma.wait_for_unit("postgresql.service")
     pleroma.succeed("provision-db")
+    pleroma.wait_for_file("/var/lib/pleroma")
     pleroma.succeed("provision-secrets")
     pleroma.systemctl("restart pleroma.service")
     pleroma.wait_for_unit("pleroma.service")
     pleroma.succeed("provision-user")
     client.succeed("send-toot")
   '';
+
+  meta.timeout = 600;
 })
diff --git a/nixos/tests/plotinus.nix b/nixos/tests/plotinus.nix
index b6ebab9b01989..2bb2b705eb7e9 100644
--- a/nixos/tests/plotinus.nix
+++ b/nixos/tests/plotinus.nix
@@ -2,6 +2,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
   name = "plotinus";
   meta = {
     maintainers = pkgs.plotinus.meta.maintainers;
+    timeout = 600;
   };
 
   nodes.machine =
@@ -9,20 +10,23 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
     { imports = [ ./common/x11.nix ];
       programs.plotinus.enable = true;
-      environment.systemPackages = [ pkgs.gnome.gnome-calculator pkgs.xdotool ];
+      environment.systemPackages = [
+        pkgs.gnome-pomodoro
+        pkgs.xdotool
+      ];
     };
 
   testScript = ''
     machine.wait_for_x()
-    machine.succeed("gnome-calculator >&2 &")
-    machine.wait_for_window("gnome-calculator")
+    machine.succeed("gnome-pomodoro >&2 &")
+    machine.wait_for_window("Pomodoro", timeout=120)
     machine.succeed(
-        "xdotool search --sync --onlyvisible --class gnome-calculator "
+        "xdotool search --sync --onlyvisible --class gnome-pomodoro "
         + "windowfocus --sync key --clearmodifiers --delay 1 'ctrl+shift+p'"
     )
     machine.sleep(5)  # wait for the popup
+    machine.screenshot("popup")
     machine.succeed("xdotool key --delay 100 p r e f e r e n c e s Return")
-    machine.wait_for_window("Preferences")
-    machine.screenshot("screen")
+    machine.wait_for_window("Preferences", timeout=120)
   '';
 })
diff --git a/nixos/tests/podman/default.nix b/nixos/tests/podman/default.nix
index 3eea45832f0a6..f57523cf58886 100644
--- a/nixos/tests/podman/default.nix
+++ b/nixos/tests/podman/default.nix
@@ -1,5 +1,22 @@
 import ../make-test-python.nix (
-  { pkgs, lib, ... }: {
+  { pkgs, lib, ... }:
+  let
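+    # Quadlet: podman's systemd generator turns this .container file into a systemd
+    # service; the test installs it under ~/.config/containers/systemd for a rootless user.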
+    quadletContainerFile = pkgs.writeText "quadlet.container" ''
+      [Unit]
+      Description=A test quadlet container
+
+      [Container]
+      Image=localhost/scratchimg:latest
+      Exec=bash -c 'trap exit SIGTERM SIGINT; while true; do sleep 1; done'
+      ContainerName=quadlet
+      Volume=/nix/store:/nix/store
+      Volume=/run/current-system/sw/bin:/bin
+
+      [Install]
+      WantedBy=default.target
+    '';
+  in
+  {
     name = "podman";
     meta = {
       maintainers = lib.teams.podman.members;
@@ -174,6 +191,16 @@ import ../make-test-python.nix (
       with subtest("A podman non-member can not use the docker cli"):
           docker.fail(su_cmd("docker version", user="mallory"))
 
+      with subtest("A rootless quadlet container service is created"):
+          dir = "/home/alice/.config/containers/systemd"
+          rootless.succeed(su_cmd("tar cv --files-from /dev/null | podman import - scratchimg"))
+          rootless.succeed(su_cmd(f"mkdir -p {dir}"))
+          rootless.succeed(su_cmd(f"cp -f ${quadletContainerFile} {dir}/quadlet.container"))
+          rootless.systemctl("daemon-reload", "alice")
+          rootless.systemctl("start quadlet", "alice")
+          rootless.wait_until_succeeds(su_cmd("podman ps | grep quadlet"), timeout=20)
+          rootless.systemctl("stop quadlet", "alice")
+
       # TODO: add docker-compose test
 
     '';
diff --git a/nixos/tests/postgresql-jit.nix b/nixos/tests/postgresql-jit.nix
index baf26b8da2b39..f4b1d07a7faf8 100644
--- a/nixos/tests/postgresql-jit.nix
+++ b/nixos/tests/postgresql-jit.nix
@@ -1,6 +1,7 @@
 { system ? builtins.currentSystem
 , config ? {}
 , pkgs ? import ../.. { inherit system config; }
+, package ? null
 }:
 
 with import ../lib/testing-python.nix { inherit system pkgs; };
@@ -9,14 +10,17 @@ let
   inherit (pkgs) lib;
   packages = builtins.attrNames (import ../../pkgs/servers/sql/postgresql pkgs);
 
-  mkJitTest = packageName: makeTest {
-    name = "${packageName}";
+  mkJitTestFromName = name:
+    mkJitTest pkgs.${name};
+
+  mkJitTest = package: makeTest {
+    name = package.name;
     meta.maintainers = with lib.maintainers; [ ma27 ];
     nodes.machine = { pkgs, lib, ... }: {
       services.postgresql = {
+        inherit package;
         enable = true;
         enableJIT = true;
-        package = pkgs.${packageName};
         initialScript = pkgs.writeText "init.sql" ''
           create table demo (id int);
           insert into demo (id) select generate_series(1, 5);
@@ -45,4 +49,7 @@ let
     '';
   };
 in
-lib.genAttrs packages mkJitTest
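+# all-tests.nix calls this without `package` and builds one test per PostgreSQL version;
+# <package>.tests passes a concrete package to run only that one.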
+if package == null then
+  lib.genAttrs packages mkJitTestFromName
+else
+  mkJitTest package
diff --git a/nixos/tests/postgresql-tls-client-cert.nix b/nixos/tests/postgresql-tls-client-cert.nix
new file mode 100644
index 0000000000000..c1678ed733beb
--- /dev/null
+++ b/nixos/tests/postgresql-tls-client-cert.nix
@@ -0,0 +1,141 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+, package ? null
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  lib = pkgs.lib;
+
+  # Makes a test for a PostgreSQL package, given by name and looked up from `pkgs`.
+  makeTestAttribute = name:
+    {
+      inherit name;
+      value = makePostgresqlTlsClientCertTest pkgs."${name}";
+    };
+
+  makePostgresqlTlsClientCertTest = pkg:
+    let
+      runWithOpenSSL = file: cmd: pkgs.runCommand file
+        {
+          buildInputs = [ pkgs.openssl ];
+        }
+        cmd;
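+      # Throwaway PKI: a CA, a server certificate for db.test.example, and a client
+      # certificate whose CN ("test") must match the database user for clientcert=verify-full.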
+      caKey = runWithOpenSSL "ca.key" "openssl ecparam -name prime256v1 -genkey -noout -out $out";
+      caCert = runWithOpenSSL
+        "ca.crt"
+        ''
+          openssl req -new -x509 -sha256 -key ${caKey} -out $out -subj "/CN=test.example" -days 36500
+        '';
+      serverKey =
+        runWithOpenSSL "server.key" "openssl ecparam -name prime256v1 -genkey -noout -out $out";
+      serverKeyPath = "/var/lib/postgresql";
+      serverCert =
+        runWithOpenSSL "server.crt" ''
+          openssl req -new -sha256 -key ${serverKey} -out server.csr -subj "/CN=db.test.example"
+          openssl x509 -req -in server.csr -CA ${caCert} -CAkey ${caKey} \
+            -CAcreateserial -out $out -days 36500 -sha256
+        '';
+      clientKey =
+        runWithOpenSSL "client.key" "openssl ecparam -name prime256v1 -genkey -noout -out $out";
+      clientCert =
+        runWithOpenSSL "client.crt" ''
+          openssl req -new -sha256 -key ${clientKey} -out client.csr -subj "/CN=test"
+          openssl x509 -req -in client.csr -CA ${caCert} -CAkey ${caKey} \
+            -CAcreateserial -out $out -days 36500 -sha256
+        '';
+      clientKeyPath = "/root";
+
+    in
+    makeTest {
+      name = "postgresql-tls-client-cert-${pkg.name}";
+      meta.maintainers = with lib.maintainers; [ erictapen ];
+
+      nodes.server = { ... }: {
+        system.activationScripts = {
+          keyPlacement.text = ''
+            mkdir -p '${serverKeyPath}'
+            cp '${serverKey}' '${serverKeyPath}/server.key'
+            chown postgres:postgres '${serverKeyPath}/server.key'
+            chmod 600 '${serverKeyPath}/server.key'
+          '';
+        };
+        services.postgresql = {
+          package = pkg;
+          enable = true;
+          enableTCPIP = true;
+          ensureUsers = [
+            {
+              name = "test";
+              ensureDBOwnership = true;
+            }
+          ];
+          ensureDatabases = [ "test" ];
+          settings = {
+            ssl = "on";
+            ssl_ca_file = toString caCert;
+            ssl_cert_file = toString serverCert;
+            ssl_key_file = "${serverKeyPath}/server.key";
+          };
+          authentication = ''
+            hostssl test test ::/0 cert clientcert=verify-full
+          '';
+        };
+        networking = {
+          interfaces.eth1 = {
+            ipv6.addresses = [
+              { address = "fc00::1"; prefixLength = 120; }
+            ];
+          };
+          firewall.allowedTCPPorts = [ 5432 ];
+        };
+      };
+
+      nodes.client = { ... }: {
+        system.activationScripts = {
+          keyPlacement.text = ''
+            mkdir -p '${clientKeyPath}'
+            cp '${clientKey}' '${clientKeyPath}/client.key'
+            chown root:root '${clientKeyPath}/client.key'
+            chmod 600 '${clientKeyPath}/client.key'
+          '';
+        };
+        environment = {
+          variables = {
+            PGHOST = "db.test.example";
+            PGPORT = "5432";
+            PGDATABASE = "test";
+            PGUSER = "test";
+            PGSSLMODE = "verify-full";
+            PGSSLCERT = clientCert;
+            PGSSLKEY = "${clientKeyPath}/client.key";
+            PGSSLROOTCERT = caCert;
+          };
+          systemPackages = [ pkg ];
+        };
+        networking = {
+          interfaces.eth1 = {
+            ipv6.addresses = [
+              { address = "fc00::2"; prefixLength = 120; }
+            ];
+          };
+          hosts = { "fc00::1" = [ "db.test.example" ]; };
+        };
+      };
+
+      testScript = ''
+        server.wait_for_unit("multi-user.target")
+        client.wait_for_unit("multi-user.target")
+        client.succeed("psql -c \"SELECT 1;\"")
+      '';
+    };
+
+in
+if package == null then
+# all-tests.nix: Maps the generic function over all attributes of PostgreSQL packages
+  builtins.listToAttrs (map makeTestAttribute (builtins.attrNames (import ../../pkgs/servers/sql/postgresql pkgs)))
+else
+# Called directly from <package>.tests
+  makePostgresqlTlsClientCertTest package
diff --git a/nixos/tests/postgresql-wal-receiver.nix b/nixos/tests/postgresql-wal-receiver.nix
index b0bd7711dbcd9..ab2ab4ad0d4fa 100644
--- a/nixos/tests/postgresql-wal-receiver.nix
+++ b/nixos/tests/postgresql-wal-receiver.nix
@@ -1,6 +1,7 @@
 { system ? builtins.currentSystem,
   config ? {},
-  pkgs ? import ../.. { inherit system config; }
+  pkgs ? import ../.. { inherit system config; },
+  package ? null
 }:
 
 with import ../lib/testing-python.nix { inherit system pkgs; };
@@ -9,111 +10,110 @@ let
   lib = pkgs.lib;
 
   # Makes a test for a PostgreSQL package, given by name and looked up from `pkgs`.
-  makePostgresqlWalReceiverTest = postgresqlPackage:
+  makeTestAttribute = name:
   {
-    name = postgresqlPackage;
-    value =
-      let
-        pkg = pkgs."${postgresqlPackage}";
-        postgresqlDataDir = "/var/lib/postgresql/${pkg.psqlSchema}";
-        replicationUser = "wal_receiver_user";
-        replicationSlot = "wal_receiver_slot";
-        replicationConn = "postgresql://${replicationUser}@localhost";
-        baseBackupDir = "/tmp/pg_basebackup";
-        walBackupDir = "/tmp/pg_wal";
-        atLeast12 = lib.versionAtLeast pkg.version "12.0";
-
-        recoveryFile = if atLeast12
-            then pkgs.writeTextDir "recovery.signal" ""
-            else pkgs.writeTextDir "recovery.conf" "restore_command = 'cp ${walBackupDir}/%f %p'";
-
-      in makeTest {
-        name = "postgresql-wal-receiver-${postgresqlPackage}";
-        meta.maintainers = with lib.maintainers; [ pacien ];
-
-        nodes.machine = { ... }: {
-          services.postgresql = {
-            package = pkg;
-            enable = true;
-            settings = lib.mkMerge [
-              {
-                wal_level = "archive"; # alias for replica on pg >= 9.6
-                max_wal_senders = 10;
-                max_replication_slots = 10;
-              }
-              (lib.mkIf atLeast12 {
-                restore_command = "cp ${walBackupDir}/%f %p";
-                recovery_end_command = "touch recovery.done";
-              })
-            ];
-            authentication = ''
-              host replication ${replicationUser} all trust
-            '';
-            initialScript = pkgs.writeText "init.sql" ''
-              create user ${replicationUser} replication;
-              select * from pg_create_physical_replication_slot('${replicationSlot}');
-            '';
-          };
+    inherit name;
+    value = makePostgresqlWalReceiverTest pkgs."${name}";
+  };
+
+  makePostgresqlWalReceiverTest = pkg:
+    let
+      postgresqlDataDir = "/var/lib/postgresql/${pkg.psqlSchema}";
+      replicationUser = "wal_receiver_user";
+      replicationSlot = "wal_receiver_slot";
+      replicationConn = "postgresql://${replicationUser}@localhost";
+      baseBackupDir = "/tmp/pg_basebackup";
+      walBackupDir = "/tmp/pg_wal";
+
+      recoveryFile = pkgs.writeTextDir "recovery.signal" "";
 
-          services.postgresqlWalReceiver.receivers.main = {
-            postgresqlPackage = pkg;
-            connection = replicationConn;
-            slot = replicationSlot;
-            directory = walBackupDir;
+    in makeTest {
+      name = "postgresql-wal-receiver-${pkg.name}";
+      meta.maintainers = with lib.maintainers; [ pacien ];
+
+      nodes.machine = { ... }: {
+        services.postgresql = {
+          package = pkg;
+          enable = true;
+          settings = {
+            max_replication_slots = 10;
+            max_wal_senders = 10;
+            recovery_end_command = "touch recovery.done";
+            restore_command = "cp ${walBackupDir}/%f %p";
+            wal_level = "archive"; # alias for replica on pg >= 9.6
           };
-          # This is only to speedup test, it isn't time racing. Service is set to autorestart always,
-          # default 60sec is fine for real system, but is too much for a test
-          systemd.services.postgresql-wal-receiver-main.serviceConfig.RestartSec = lib.mkForce 5;
+          authentication = ''
+            host replication ${replicationUser} all trust
+          '';
+          initialScript = pkgs.writeText "init.sql" ''
+            create user ${replicationUser} replication;
+            select * from pg_create_physical_replication_slot('${replicationSlot}');
+          '';
         };
 
-        testScript = ''
-          # make an initial base backup
-          machine.wait_for_unit("postgresql")
-          machine.wait_for_unit("postgresql-wal-receiver-main")
-          # WAL receiver healthchecks PG every 5 seconds, so let's be sure they have connected each other
-          # required only for 9.4
-          machine.sleep(5)
-          machine.succeed(
-              "${pkg}/bin/pg_basebackup --dbname=${replicationConn} --pgdata=${baseBackupDir}"
-          )
-
-          # create a dummy table with 100 records
-          machine.succeed(
-              "sudo -u postgres psql --command='create table dummy as select * from generate_series(1, 100) as val;'"
-          )
-
-          # stop postgres and destroy data
-          machine.systemctl("stop postgresql")
-          machine.systemctl("stop postgresql-wal-receiver-main")
-          machine.succeed("rm -r ${postgresqlDataDir}/{base,global,pg_*}")
-
-          # restore the base backup
-          machine.succeed(
-              "cp -r ${baseBackupDir}/* ${postgresqlDataDir} && chown postgres:postgres -R ${postgresqlDataDir}"
-          )
-
-          # prepare WAL and recovery
-          machine.succeed("chmod a+rX -R ${walBackupDir}")
-          machine.execute(
-              "for part in ${walBackupDir}/*.partial; do mv $part ''${part%%.*}; done"
-          )  # make use of partial segments too
-          machine.succeed(
-              "cp ${recoveryFile}/* ${postgresqlDataDir}/ && chmod 666 ${postgresqlDataDir}/recovery*"
-          )
-
-          # replay WAL
-          machine.systemctl("start postgresql")
-          machine.wait_for_file("${postgresqlDataDir}/recovery.done")
-          machine.systemctl("restart postgresql")
-          machine.wait_for_unit("postgresql")
-
-          # check that our records have been restored
-          machine.succeed(
-              "test $(sudo -u postgres psql --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100"
-          )
-        '';
+        services.postgresqlWalReceiver.receivers.main = {
+          postgresqlPackage = pkg;
+          connection = replicationConn;
+          slot = replicationSlot;
+          directory = walBackupDir;
+        };
+        # This is only to speed up the test; it isn't a timing race. The service is set to
+        # always restart; the default 60s is fine for a real system but too long for a test.
+        systemd.services.postgresql-wal-receiver-main.serviceConfig.RestartSec = lib.mkForce 5;
       };
+
+      testScript = ''
+        # make an initial base backup
+        machine.wait_for_unit("postgresql")
+        machine.wait_for_unit("postgresql-wal-receiver-main")
+        # The WAL receiver healthchecks PostgreSQL every 5 seconds, so make sure they
+        # have connected to each other (only required for 9.4)
+        machine.sleep(5)
+        machine.succeed(
+            "${pkg}/bin/pg_basebackup --dbname=${replicationConn} --pgdata=${baseBackupDir}"
+        )
+
+        # create a dummy table with 100 records
+        machine.succeed(
+            "sudo -u postgres psql --command='create table dummy as select * from generate_series(1, 100) as val;'"
+        )
+
+        # stop postgres and destroy data
+        machine.systemctl("stop postgresql")
+        machine.systemctl("stop postgresql-wal-receiver-main")
+        machine.succeed("rm -r ${postgresqlDataDir}/{base,global,pg_*}")
+
+        # restore the base backup
+        machine.succeed(
+            "cp -r ${baseBackupDir}/* ${postgresqlDataDir} && chown postgres:postgres -R ${postgresqlDataDir}"
+        )
+
+        # prepare WAL and recovery
+        machine.succeed("chmod a+rX -R ${walBackupDir}")
+        machine.execute(
+            "for part in ${walBackupDir}/*.partial; do mv $part ''${part%%.*}; done"
+        )  # make use of partial segments too
+        machine.succeed(
+            "cp ${recoveryFile}/* ${postgresqlDataDir}/ && chmod 666 ${postgresqlDataDir}/recovery*"
+        )
+
+        # replay WAL
+        machine.systemctl("start postgresql")
+        machine.wait_for_file("${postgresqlDataDir}/recovery.done")
+        machine.systemctl("restart postgresql")
+        machine.wait_for_unit("postgresql")
+
+        # check that our records have been restored
+        machine.succeed(
+            "test $(sudo -u postgres psql --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100"
+        )
+      '';
     };
 
-# Maps the generic function over all attributes of PostgreSQL packages
-in builtins.listToAttrs (map makePostgresqlWalReceiverTest (builtins.attrNames (import ../../pkgs/servers/sql/postgresql pkgs)))
+in
+if package == null then
+  # all-tests.nix: Maps the generic function over all attributes of PostgreSQL packages
+  builtins.listToAttrs (map makeTestAttribute (builtins.attrNames (import ../../pkgs/servers/sql/postgresql pkgs)))
+else
+  # Called directly from <package>.tests
+  makePostgresqlWalReceiverTest package
diff --git a/nixos/tests/postgresql-wal2json.nix b/nixos/tests/postgresql-wal2json.nix
new file mode 100644
index 0000000000000..043ad48cbc6ec
--- /dev/null
+++ b/nixos/tests/postgresql-wal2json.nix
@@ -0,0 +1,60 @@
+{
+  system ? builtins.currentSystem,
+  config ? { },
+  pkgs ? import ../.. { inherit system config; },
+  postgresql ? null,
+}:
+
+let
+  makeTest = import ./make-test-python.nix;
+  # Makes a test for a PostgreSQL package, given by name and looked up from `pkgs`.
+  makeTestAttribute = name: {
+    inherit name;
+    value = makePostgresqlWal2jsonTest pkgs."${name}";
+  };
+
+  makePostgresqlWal2jsonTest =
+    postgresqlPackage:
+    makeTest {
+      name = "postgresql-wal2json-${postgresqlPackage.name}";
+      meta.maintainers = with pkgs.lib.maintainers; [ euank ];
+
+      nodes.machine = {
+        services.postgresql = {
+          package = postgresqlPackage;
+          enable = true;
+          extraPlugins = with postgresqlPackage.pkgs; [ wal2json ];
+          settings = {
+            wal_level = "logical";
+            max_replication_slots = "10";
+            max_wal_senders = "10";
+          };
+        };
+      };
+
+      testScript = ''
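+        # Run the (lightly modified) upstream wal2json examples and diff the logical
+        # decoding output against the expected output files next to this test.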
+        machine.wait_for_unit("postgresql")
+        machine.succeed(
+            "sudo -u postgres psql -qAt -f ${./postgresql/wal2json/example2.sql} postgres > /tmp/example2.out"
+        )
+        machine.succeed(
+            "diff ${./postgresql/wal2json/example2.out} /tmp/example2.out"
+        )
+        machine.succeed(
+            "sudo -u postgres psql -qAt -f ${./postgresql/wal2json/example3.sql} postgres > /tmp/example3.out"
+        )
+        machine.succeed(
+            "diff ${./postgresql/wal2json/example3.out} /tmp/example3.out"
+        )
+      '';
+    };
+
+in
+# By default, create one test per postgresql version
+if postgresql == null then
+  builtins.listToAttrs (
+    map makeTestAttribute (builtins.attrNames (import ../../pkgs/servers/sql/postgresql pkgs))
+  )
+# but if postgresql is set, we are being used as a passthru test for a specific postgres + wal2json version, so run just that one
+else
+  makePostgresqlWal2jsonTest postgresql
diff --git a/nixos/tests/postgresql/wal2json/LICENSE b/nixos/tests/postgresql/wal2json/LICENSE
new file mode 100644
index 0000000000000..e3e82163fc091
--- /dev/null
+++ b/nixos/tests/postgresql/wal2json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013-2024, Euler Taveira de Oliveira
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+  list of conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.
+
+* Neither the name of the Euler Taveira de Oliveira nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/nixos/tests/postgresql/wal2json/README.md b/nixos/tests/postgresql/wal2json/README.md
new file mode 100644
index 0000000000000..796bf810d757b
--- /dev/null
+++ b/nixos/tests/postgresql/wal2json/README.md
@@ -0,0 +1,11 @@
+Data in this folder is taken from the wal2json README's examples [here](https://github.com/eulerto/wal2json/tree/75629c2e1e81a12350cc9d63782fc53252185d8d#sql-functions).
+
+They are used under the terms of the BSD-3 License, a copy of which is included
+in this directory.
+
+These files have been lightly modified in order to make their output more reproducible.
+
+Changes:
+- `\o /dev/null` has been added before commands that print LSNs since LSNs aren't reproducible
+- `now()` has been replaced with a hardcoded timestamp string for reproducibility
+- The test is run with `--quiet`, and the expected output has been trimmed accordingly
diff --git a/nixos/tests/postgresql/wal2json/example2.out b/nixos/tests/postgresql/wal2json/example2.out
new file mode 100644
index 0000000000000..0a089e1122706
--- /dev/null
+++ b/nixos/tests/postgresql/wal2json/example2.out
@@ -0,0 +1,74 @@
+init
+{
+	"change": [
+		{
+			"kind": "message",
+			"transactional": false,
+			"prefix": "wal2json",
+			"content": "this non-transactional message will be delivered even if you rollback the transaction"
+		}
+	]
+}
+{
+	"change": [
+		{
+			"kind": "insert",
+			"schema": "public",
+			"table": "table2_with_pk",
+			"columnnames": ["a", "b", "c"],
+			"columntypes": ["integer", "character varying(30)", "timestamp without time zone"],
+			"columnvalues": [1, "Backup and Restore", "2018-03-27 12:05:29.914496"]
+		}
+		,{
+			"kind": "insert",
+			"schema": "public",
+			"table": "table2_with_pk",
+			"columnnames": ["a", "b", "c"],
+			"columntypes": ["integer", "character varying(30)", "timestamp without time zone"],
+			"columnvalues": [2, "Tuning", "2018-03-27 12:05:29.914496"]
+		}
+		,{
+			"kind": "insert",
+			"schema": "public",
+			"table": "table2_with_pk",
+			"columnnames": ["a", "b", "c"],
+			"columntypes": ["integer", "character varying(30)", "timestamp without time zone"],
+			"columnvalues": [3, "Replication", "2018-03-27 12:05:29.914496"]
+		}
+		,{
+			"kind": "message",
+			"transactional": true,
+			"prefix": "wal2json",
+			"content": "this message will be delivered"
+		}
+		,{
+			"kind": "delete",
+			"schema": "public",
+			"table": "table2_with_pk",
+			"oldkeys": {
+				"keynames": ["a", "c"],
+				"keytypes": ["integer", "timestamp without time zone"],
+				"keyvalues": [1, "2018-03-27 12:05:29.914496"]
+			}
+		}
+		,{
+			"kind": "delete",
+			"schema": "public",
+			"table": "table2_with_pk",
+			"oldkeys": {
+				"keynames": ["a", "c"],
+				"keytypes": ["integer", "timestamp without time zone"],
+				"keyvalues": [2, "2018-03-27 12:05:29.914496"]
+			}
+		}
+		,{
+			"kind": "insert",
+			"schema": "public",
+			"table": "table2_without_pk",
+			"columnnames": ["a", "b", "c"],
+			"columntypes": ["integer", "numeric(5,2)", "text"],
+			"columnvalues": [1, 2.34, "Tapir"]
+		}
+	]
+}
+stop
diff --git a/nixos/tests/postgresql/wal2json/example2.sql b/nixos/tests/postgresql/wal2json/example2.sql
new file mode 100644
index 0000000000000..ec474381bb4d9
--- /dev/null
+++ b/nixos/tests/postgresql/wal2json/example2.sql
@@ -0,0 +1,31 @@
+CREATE TABLE table2_with_pk (a SERIAL, b VARCHAR(30), c TIMESTAMP NOT NULL, PRIMARY KEY(a, c));
+CREATE TABLE table2_without_pk (a SERIAL, b NUMERIC(5,2), c TEXT);
+
+SELECT 'init' FROM pg_create_logical_replication_slot('test_slot', 'wal2json');
+
+BEGIN;
+INSERT INTO table2_with_pk (b, c) VALUES('Backup and Restore', '2018-03-27 12:05:29.914496');
+INSERT INTO table2_with_pk (b, c) VALUES('Tuning', '2018-03-27 12:05:29.914496');
+INSERT INTO table2_with_pk (b, c) VALUES('Replication', '2018-03-27 12:05:29.914496');
+
+-- Avoid printing WAL LSNs since they aren't reproducible and are therefore hard to assert on
+\o /dev/null
+SELECT pg_logical_emit_message(true, 'wal2json', 'this message will be delivered');
+SELECT pg_logical_emit_message(true, 'pgoutput', 'this message will be filtered');
+\o
+
+DELETE FROM table2_with_pk WHERE a < 3;
+\o /dev/null
+SELECT pg_logical_emit_message(false, 'wal2json', 'this non-transactional message will be delivered even if you rollback the transaction');
+\o
+
+INSERT INTO table2_without_pk (b, c) VALUES(2.34, 'Tapir');
+-- it is not added to stream because there isn't a pk or a replica identity
+UPDATE table2_without_pk SET c = 'Anta' WHERE c = 'Tapir';
+COMMIT;
+
+SELECT data FROM pg_logical_slot_get_changes('test_slot', NULL, NULL, 'pretty-print', '1', 'add-msg-prefixes', 'wal2json');
+SELECT 'stop' FROM pg_drop_replication_slot('test_slot');
+
+DROP TABLE table2_with_pk;
+DROP TABLE table2_without_pk;
diff --git a/nixos/tests/postgresql/wal2json/example3.out b/nixos/tests/postgresql/wal2json/example3.out
new file mode 100644
index 0000000000000..e20d2a8aefd7d
--- /dev/null
+++ b/nixos/tests/postgresql/wal2json/example3.out
@@ -0,0 +1,12 @@
+init
+{"action":"M","transactional":false,"prefix":"wal2json","content":"this non-transactional message will be delivered even if you rollback the transaction"}
+{"action":"B"}
+{"action":"I","schema":"public","table":"table3_with_pk","columns":[{"name":"a","type":"integer","value":1},{"name":"b","type":"character varying(30)","value":"Backup and Restore"},{"name":"c","type":"timestamp without time zone","value":"2019-12-29 04:58:34.806671"}]}
+{"action":"I","schema":"public","table":"table3_with_pk","columns":[{"name":"a","type":"integer","value":2},{"name":"b","type":"character varying(30)","value":"Tuning"},{"name":"c","type":"timestamp without time zone","value":"2019-12-29 04:58:34.806671"}]}
+{"action":"I","schema":"public","table":"table3_with_pk","columns":[{"name":"a","type":"integer","value":3},{"name":"b","type":"character varying(30)","value":"Replication"},{"name":"c","type":"timestamp without time zone","value":"2019-12-29 04:58:34.806671"}]}
+{"action":"M","transactional":true,"prefix":"wal2json","content":"this message will be delivered"}
+{"action":"D","schema":"public","table":"table3_with_pk","identity":[{"name":"a","type":"integer","value":1},{"name":"c","type":"timestamp without time zone","value":"2019-12-29 04:58:34.806671"}]}
+{"action":"D","schema":"public","table":"table3_with_pk","identity":[{"name":"a","type":"integer","value":2},{"name":"c","type":"timestamp without time zone","value":"2019-12-29 04:58:34.806671"}]}
+{"action":"I","schema":"public","table":"table3_without_pk","columns":[{"name":"a","type":"integer","value":1},{"name":"b","type":"numeric(5,2)","value":2.34},{"name":"c","type":"text","value":"Tapir"}]}
+{"action":"C"}
+stop
diff --git a/nixos/tests/postgresql/wal2json/example3.sql b/nixos/tests/postgresql/wal2json/example3.sql
new file mode 100644
index 0000000000000..6d94e261f51ae
--- /dev/null
+++ b/nixos/tests/postgresql/wal2json/example3.sql
@@ -0,0 +1,26 @@
+CREATE TABLE table3_with_pk (a SERIAL, b VARCHAR(30), c TIMESTAMP NOT NULL, PRIMARY KEY(a, c));
+CREATE TABLE table3_without_pk (a SERIAL, b NUMERIC(5,2), c TEXT);
+
+SELECT 'init' FROM pg_create_logical_replication_slot('test_slot', 'wal2json');
+
+BEGIN;
+INSERT INTO table3_with_pk (b, c) VALUES('Backup and Restore', '2019-12-29 04:58:34.806671');
+INSERT INTO table3_with_pk (b, c) VALUES('Tuning', '2019-12-29 04:58:34.806671');
+INSERT INTO table3_with_pk (b, c) VALUES('Replication', '2019-12-29 04:58:34.806671');
+\o /dev/null
+SELECT pg_logical_emit_message(true, 'wal2json', 'this message will be delivered');
+SELECT pg_logical_emit_message(true, 'pgoutput', 'this message will be filtered');
+DELETE FROM table3_with_pk WHERE a < 3;
+SELECT pg_logical_emit_message(false, 'wal2json', 'this non-transactional message will be delivered even if you rollback the transaction');
+\o
+
+INSERT INTO table3_without_pk (b, c) VALUES(2.34, 'Tapir');
+-- it is not added to stream because there isn't a pk or a replica identity
+UPDATE table3_without_pk SET c = 'Anta' WHERE c = 'Tapir';
+COMMIT;
+
+SELECT data FROM pg_logical_slot_get_changes('test_slot', NULL, NULL, 'format-version', '2', 'add-msg-prefixes', 'wal2json');
+SELECT 'stop' FROM pg_drop_replication_slot('test_slot');
+
+DROP TABLE table3_with_pk;
+DROP TABLE table3_without_pk;
diff --git a/nixos/tests/printing.nix b/nixos/tests/printing.nix
index 29c5d810f215a..b413996c67db8 100644
--- a/nixos/tests/printing.nix
+++ b/nixos/tests/printing.nix
@@ -9,7 +9,7 @@ import ./make-test-python.nix (
 {
   name = "printing";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ domenkozar eelco matthewbauer ];
+    maintainers = [ domenkozar matthewbauer ];
   };
 
   nodes.server = { ... }: {
diff --git a/nixos/tests/private-gpt.nix b/nixos/tests/private-gpt.nix
new file mode 100644
index 0000000000000..1c90101d29575
--- /dev/null
+++ b/nixos/tests/private-gpt.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  mainPort = "8001";
+in
+{
+  name = "private-gpt";
+  meta = with lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.private-gpt = {
+        enable = true;
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+
+    machine.wait_for_unit("private-gpt.service")
+    machine.wait_for_open_port(${mainPort})
+
+    machine.succeed("curl http://127.0.0.1:${mainPort}")
+  '';
+})
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 56569c4de2c85..71eef72df6f3a 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -177,6 +177,26 @@ let
       '';
     };
 
+    borgmatic = {
+      exporterConfig = {
+        enable = true;
+        user = "root";
+      };
+      metricProvider = {
+        services.borgmatic.enable = true;
+        services.borgmatic.settings.source_directories = [ "/home" ];
+        services.borgmatic.settings.repositories = [ { label = "local"; path = "/var/backup"; } ];
+        services.borgmatic.settings.keep_daily = 10;
+      };
+      exporterTest = ''
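+        # Create an unencrypted repository and take one backup so the exporter has data to report.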
+        succeed("borgmatic rcreate -e none")
+        succeed("borgmatic")
+        wait_for_unit("prometheus-borgmatic-exporter.service")
+        wait_for_open_port(9996)
+        succeed("curl -sSf localhost:9996/metrics | grep 'borg_total_backups{repository=\"/var/backup\"} 1'")
+      '';
+    };
+
     collectd = {
       exporterConfig = {
         enable = true;
@@ -209,6 +229,34 @@ let
         '';
     };
 
+    deluge = {
+      exporterConfig = {
+        enable = true;
+        port = 1234;
+        listenAddress = "127.0.0.1";
+
+        delugeUser = "user";
+        delugePort = 2345;
+        delugePasswordFile = pkgs.writeText "password" "weak_password";
+      };
+      metricProvider = {
+        services.deluge.enable = true;
+        services.deluge.declarative = true;
+        services.deluge.config.daemon_port = 2345;
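+        # Deluge auth file: one "<username>:<password>:<auth level>" entry per line.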
+        services.deluge.authFile = pkgs.writeText "authFile" ''
+          localclient:abcdef:10
+          user:weak_password:10
+        '';
+      };
+      exporterTest = ''
+        wait_for_unit("deluged.service")
+        wait_for_open_port(2345)
+        wait_for_unit("prometheus-deluge-exporter.service")
+        wait_for_open_port(1234)
+        succeed("curl -sSf http://localhost:1234 | grep 'deluge_torrents'")
+      '';
+    };
+
     dnsmasq = {
       exporterConfig = {
         enable = true;
@@ -314,10 +362,9 @@ let
         tokenPath = pkgs.writeText "token" "abc123";
       };
 
-      # noop: fastly's exporter can't start without first talking to fastly
-      # see: https://github.com/peterbourgon/fastly-exporter/issues/87
       exporterTest = ''
-        succeed("true");
+        wait_for_unit("prometheus-fastly-exporter.service")
+        wait_for_open_port(9118)
       '';
     };
 
@@ -435,7 +482,6 @@ let
     json = {
       exporterConfig = {
         enable = true;
-        url = "http://localhost";
         configFile = pkgs.writeText "json-exporter-conf.json" (builtins.toJSON {
           modules = {
             default = {
@@ -483,9 +529,6 @@ let
                 global-module: mod-stats
                 dnssec-signing: off
                 zonefile-sync: -1
-                journal-db: /var/lib/knot/journal
-                kasp-db: /var/lib/knot/kasp
-                timer-db: /var/lib/knot/timer
                 zonefile-load: difference
                 storage: ${pkgs.buildEnv {
                   name = "foo";
@@ -885,82 +928,35 @@ let
       '';
     };
 
-    openldap = {
-      exporterConfig = {
-        enable = true;
-        ldapCredentialFile = "${pkgs.writeText "exporter.yml" ''
-          ldapUser: "cn=root,dc=example"
-          ldapPass: "notapassword"
-        ''}";
-      };
-      metricProvider = {
-        services.openldap = {
-          enable = true;
-          settings.children = {
-            "cn=schema".includes = [
-              "${pkgs.openldap}/etc/schema/core.ldif"
-              "${pkgs.openldap}/etc/schema/cosine.ldif"
-              "${pkgs.openldap}/etc/schema/inetorgperson.ldif"
-              "${pkgs.openldap}/etc/schema/nis.ldif"
-            ];
-            "olcDatabase={1}mdb" = {
-              attrs = {
-                objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
-                olcDatabase = "{1}mdb";
-                olcDbDirectory = "/var/lib/openldap/db";
-                olcSuffix = "dc=example";
-                olcRootDN = {
-                  # cn=root,dc=example
-                  base64 = "Y249cm9vdCxkYz1leGFtcGxl";
-                };
-                olcRootPW = {
-                  path = "${pkgs.writeText "rootpw" "notapassword"}";
-                };
-              };
-            };
-            "olcDatabase={2}monitor".attrs = {
-              objectClass = [ "olcDatabaseConfig" ];
-              olcDatabase = "{2}monitor";
-              olcAccess = [ "to dn.subtree=cn=monitor by users read" ];
-            };
-          };
-          declarativeContents."dc=example" = ''
-            dn: dc=example
-            objectClass: domain
-            dc: example
-
-            dn: ou=users,dc=example
-            objectClass: organizationalUnit
-            ou: users
-          '';
-        };
-      };
-      exporterTest = ''
-        wait_for_unit("prometheus-openldap-exporter.service")
-        wait_for_open_port(389)
-        wait_for_open_port(9330)
-        wait_until_succeeds(
-            "curl -sSf http://localhost:9330/metrics | grep 'openldap_scrape{result=\"ok\"} 1'"
-        )
-      '';
-    };
-
     pgbouncer = {
       exporterConfig = {
         enable = true;
-        connectionStringFile = pkgs.writeText "connection.conf" "postgres://admin:@localhost:6432/pgbouncer?sslmode=disable";
+        connectionEnvFile = "${pkgs.writeText "connstr-env" ''
+          PGBOUNCER_EXPORTER_CONNECTION_STRING=postgres://admin@localhost:6432/pgbouncer?sslmode=disable
+        ''}";
       };
 
       metricProvider = {
         services.postgresql.enable = true;
         services.pgbouncer = {
-          # https://github.com/prometheus-community/pgbouncer_exporter#pgbouncer-configuration
-          ignoreStartupParameters = "extra_float_digits";
           enable = true;
-          listenAddress = "*";
-          databases = { postgres = "host=/run/postgresql/ port=5432 auth_user=postgres dbname=postgres"; };
-          authType = "any";
-          maxClientConn = 99;
+          settings = {
+            pgbouncer = {
+              listen_addr = "*";
+              auth_type = "any";
+              max_client_conn = 99;
+              # https://github.com/prometheus-community/pgbouncer_exporter#pgbouncer-configuration
+              ignore_startup_parameters = "extra_float_digits";
+            };
+            databases = {
+              postgres = concatStringsSep " " [
+                "host=/run/postgresql"
+                "port=5432"
+                "auth_user=postgres"
+                "dbname=postgres"
+              ];
+            };
+          };
         };
       };
       exporterTest = ''
diff --git a/nixos/tests/prometheus/alertmanager.nix b/nixos/tests/prometheus/alertmanager.nix
new file mode 100644
index 0000000000000..6301db6df62e3
--- /dev/null
+++ b/nixos/tests/prometheus/alertmanager.nix
@@ -0,0 +1,152 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "prometheus-alertmanager";
+
+  nodes = {
+    prometheus = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+
+        alertmanagers = [
+          {
+            scheme = "http";
+            static_configs = [
+              {
+                targets = [
+                  "alertmanager:${toString config.services.prometheus.alertmanager.port}"
+                ];
+              }
+            ];
+          }
+        ];
+
+        rules = [
+          ''
+            groups:
+              - name: test
+                rules:
+                  - alert: InstanceDown
+                    expr: up == 0
+                    for: 5s
+                    labels:
+                      severity: page
+                    annotations:
+                      summary: "Instance {{ $labels.instance }} down"
+          ''
+        ];
+
+        scrapeConfigs = [
+          {
+            job_name = "alertmanager";
+            static_configs = [
+              {
+                targets = [
+                  "alertmanager:${toString config.services.prometheus.alertmanager.port}"
+                ];
+              }
+            ];
+          }
+          {
+            job_name = "node";
+            static_configs = [
+              {
+                targets = [
+                  "node:${toString config.services.prometheus.exporters.node.port}"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    alertmanager = { config, pkgs, ... }: {
+      services.prometheus.alertmanager = {
+        enable = true;
+        openFirewall = true;
+
+        configuration = {
+          global = {
+            resolve_timeout = "1m";
+          };
+
+          route = {
+            # Root route node
+            receiver = "test";
+            group_by = ["..."];
+            continue = false;
+            group_wait = "1s";
+            group_interval = "15s";
+            repeat_interval = "24h";
+          };
+
+          receivers = [
+            {
+              name = "test";
+              webhook_configs = [
+                {
+                  url = "http://logger:6725";
+                  send_resolved = true;
+                  max_alerts = 0;
+                }
+              ];
+            }
+          ];
+        };
+      };
+    };
+
+    logger = { config, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 6725 ];
+
+      services.prometheus.alertmanagerWebhookLogger.enable = true;
+    };
+  };
+
+  testScript = ''
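+    # The "node" scrape target is never brought up, so the InstanceDown alert fires
+    # and should be delivered to the webhook logger via alertmanager.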
+    alertmanager.wait_for_unit("alertmanager")
+    alertmanager.wait_for_open_port(9093)
+    alertmanager.wait_until_succeeds("curl -s http://127.0.0.1:9093/-/ready")
+    #alertmanager.wait_until_succeeds("journalctl -o cat -u alertmanager.service | grep 'version=${pkgs.prometheus-alertmanager.version}'")
+
+    logger.wait_for_unit("alertmanager-webhook-logger")
+    logger.wait_for_open_port(6725)
+
+    prometheus.wait_for_unit("prometheus")
+    prometheus.wait_for_open_port(9090)
+
+    prometheus.wait_until_succeeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"alertmanager\"\}==1)' | "
+      + "jq '.data.result[0].value[1]' | grep '\"1\"'"
+    )
+
+    prometheus.wait_until_succeeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(alertmanager_build_info)%20by%20(version)' | "
+      + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-alertmanager.version}\"'"
+    )
+
+    prometheus.wait_until_succeeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\}!=1)' | "
+      + "jq '.data.result[0].value[1]' | grep '\"1\"'"
+    )
+
+    prometheus.wait_until_succeeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=alertmanager_notifications_total\{integration=\"webhook\"\}' | "
+      + "jq '.data.result[0].value[1]' | grep -v '\"0\"'"
+    )
+
+    logger.wait_until_succeeds(
+      "journalctl -o cat -u alertmanager-webhook-logger.service | grep '\"alertname\":\"InstanceDown\"'"
+    )
+
+    logger.log(logger.succeed("systemd-analyze security alertmanager-webhook-logger.service | grep -v '✓'"))
+
+    alertmanager.log(alertmanager.succeed("systemd-analyze security alertmanager.service | grep -v '✓'"))
+  '';
+})
diff --git a/nixos/tests/prometheus/config-reload.nix b/nixos/tests/prometheus/config-reload.nix
new file mode 100644
index 0000000000000..786668c624ea9
--- /dev/null
+++ b/nixos/tests/prometheus/config-reload.nix
@@ -0,0 +1,116 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "prometheus-config-reload";
+
+  nodes = {
+    prometheus = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        enableReload = true;
+        globalConfig.scrape_interval = "2s";
+        scrapeConfigs = [
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [
+                  "prometheus:${toString config.services.prometheus.port}"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+
+      specialisation = {
+        "prometheus-config-change" = {
+          configuration = {
+            environment.systemPackages = [ pkgs.yq ];
+
+            # This configuration just adds a new prometheus job
+            # to scrape the node_exporter metrics of a host named "node".
+            services.prometheus = {
+              scrapeConfigs = [
+                {
+                  job_name = "node";
+                  static_configs = [
+                    {
+                      targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ];
+                    }
+                  ];
+                }
+              ];
+            };
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    prometheus.wait_for_unit("prometheus")
+    prometheus.wait_for_open_port(9090)
+
+    # Check if switching to a NixOS configuration that changes the prometheus
+    # configuration reloads (instead of restarts) prometheus before the switch
+    # finishes successfully:
+    with subtest("config change reloads prometheus"):
+      import json
+      # We check if prometheus has finished reloading by looking for the message
+      # "Completed loading of configuration file" in the journal between the start
+      # and finish of switching to the new NixOS configuration.
+      #
+      # To mark the start we record the journal cursor before starting the switch:
+      cursor_before_switching = json.loads(
+          prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
+      )["__CURSOR"]
+
+      # Now we switch:
+      prometheus_config_change = prometheus.succeed(
+          "readlink /run/current-system/specialisation/prometheus-config-change"
+      ).strip()
+      prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")
+
+      # Next we retrieve all logs since the start of switching:
+      logs_after_starting_switching = prometheus.succeed(
+          """
+            journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
+          """.format(
+              cursor_before_switching=cursor_before_switching
+          )
+      )
+
+      # Finally we check if the message "Completed loading of configuration file"
+      # occurs before the "finished switching to system configuration" message:
+      finished_switching_msg = (
+          "finished switching to system configuration " + prometheus_config_change
+      )
+      reloaded_before_switching_finished = False
+      finished_switching = False
+      for log_line in logs_after_starting_switching.split("\n"):
+          msg = json.loads(log_line)["MESSAGE"]
+          if "Completed loading of configuration file" in msg:
+              reloaded_before_switching_finished = True
+          if msg == finished_switching_msg:
+              finished_switching = True
+              break
+
+      assert reloaded_before_switching_finished
+      assert finished_switching
+
+      # Check if the reloaded config includes the new node job:
+      prometheus.succeed(
+        """
+          curl -sf http://127.0.0.1:9090/api/v1/status/config \
+            | jq -r .data.yaml \
+            | yq '.scrape_configs | any(.job_name == "node")' \
+            | grep true
+        """
+      )
+  '';
+})
diff --git a/nixos/tests/prometheus/default.nix b/nixos/tests/prometheus/default.nix
new file mode 100644
index 0000000000000..133922a453c05
--- /dev/null
+++ b/nixos/tests/prometheus/default.nix
@@ -0,0 +1,13 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
+
+{
+  alertmanager = import ./alertmanager.nix { inherit system pkgs; };
+  config-reload = import ./config-reload.nix { inherit system pkgs; };
+  federation = import ./federation.nix { inherit system pkgs; };
+  prometheus-pair = import ./prometheus-pair.nix { inherit system pkgs; };
+  pushgateway = import ./pushgateway.nix { inherit system pkgs; };
+  remote-write = import ./remote-write.nix { inherit system pkgs; };
+}
diff --git a/nixos/tests/prometheus/federation.nix b/nixos/tests/prometheus/federation.nix
new file mode 100644
index 0000000000000..0f05166c8f5da
--- /dev/null
+++ b/nixos/tests/prometheus/federation.nix
@@ -0,0 +1,213 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "prometheus-federation";
+
+  nodes = {
+    global1 = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+
+        scrapeConfigs = [
+          {
+            job_name = "federate";
+            honor_labels = true;
+            metrics_path = "/federate";
+
+            params = {
+              "match[]" = [
+                "{job=\"node\"}"
+                "{job=\"prometheus\"}"
+              ];
+            };
+
+            static_configs = [
+              {
+                targets = [
+                  "prometheus1:${toString config.services.prometheus.port}"
+                  "prometheus2:${toString config.services.prometheus.port}"
+                ];
+              }
+            ];
+          }
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [
+                  "global1:${toString config.services.prometheus.port}"
+                  "global2:${toString config.services.prometheus.port}"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    global2 = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+
+        scrapeConfigs = [
+          {
+            job_name = "federate";
+            honor_labels = true;
+            metrics_path = "/federate";
+
+            params = {
+              "match[]" = [
+                "{job=\"node\"}"
+                "{job=\"prometheus\"}"
+              ];
+            };
+
+            static_configs = [
+              {
+                targets = [
+                  "prometheus1:${toString config.services.prometheus.port}"
+                  "prometheus2:${toString config.services.prometheus.port}"
+                ];
+              }
+            ];
+          }
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [
+                  "global1:${toString config.services.prometheus.port}"
+                  "global2:${toString config.services.prometheus.port}"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    prometheus1 = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+
+        scrapeConfigs = [
+          {
+            job_name = "node";
+            static_configs = [
+              {
+                targets = [
+                  "node1:${toString config.services.prometheus.exporters.node.port}"
+                ];
+              }
+            ];
+          }
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [
+                  "prometheus1:${toString config.services.prometheus.port}"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    prometheus2 = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+
+        scrapeConfigs = [
+          {
+            job_name = "node";
+            static_configs = [
+              {
+                targets = [
+                  "node2:${toString config.services.prometheus.exporters.node.port}"
+                ];
+              }
+            ];
+          }
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [
+                  "prometheus2:${toString config.services.prometheus.port}"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    node1 = { config, pkgs, ... }: {
+      services.prometheus.exporters.node = {
+        enable = true;
+        openFirewall = true;
+      };
+    };
+
+    node2 = { config, pkgs, ... }: {
+      services.prometheus.exporters.node = {
+        enable = true;
+        openFirewall = true;
+      };
+    };
+  };
+
+  testScript = ''
+    for machine in node1, node2:
+      machine.wait_for_unit("prometheus-node-exporter")
+      machine.wait_for_open_port(9100)
+
+    for machine in prometheus1, prometheus2, global1, global2:
+      machine.wait_for_unit("prometheus")
+      machine.wait_for_open_port(9090)
+
+    # Verify each Prometheus server scraped its own node exporter and itself
+    for machine in prometheus1, prometheus2:
+      machine.wait_until_succeeds(
+        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
+        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
+      )
+      machine.wait_until_succeeds(
+        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
+        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
+      )
+
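+    # The federating servers pull {job="node"} and {job="prometheus"} from both
+    # prometheus servers, so they should see 2 node exporters and 4 build_info
+    # series (2 federated + 2 scraped directly from global1/global2).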
+    for machine in global1, global2:
+      machine.wait_until_succeeds(
+        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
+        + "jq '.data.result[0].value[1]' | grep '\"2\"'"
+      )
+
+      machine.wait_until_succeeds(
+        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
+        + "jq '.data.result[0].value[1]' | grep '\"4\"'"
+      )
+  '';
+})
diff --git a/nixos/tests/prometheus/prometheus-pair.nix b/nixos/tests/prometheus/prometheus-pair.nix
new file mode 100644
index 0000000000000..3ac70ca0403ec
--- /dev/null
+++ b/nixos/tests/prometheus/prometheus-pair.nix
@@ -0,0 +1,87 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "prometheus-pair";
+
+  nodes = {
+    prometheus1 = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+        scrapeConfigs = [
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [
+                  "prometheus1:${toString config.services.prometheus.port}"
+                  "prometheus2:${toString config.services.prometheus.port}"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    prometheus2 = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+        scrapeConfigs = [
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [
+                  "prometheus1:${toString config.services.prometheus.port}"
+                  "prometheus2:${toString config.services.prometheus.port}"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    for machine in prometheus1, prometheus2:
+      machine.wait_for_unit("prometheus")
+      machine.wait_for_open_port(9090)
+      machine.wait_until_succeeds("journalctl -o cat -u prometheus.service | grep 'version=${pkgs.prometheus.version}'")
+      machine.wait_until_succeeds("curl -sSf http://localhost:9090/-/healthy")
+
+    # Prometheus servers ready - run some queries
+    for machine in prometheus1, prometheus2:
+      machine.wait_until_succeeds(
+        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\",version=\"${pkgs.prometheus.version}\"\}' | "
+        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
+      )
+
+      machine.wait_until_succeeds(
+        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\"\}' | "
+        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
+      )
+
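+      # Both servers scrape the same two targets, so summing build_info by
+      # version should yield 2 for the single running version.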
+      machine.wait_until_succeeds(
+        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
+        + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus.version}\"'"
+      )
+
+      machine.wait_until_succeeds(
+        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
+        + "jq '.data.result[0].value[1]' | grep '\"2\"'"
+      )
+
+    prometheus1.log(prometheus1.succeed("systemd-analyze security prometheus.service | grep -v '✓'"))
+  '';
+})
diff --git a/nixos/tests/prometheus/pushgateway.nix b/nixos/tests/prometheus/pushgateway.nix
new file mode 100644
index 0000000000000..261c41598eb02
--- /dev/null
+++ b/nixos/tests/prometheus/pushgateway.nix
@@ -0,0 +1,96 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "prometheus-pushgateway";
+
+  nodes = {
+    prometheus = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+
+        scrapeConfigs = [
+          {
+            job_name = "pushgateway";
+            static_configs = [
+              {
+                targets = [
+                  "pushgateway:9091"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    pushgateway = { config, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 9091 ];
+
+      services.prometheus.pushgateway = {
+        enable = true;
+      };
+    };
+
+    client = { config, pkgs, ... }: {
+    };
+  };
+
+  testScript = ''
+    pushgateway.wait_for_unit("pushgateway")
+    pushgateway.wait_for_open_port(9091)
+    pushgateway.wait_until_succeeds("curl -s http://127.0.0.1:9091/-/ready")
+    pushgateway.wait_until_succeeds("journalctl -o cat -u pushgateway.service | grep 'version=${pkgs.prometheus-pushgateway.version}'")
+
+    prometheus.wait_for_unit("prometheus")
+    prometheus.wait_for_open_port(9090)
+
+    prometheus.wait_until_succeeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"pushgateway\"\})' | "
+      + "jq '.data.result[0].value[1]' | grep '\"1\"'"
+    )
+
+    prometheus.wait_until_succeeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(pushgateway_build_info)%20by%20(version)' | "
+      + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-pushgateway.version}\"'"
+    )
+
+    client.wait_for_unit("network-online.target")
+
+    # Add a metric and check in Prometheus
+    client.wait_until_succeeds(
+      "echo 'some_metric 3.14' | curl --data-binary @- http://pushgateway:9091/metrics/job/some_job"
+    )
+
+    prometheus.wait_until_succeeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
+      + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
+    )
+
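+    # While some_metric exists, absent(some_metric) returns an empty result,
+    # so the first value is null.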
+    prometheus.wait_until_succeeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
+      + "jq '.data.result[0].value[1]' | grep 'null'"
+    )
+
+    # Delete the metric, check not in Prometheus
+    client.wait_until_succeeds(
+      "curl -X DELETE http://pushgateway:9091/metrics/job/some_job"
+    )
+
+    prometheus.wait_until_fails(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
+      + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
+    )
+
+    prometheus.wait_until_succeeds(
+      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
+      + "jq '.data.result[0].value[1]' | grep '\"1\"'"
+    )
+
+    pushgateway.log(pushgateway.succeed("systemd-analyze security pushgateway.service | grep -v '✓'"))
+  '';
+})
diff --git a/nixos/tests/prometheus/remote-write.nix b/nixos/tests/prometheus/remote-write.nix
new file mode 100644
index 0000000000000..24092b9fb88da
--- /dev/null
+++ b/nixos/tests/prometheus/remote-write.nix
@@ -0,0 +1,73 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "prometheus-remote-write";
+
+  nodes = {
+    receiver = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+
+        extraFlags = [ "--web.enable-remote-write-receiver" ];
+      };
+    };
+
+    prometheus = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
+
+      services.prometheus = {
+        enable = true;
+        globalConfig.scrape_interval = "2s";
+
+        remoteWrite = [
+          {
+            url = "http://receiver:9090/api/v1/write";
+          }
+        ];
+
+        scrapeConfigs = [
+          {
+            job_name = "node";
+            static_configs = [
+              {
+                targets = [
+                  "node:${toString config.services.prometheus.exporters.node.port}"
+                ];
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    node = { config, pkgs, ... }: {
+      services.prometheus.exporters.node = {
+        enable = true;
+        openFirewall = true;
+      };
+    };
+  };
+
+  testScript = ''
+    node.wait_for_unit("prometheus-node-exporter")
+    node.wait_for_open_port(9100)
+
+    for machine in prometheus, receiver:
+      machine.wait_for_unit("prometheus")
+      machine.wait_for_open_port(9090)
+
+    # Verify both servers got the same data from the exporter
+    for machine in prometheus, receiver:
+      machine.wait_until_succeeds(
+        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=node_exporter_build_info\{instance=\"node:9100\"\}' | "
+        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
+      )
+  '';
+})
diff --git a/nixos/tests/proxy.nix b/nixos/tests/proxy.nix
index f8a3d576903e3..ce7131b09a8ab 100644
--- a/nixos/tests/proxy.nix
+++ b/nixos/tests/proxy.nix
@@ -12,7 +12,7 @@ let
 in {
   name = "proxy";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco ];
+    maintainers = [ ];
   };
 
   nodes = {
diff --git a/nixos/tests/pt2-clone.nix b/nixos/tests/pt2-clone.nix
index ea4329c4a9806..57a8495a3296a 100644
--- a/nixos/tests/pt2-clone.nix
+++ b/nixos/tests/pt2-clone.nix
@@ -10,7 +10,6 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     ];
 
     services.xserver.enable = true;
-    sound.enable = true;
     environment.systemPackages = [ pkgs.pt2-clone ];
   };
 
diff --git a/nixos/tests/qemu-vm-store.nix b/nixos/tests/qemu-vm-store.nix
new file mode 100644
index 0000000000000..9fb9f4baaafc4
--- /dev/null
+++ b/nixos/tests/qemu-vm-store.nix
@@ -0,0 +1,71 @@
+{ lib, ... }: {
+
+  name = "qemu-vm-store";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes = {
+    sharedWritable = {
+      virtualisation.writableStore = true;
+    };
+
+    sharedReadOnly = {
+      virtualisation.writableStore = false;
+    };
+
+    imageWritable = {
+      virtualisation.useNixStoreImage = true;
+      virtualisation.writableStore = true;
+    };
+
+    imageReadOnly = {
+      virtualisation.useNixStoreImage = true;
+      virtualisation.writableStore = false;
+    };
+
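+    # Boots via a bootloader from a full disk image, so the store should be
+    # part of the root filesystem rather than a separate mount.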
+    fullDisk = {
+      virtualisation.useBootLoader = true;
+    };
+  };
+
+  testScript = ''
+    build_derivation = """
+      nix-build --option substitute false -E 'derivation {
+        name = "t";
+        builder = "/bin/sh";
+        args = ["-c" "echo something > $out"];
+        system = builtins.currentSystem;
+        preferLocalBuild = true;
+      }'
+    """
+
+    start_all()
+
+    with subtest("Nix Store is writable"):
+      sharedWritable.succeed(build_derivation)
+      imageWritable.succeed(build_derivation)
+      fullDisk.succeed(build_derivation)
+
+    with subtest("Nix Store is read only"):
+      sharedReadOnly.fail(build_derivation)
+      imageReadOnly.fail(build_derivation)
+
+    # Checking whether the fs type is 9P is just a proxy to test whether the
+    # Nix Store is shared. If we switch to a different technology (e.g.
+    # virtiofs) for sharing, we need to adjust these tests.
+
+    with subtest("Nix store is shared from the host via 9P"):
+      sharedWritable.succeed("findmnt --kernel --type 9P /nix/.ro-store")
+      sharedReadOnly.succeed("findmnt --kernel --type 9P /nix/.ro-store")
+
+    with subtest("Nix store is not shared via 9P"):
+      imageWritable.fail("findmnt --kernel --type 9P /nix/.ro-store")
+      imageReadOnly.fail("findmnt --kernel --type 9P /nix/.ro-store")
+
+    with subtest("Nix store is not mounted separately"):
+      rootDevice = fullDisk.succeed("stat -c %d /")
+      nixStoreDevice = fullDisk.succeed("stat -c %d /nix/store")
+      assert rootDevice == nixStoreDevice, "Nix store is mounted separately from the root fs"
+  '';
+
+}
diff --git a/nixos/tests/qtile/add-widget.patch b/nixos/tests/qtile/add-widget.patch
new file mode 100644
index 0000000000000..622ba35a19990
--- /dev/null
+++ b/nixos/tests/qtile/add-widget.patch
@@ -0,0 +1,19 @@
+--- a/config.py	2024-05-31 14:49:23.852287845 +0200
++++ b/config.py	2024-05-31 14:51:00.935182266 +0200
+@@ -29,6 +29,8 @@
+ from libqtile.lazy import lazy
+ from libqtile.utils import guess_terminal
+ 
++from qtile_extras import widget
++
+ mod = "mod4"
+ terminal = guess_terminal()
+ 
+@@ -162,6 +164,7 @@
+                 # NB Systray is incompatible with Wayland, consider using StatusNotifier instead
+                 # widget.StatusNotifier(),
+                 widget.Systray(),
++                widget.AnalogueClock(),
+                 widget.Clock(format="%Y-%m-%d %a %I:%M %p"),
+                 widget.QuickExit(),
+             ],
diff --git a/nixos/tests/qtile/config.nix b/nixos/tests/qtile/config.nix
new file mode 100644
index 0000000000000..2536b9e2a8aee
--- /dev/null
+++ b/nixos/tests/qtile/config.nix
@@ -0,0 +1,24 @@
+{ stdenvNoCC, fetchurl }:
+stdenvNoCC.mkDerivation {
+  name = "qtile-config";
+  version = "0.0.1";
+
+  src = fetchurl {
+    url = "https://raw.githubusercontent.com/qtile/qtile/v0.28.1/libqtile/resources/default_config.py";
+    hash = "sha256-Y5W277CWVNSi4BdgEW/f7Px/MMjnN9W9TDqdOncVwPc=";
+  };
+
+  prePatch = ''
+    cp $src config.py
+  '';
+
+  patches = [ ./add-widget.patch ];
+
+  dontUnpack = true;
+  dontBuild = true;
+
+  installPhase = ''
+    mkdir -p $out
+    cp config.py $out/config.py
+  '';
+}
diff --git a/nixos/tests/qtile.nix b/nixos/tests/qtile/default.nix
index b4d8f9d421144..718063fa8bb50 100644
--- a/nixos/tests/qtile.nix
+++ b/nixos/tests/qtile/default.nix
@@ -1,16 +1,27 @@
-import ./make-test-python.nix ({ lib, ...} : {
+import ../make-test-python.nix ({ lib, ...} : {
   name = "qtile";
 
   meta = {
     maintainers = with lib.maintainers; [ sigmanificient ];
   };
 
-  nodes.machine = { pkgs, lib, ... }: {
-    imports = [ ./common/x11.nix ./common/user-account.nix ];
+  nodes.machine = { pkgs, lib, ... }: let
+    # We create a custom Qtile configuration file that adds a widget from
+    # qtile-extras to the bar. This ensures that the qtile-extras package
+    # also works, and that extraPackages behaves as expected.
+
+    config-deriv = pkgs.callPackage ./config.nix { };
+  in {
+    imports = [ ../common/x11.nix ../common/user-account.nix ];
     test-support.displayManager.auto.user = "alice";
 
-    services.xserver.windowManager.qtile.enable = true;
-    services.displayManager.defaultSession = lib.mkForce "none+qtile";
+    services.xserver.windowManager.qtile = {
+      enable = true;
+      configFile = "${config-deriv}/config.py";
+      extraPackages = ps: [ ps.qtile-extras ];
+    };
+
+    services.displayManager.defaultSession = lib.mkForce "qtile";
 
     environment.systemPackages = [ pkgs.kitty ];
   };
diff --git a/nixos/tests/quake3.nix b/nixos/tests/quake3.nix
index 2d8c5207001cb..947476c7ebc1a 100644
--- a/nixos/tests/quake3.nix
+++ b/nixos/tests/quake3.nix
@@ -21,7 +21,7 @@ let
     { pkgs, ... }:
 
     { imports = [ ./common/x11.nix ];
-      hardware.opengl.driSupport = true;
+      hardware.graphics.enable = true;
       environment.systemPackages = [ pkgs.quake3demo ];
       nixpkgs.config.packageOverrides = overrides;
       nixpkgs.config.allowUnfreePredicate = unfreePredicate;
@@ -32,7 +32,7 @@ in
 rec {
   name = "quake3";
   meta = with lib.maintainers; {
-    maintainers = [ domenkozar eelco ];
+    maintainers = [ domenkozar ];
   };
 
   # TODO: lcov doesn't work atm
@@ -65,8 +65,8 @@ rec {
       client1.wait_for_x()
       client2.wait_for_x()
 
-      client1.execute("quake3 +set r_fullscreen 0 +set name Foo +connect server &")
-      client2.execute("quake3 +set r_fullscreen 0 +set name Bar +connect server &")
+      client1.execute("quake3 +set r_fullscreen 0 +set name Foo +connect server >&2 &", check_return = False)
+      client2.execute("quake3 +set r_fullscreen 0 +set name Bar +connect server >&2 &", check_return = False)
 
       server.wait_until_succeeds("grep -q 'Foo.*entered the game' /tmp/log")
       server.wait_until_succeeds("grep -q 'Bar.*entered the game' /tmp/log")
diff --git a/nixos/tests/quickwit.nix b/nixos/tests/quickwit.nix
new file mode 100644
index 0000000000000..7e617c63d7973
--- /dev/null
+++ b/nixos/tests/quickwit.nix
@@ -0,0 +1,103 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+  # Define an example Quickwit index schema,
+  # and some `exampleDocs` below, to test that ingesting
+  # and querying work as expected.
+  index_yaml = ''
+    version: 0.7
+    index_id: example_server_logs
+    doc_mapping:
+      mode: dynamic
+      field_mappings:
+        - name: datetime
+          type: datetime
+          fast: true
+          input_formats:
+            - iso8601
+          output_format: iso8601
+          fast_precision: seconds
+        - name: git
+          type: text
+          tokenizer: raw
+        - name: hostname
+          type: text
+          tokenizer: raw
+        - name: level
+          type: text
+          tokenizer: raw
+        - name: message
+          type: text
+        - name: location
+          type: text
+        - name: source
+          type: text
+      timestamp_field: datetime
+
+    search_settings:
+      default_search_fields: [message]
+
+    indexing_settings:
+      commit_timeout_secs: 10
+  '';
+
+  exampleDocs = ''
+    {"datetime":"2024-05-03T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-1","level":"Info","message":"Processing request done","location":"path/to/server.c:6442:32","source":""}
+    {"datetime":"2024-05-04T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-1","level":"Info","message":"Got exception processing request: HTTP 404","location":"path/to/server.c:6444:32","source":""}
+    {"datetime":"2024-05-05T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-1","level":"Info","message":"Got exception processing request: HTTP 404","location":"path/to/server.c:6444:32","source":""}
+    {"datetime":"2024-05-06T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-2","level":"Info","message":"Got exception processing request: HTTP 404","location":"path/to/server.c:6444:32","source":""}
+  '';
+in
+{
+  name = "quickwit";
+  meta.maintainers = [ pkgs.lib.maintainers.happysalada ];
+
+  nodes = {
+    quickwit = { config, pkgs, ... }: {
+      services.quickwit.enable = true;
+    };
+  };
+
+  testScript =
+  ''
+    quickwit.wait_for_unit("quickwit")
+    quickwit.wait_for_open_port(7280)
+    quickwit.wait_for_open_port(7281)
+
+    quickwit.wait_until_succeeds(
+      "journalctl -o cat -u quickwit.service | grep 'version: ${pkgs.quickwit.version}'"
+    )
+
+    quickwit.wait_until_succeeds(
+      "journalctl -o cat -u quickwit.service | grep 'transitioned to ready state'"
+    )
+
+    with subtest("verify UI installed"):
+      quickwit.succeed("curl -sSf http://127.0.0.1:7280/ui/")
+
+    with subtest("injest and query data"):
+      import json
+
+      # Test CLI ingestion
+      print(quickwit.succeed('${pkgs.quickwit}/bin/quickwit index create --index-config ${pkgs.writeText "index.yaml" index_yaml}'))
+      # Important to use `--wait`, otherwise the queries below race with index processing.
+      print(quickwit.succeed('${pkgs.quickwit}/bin/quickwit index ingest --index example_server_logs --input-path ${pkgs.writeText "exampleDocs.json" exampleDocs} --wait'))
+
+      # Test CLI query
+      cli_query_output = quickwit.succeed('${pkgs.quickwit}/bin/quickwit index search --index example_server_logs --query "exception"')
+      print(cli_query_output)
+
+      # Assert query result is as expected.
+      num_hits = len(json.loads(cli_query_output)["hits"])
+      assert num_hits == 3, f"cli_query_output contains unexpected number of results: {num_hits}"
+
+      # Test API query
+      api_query_output = quickwit.succeed('curl --fail http://127.0.0.1:7280/api/v1/example_server_logs/search?query=exception')
+      print(api_query_output)
+
+    quickwit.log(quickwit.succeed(
+      "systemd-analyze security quickwit.service | grep -v '✓'"
+    ))
+  '';
+})
diff --git a/nixos/tests/rabbitmq.nix b/nixos/tests/rabbitmq.nix
index 040679e68d989..4b8921662b7f4 100644
--- a/nixos/tests/rabbitmq.nix
+++ b/nixos/tests/rabbitmq.nix
@@ -9,7 +9,7 @@ in
 {
   name = "rabbitmq";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco offline ];
+    maintainers = [ offline ];
   };
 
   nodes.machine = {
diff --git a/nixos/tests/radicle.nix b/nixos/tests/radicle.nix
new file mode 100644
index 0000000000000..b68cb7d716c23
--- /dev/null
+++ b/nixos/tests/radicle.nix
@@ -0,0 +1,207 @@
+# This test runs the radicle-node and radicle-httpd services on a seed host,
+# and verifies that an alice peer can host a repository on the seed,
+# and that a bob peer can send alice a patch via the seed.
+
+{ pkgs, ... }:
+
+let
+  # The Node ID depends on nodes.seed.services.radicle.privateKeyFile
+  seed-nid = "z6Mkg52RcwDrPKRzzHaYgBkHH3Gi5p4694fvPstVE9HTyMB6";
+  seed-ssh-keys = import ./ssh-keys.nix pkgs;
+  seed-tls-certs = import common/acme/server/snakeoil-certs.nix;
+
+  commonHostConfig = { nodes, config, pkgs, ... }: {
+    environment.systemPackages = [
+      config.services.radicle.package
+      pkgs.curl
+      pkgs.gitMinimal
+      pkgs.jq
+    ];
+    environment.etc."gitconfig".text = ''
+      [init]
+        defaultBranch = main
+      [user]
+        email = root@${config.networking.hostName}
+        name = ${config.networking.hostName}
+    '';
+    networking = {
+      extraHosts = ''
+        ${nodes.seed.networking.primaryIPAddress} ${nodes.seed.services.radicle.httpd.nginx.serverName}
+      '';
+    };
+    security.pki.certificateFiles = [
+      seed-tls-certs.ca.cert
+    ];
+  };
+
+  radicleConfig = { nodes, ... }: alias:
+    pkgs.writeText "config.json" (builtins.toJSON {
+      preferredSeeds = [
+        "${seed-nid}@seed:${toString nodes.seed.services.radicle.node.listenPort}"
+      ];
+      node = {
+        inherit alias;
+        relay = "never";
+        seedingPolicy = {
+          default = "block";
+        };
+      };
+    });
+in
+
+{
+  name = "radicle";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [
+      julm
+      lorenzleutgeb
+    ];
+  };
+
+  nodes = {
+    seed = { pkgs, config, ... }: {
+      imports = [ commonHostConfig ];
+
+      services.radicle = {
+        enable = true;
+        privateKeyFile = seed-ssh-keys.snakeOilEd25519PrivateKey;
+        publicKey = seed-ssh-keys.snakeOilEd25519PublicKey;
+        node = {
+          openFirewall = true;
+        };
+        httpd = {
+          enable = true;
+          nginx = {
+            serverName = seed-tls-certs.domain;
+            addSSL = true;
+            sslCertificate = seed-tls-certs.${seed-tls-certs.domain}.cert;
+            sslCertificateKey = seed-tls-certs.${seed-tls-certs.domain}.key;
+          };
+        };
+        settings = {
+          preferredSeeds = [];
+          node = {
+            relay = "always";
+            seedingPolicy = {
+              default = "allow";
+              scope = "all";
+            };
+          };
+        };
+      };
+
+      services.nginx = {
+        enable = true;
+      };
+
+      networking.firewall.allowedTCPPorts = [ 443 ];
+    };
+
+    alice = {
+      imports = [ commonHostConfig ];
+    };
+
+    bob = {
+      imports = [ commonHostConfig ];
+    };
+  };
+
+  testScript = { nodes, ... }@args: ''
+    start_all()
+
+    with subtest("seed can run radicle-node"):
+      # The threshold and/or hardening may have to be changed with new features/checks
+      print(seed.succeed("systemd-analyze security radicle-node.service --threshold=10 --no-pager"))
+      seed.wait_for_unit("radicle-node.service")
+      seed.wait_for_open_port(${toString nodes.seed.services.radicle.node.listenPort})
+
+    with subtest("seed can run radicle-httpd"):
+      # The threshold and/or hardening may have to be changed with new features/checks
+      print(seed.succeed("systemd-analyze security radicle-httpd.service --threshold=10 --no-pager"))
+      seed.wait_for_unit("radicle-httpd.service")
+      seed.wait_for_open_port(${toString nodes.seed.services.radicle.httpd.listenPort})
+      seed.wait_for_open_port(443)
+      assert alice.succeed("curl -sS 'https://${nodes.seed.services.radicle.httpd.nginx.serverName}/api/v1' | jq -r .nid") == "${seed-nid}\n"
+      assert bob.succeed("curl -sS 'https://${nodes.seed.services.radicle.httpd.nginx.serverName}/api/v1' | jq -r .nid") == "${seed-nid}\n"
+
+    with subtest("alice can create a Node ID"):
+      alice.succeed("rad auth --alias alice --stdin </dev/null")
+      alice.copy_from_host("${radicleConfig args "alice"}", "/root/.radicle/config.json")
+    with subtest("alice can run a node"):
+      alice.succeed("rad node start")
+    with subtest("alice can create a Git repository"):
+      alice.succeed(
+        "mkdir /tmp/repo",
+        "git -C /tmp/repo init",
+        "echo hello world > /tmp/repo/testfile",
+        "git -C /tmp/repo add .",
+        "git -C /tmp/repo commit -m init"
+      )
+    with subtest("alice can create a Repository ID"):
+      alice.succeed(
+        "cd /tmp/repo && rad init --name repo --description descr --default-branch main --public"
+      )
+    alice_repo_rid=alice.succeed("cd /tmp/repo && rad inspect --rid").rstrip("\n")
+    with subtest("alice can send a repository to the seed"):
+      alice.succeed(f"rad sync --seed ${seed-nid} {alice_repo_rid}")
+
+    with subtest(f"seed can receive the repository {alice_repo_rid}"):
+      seed.wait_until_succeeds("test 1 = \"$(rad-system stats | jq .local.repos)\"")
+
+    with subtest("bob can create a Node ID"):
+      bob.succeed("rad auth --alias bob --stdin </dev/null")
+      bob.copy_from_host("${radicleConfig args "bob"}", "/root/.radicle/config.json")
+      bob.succeed("rad node start")
+    with subtest("bob can clone alice's repository from the seed"):
+      bob.succeed(f"rad clone {alice_repo_rid} /tmp/repo")
+      assert bob.succeed("cat /tmp/repo/testfile") == "hello world\n"
+
+    with subtest("bob can clone alice's repository from the seed through the HTTP gateway"):
+      bob.succeed(f"git clone https://${nodes.seed.services.radicle.httpd.nginx.serverName}/{alice_repo_rid[4:]}.git /tmp/repo-http")
+      assert bob.succeed("cat /tmp/repo-http/testfile") == "hello world\n"
+
+    with subtest("alice can push the main branch to the rad remote"):
+      alice.succeed(
+        "echo hello bob > /tmp/repo/testfile",
+        "git -C /tmp/repo add .",
+        "git -C /tmp/repo commit -m 'hello to bob'",
+        "git -C /tmp/repo push rad main"
+      )
+    with subtest("bob can sync bob's repository from the seed"):
+      bob.succeed(
+        "cd /tmp/repo && rad sync --seed ${seed-nid}",
+        "cd /tmp/repo && git pull"
+      )
+      assert bob.succeed("cat /tmp/repo/testfile") == "hello bob\n"
+
+    with subtest("bob can push a patch"):
+      bob.succeed(
+        "echo hello alice > /tmp/repo/testfile",
+        "git -C /tmp/repo checkout -b for-alice",
+        "git -C /tmp/repo add .",
+        "git -C /tmp/repo commit -m 'hello to alice'",
+        "git -C /tmp/repo push -o patch.message='hello for alice' rad HEAD:refs/patches"
+      )
+
+    bob_repo_patch1_pid=bob.succeed("cd /tmp/repo && git branch --remotes | sed -ne 's:^ *rad/patches/::'p").rstrip("\n")
+    with subtest("alice can receive the patch"):
+      alice.wait_until_succeeds("test 1 = \"$(rad stats | jq .local.patches)\"")
+      alice.succeed(
+        f"cd /tmp/repo && rad patch show {bob_repo_patch1_pid} | grep 'opened by bob'",
+        f"cd /tmp/repo && rad patch checkout {bob_repo_patch1_pid}"
+      )
+      assert alice.succeed("cat /tmp/repo/testfile") == "hello alice\n"
+    with subtest("alice can comment the patch"):
+      alice.succeed(
+        f"cd /tmp/repo && rad patch comment {bob_repo_patch1_pid} -m thank-you"
+      )
+    with subtest("alice can merge the patch"):
+      alice.succeed(
+        "git -C /tmp/repo checkout main",
+        f"git -C /tmp/repo merge patch/{bob_repo_patch1_pid[:7]}",
+        "git -C /tmp/repo push rad main",
+        "cd /tmp/repo && rad patch list | grep -qxF 'Nothing to show.'"
+      )
+  '';
+}
diff --git a/nixos/tests/rathole.nix b/nixos/tests/rathole.nix
new file mode 100644
index 0000000000000..56d7a0129f803
--- /dev/null
+++ b/nixos/tests/rathole.nix
@@ -0,0 +1,89 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+
+  let
+    successMessage = "Success 3333115147933743662";
+  in
+  {
+    name = "rathole";
+    meta.maintainers = with lib.maintainers; [ xokdvium ];
+    nodes = {
+      server = {
+        networking = {
+          useNetworkd = true;
+          useDHCP = false;
+          firewall.enable = false;
+        };
+
+        systemd.network.networks."01-eth1" = {
+          name = "eth1";
+          networkConfig.Address = "10.0.0.1/24";
+        };
+
+        services.rathole = {
+          enable = true;
+          role = "server";
+          settings = {
+            server = {
+              bind_addr = "0.0.0.0:2333";
+              services = {
+                success-message = {
+                  bind_addr = "0.0.0.0:80";
+                  token = "hunter2";
+                };
+              };
+            };
+          };
+        };
+      };
+
+      client = {
+        networking = {
+          useNetworkd = true;
+          useDHCP = false;
+        };
+
+        systemd.network.networks."01-eth1" = {
+          name = "eth1";
+          networkConfig.Address = "10.0.0.2/24";
+        };
+
+        services.nginx = {
+          enable = true;
+          virtualHosts."127.0.0.1" = {
+            root = pkgs.writeTextDir "success-message.txt" successMessage;
+          };
+        };
+
+        services.rathole = {
+          enable = true;
+          role = "client";
+          credentialsFile = pkgs.writeText "rathole-credentials.toml" ''
+            [client.services.success-message]
+            token = "hunter2"
+          '';
+          settings = {
+            client = {
+              remote_addr = "10.0.0.1:2333";
+              services.success-message = {
+                local_addr = "127.0.0.1:80";
+              };
+            };
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+      server.wait_for_unit("rathole.service")
+      server.wait_for_open_port(2333)
+      client.wait_for_unit("rathole.service")
+      server.wait_for_open_port(80)
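+      # Requests to port 80 on the server are forwarded through the rathole
+      # tunnel to the nginx instance running on the client.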
+      response = server.succeed("curl http://127.0.0.1/success-message.txt")
+      assert "${successMessage}" in response, "Got invalid response"
+      response = client.succeed("curl http://10.0.0.1/success-message.txt")
+      assert "${successMessage}" in response, "Got invalid response"
+    '';
+  }
+)
diff --git a/nixos/tests/realm.nix b/nixos/tests/realm.nix
new file mode 100644
index 0000000000000..b39b0e0a161c7
--- /dev/null
+++ b/nixos/tests/realm.nix
@@ -0,0 +1,39 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "realm";
+
+  meta = {
+    maintainers = with lib.maintainers; [ ocfox ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    services.nginx = {
+      enable = true;
+      statusPage = true;
+    };
+    # realm needs a DNS resolver to run; alternatively set config.dns.nameserver
+    services.resolved.enable = true;
+
+    services.realm = {
+      enable = true;
+      config = {
+        endpoints = [
+          {
+            listen = "0.0.0.0:1000";
+            remote = "127.0.0.1:80";
+          }
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_unit("realm.service")
+
+    machine.wait_for_open_port(80)
+    machine.wait_for_open_port(1000)
+
+    machine.succeed("curl --fail http://localhost:1000/")
+  '';
+
+})
diff --git a/nixos/tests/redlib.nix b/nixos/tests/redlib.nix
index e4bde25e30a63..808f857aed196 100644
--- a/nixos/tests/redlib.nix
+++ b/nixos/tests/redlib.nix
@@ -3,7 +3,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
   meta.maintainers = with lib.maintainers; [ soispha ];
 
   nodes.machine = {
-    services.libreddit = {
+    services.redlib = {
       package = pkgs.redlib;
       enable = true;
       # Test CAP_NET_BIND_SERVICE
@@ -12,7 +12,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
   };
 
   testScript = ''
-    machine.wait_for_unit("libreddit.service")
+    machine.wait_for_unit("redlib.service")
     machine.wait_for_open_port(80)
     # Query a page that does not require Internet access
     machine.succeed("curl --fail http://localhost:80/settings")
diff --git a/nixos/tests/renovate.nix b/nixos/tests/renovate.nix
new file mode 100644
index 0000000000000..a30b5b3d60b9c
--- /dev/null
+++ b/nixos/tests/renovate.nix
@@ -0,0 +1,69 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+  {
+    name = "renovate";
+    meta.maintainers = with pkgs.lib.maintainers; [ marie natsukium ];
+
+    nodes.machine =
+      { config, ... }:
+      {
+        services.renovate = {
+          enable = true;
+          settings = {
+            platform = "gitea";
+            endpoint = "http://localhost:3000";
+            autodiscover = true;
+            gitAuthor = "Renovate <renovate@example.com>";
+          };
+          credentials = {
+            RENOVATE_TOKEN = "/etc/renovate-token";
+          };
+        };
+        environment.systemPackages = [
+          config.services.forgejo.package
+          pkgs.tea
+          pkgs.git
+        ];
+        services.forgejo = {
+          enable = true;
+          settings.server.HTTP_PORT = 3000;
+        };
+      };
+
+    testScript = ''
+      def gitea(command):
+        return machine.succeed(f"cd /var/lib/forgejo && sudo --user=forgejo GITEA_WORK_DIR=/var/lib/forgejo GITEA_CUSTOM=/var/lib/forgejo/custom gitea {command}")
+
+      machine.wait_for_unit("forgejo.service")
+      machine.wait_for_open_port(3000)
+
+      machine.systemctl("stop forgejo.service")
+
+      gitea("admin user create --username meow --email meow@example.com --password meow")
+
+      machine.systemctl("start forgejo.service")
+      machine.wait_for_unit("forgejo.service")
+      machine.wait_for_open_port(3000)
+
+      accessToken = gitea("admin user generate-access-token --raw --username meow --scopes all | tr -d '\n'")
+
+      machine.succeed(f"tea login add --name default --user meow --token '{accessToken}' --password meow --url http://localhost:3000")
+      machine.succeed("tea repo create --name kitty --init")
+      machine.succeed("git config --global user.name Meow")
+      machine.succeed("git config --global user.email meow@example.com")
+      machine.succeed(f"git clone http://meow:{accessToken}@localhost:3000/meow/kitty.git /tmp/kitty")
+      machine.succeed("echo '{ \"name\": \"meow\", \"version\": \"0.1.0\" }' > /tmp/kitty/package.json")
+      machine.succeed("git -C /tmp/kitty add /tmp/kitty/package.json")
+      machine.succeed("git -C /tmp/kitty commit -m 'add package.json'")
+      machine.succeed("git -C /tmp/kitty push origin")
+
+      machine.succeed(f"echo '{accessToken}' > /etc/renovate-token")
+      machine.systemctl("start renovate.service")
+
+      machine.succeed("tea pulls list --repo meow/kitty | grep 'Configure Renovate'")
+      machine.succeed("tea pulls merge --repo meow/kitty 1")
+
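+      # Run Renovate again now that the onboarding PR is merged, so it
+      # processes the repository contents.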
+      machine.systemctl("start renovate.service")
+    '';
+  }
+)
diff --git a/nixos/tests/restart-by-activation-script.nix b/nixos/tests/restart-by-activation-script.nix
index 0ac079e0101e0..fdab892b72183 100644
--- a/nixos/tests/restart-by-activation-script.nix
+++ b/nixos/tests/restart-by-activation-script.nix
@@ -7,6 +7,8 @@ import ./make-test-python.nix ({ pkgs, ...} : {
   nodes.machine = { pkgs, ... }: {
     imports = [ ../modules/profiles/minimal.nix ];
 
+    system.switch.enable = true;
+
     systemd.services.restart-me = {
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
diff --git a/nixos/tests/restic.nix b/nixos/tests/restic.nix
index 4111720cf6be8..49631d27ca801 100644
--- a/nixos/tests/restic.nix
+++ b/nixos/tests/restic.nix
@@ -4,6 +4,7 @@ import ./make-test-python.nix (
   let
     remoteRepository = "/root/restic-backup";
     remoteFromFileRepository = "/root/restic-backup-from-file";
+    remoteInhibitTestRepository = "/root/restic-backup-inhibit-test";
     remoteNoInitRepository = "/root/restic-backup-no-init";
     rcloneRepository = "rclone:local:/root/restic-rclone-backup";
 
@@ -26,6 +27,7 @@ import ./make-test-python.nix (
         echo some_other_file > $out/some_other_file
         mkdir $out/a_dir
         echo a_file > $out/a_dir/a_file
+        echo a_file_2 > $out/a_dir/a_file_2
       '';
     };
 
@@ -61,11 +63,20 @@ import ./make-test-python.nix (
               inherit passwordFile exclude pruneOpts;
               initialize = true;
               repositoryFile = pkgs.writeText "repositoryFile" remoteFromFileRepository;
-              paths = [ "/opt/a_dir" ];
+              paths = [
+                "/opt/a_dir/a_file"
+                "/opt/a_dir/a_file_2"
+              ];
               dynamicFilesFrom = ''
                 find /opt -mindepth 1 -maxdepth 1 ! -name a_dir # all files in /opt except for a_dir
               '';
             };
+            inhibit-test = {
+              inherit passwordFile paths exclude pruneOpts;
+              repository = remoteInhibitTestRepository;
+              initialize = true;
+              inhibitsSleep = true;
+            };
             remote-noinit-backup = {
               inherit passwordFile exclude pruneOpts paths;
               initialize = false;
@@ -137,15 +148,18 @@ import ./make-test-python.nix (
           # test that remote-from-file-backup produces a snapshot
           "systemctl start restic-backups-remote-from-file-backup.service",
           'restic-remote-from-file-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+          "mkdir /tmp/restore-2",
+          "restic-remote-from-file-backup restore latest -t /tmp/restore-2",
+          "diff -ru ${testDir} /tmp/restore-2/opt",
 
           # test that remote-noinit-backup produces a snapshot
           "systemctl start restic-backups-remote-noinit-backup.service",
           'restic-remote-noinit-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
           # test that restoring that snapshot produces the same directory
-          "mkdir /tmp/restore-2",
-          "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} restore latest -t /tmp/restore-2",
-          "diff -ru ${testDir} /tmp/restore-2/opt",
+          "mkdir /tmp/restore-3",
+          "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} restore latest -t /tmp/restore-3",
+          "diff -ru ${testDir} /tmp/restore-3/opt",
 
           # test that rclonebackup produces a snapshot
           "systemctl start restic-backups-rclonebackup.service",
@@ -190,6 +204,13 @@ import ./make-test-python.nix (
           'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
       )
+
+      # test that the inhibit option is working
+      server.systemctl("start --no-block restic-backups-inhibit-test.service")
+      server.wait_until_succeeds(
+          "systemd-inhibit --no-legend --no-pager | grep -q restic",
+          5
+      )
     '';
   }
 )
diff --git a/nixos/tests/rke2/default.nix b/nixos/tests/rke2/default.nix
new file mode 100644
index 0000000000000..e8a5f382b735f
--- /dev/null
+++ b/nixos/tests/rke2/default.nix
@@ -0,0 +1,13 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+, lib ? pkgs.lib
+}:
+let
+  allRKE2 = lib.filterAttrs (n: _: lib.strings.hasPrefix "rke2" n) pkgs;
+in
+{
+  # Run a single node rke2 cluster and verify a pod can run
+  singleNode = lib.mapAttrs (_: rke2: import ./single-node.nix { inherit system pkgs rke2; }) allRKE2;
+  # Run a multi-node rke2 cluster and verify pod networking works across nodes
+  multiNode = lib.mapAttrs (_: rke2: import ./multi-node.nix { inherit system pkgs rke2; }) allRKE2;
+}
diff --git a/nixos/tests/rke2/multi-node.nix b/nixos/tests/rke2/multi-node.nix
new file mode 100644
index 0000000000000..ddf0b60f6fba4
--- /dev/null
+++ b/nixos/tests/rke2/multi-node.nix
@@ -0,0 +1,176 @@
+import ../make-test-python.nix ({ pkgs, lib, rke2, ... }:
+  let
+    pauseImage = pkgs.dockerTools.streamLayeredImage {
+      name = "test.local/pause";
+      tag = "local";
+      contents = pkgs.buildEnv {
+        name = "rke2-pause-image-env";
+        paths = with pkgs; [ tini bashInteractive coreutils socat ];
+      };
+      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+    };
+    # A daemonset that responds 'server' on port 8000
+    networkTestDaemonset = pkgs.writeText "test.yml" ''
+      apiVersion: apps/v1
+      kind: DaemonSet
+      metadata:
+        name: test
+        labels:
+          name: test
+      spec:
+        selector:
+          matchLabels:
+            name: test
+        template:
+          metadata:
+            labels:
+              name: test
+          spec:
+            containers:
+            - name: test
+              image: test.local/pause:local
+              imagePullPolicy: Never
+              resources:
+                limits:
+                  memory: 20Mi
+              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo server"]
+    '';
+    tokenFile = pkgs.writeText "token" "p@s$w0rd";
+    agentTokenFile = pkgs.writeText "agent-token" "p@s$w0rd";
+  in
+  {
+    name = "${rke2.name}-multi-node";
+    meta.maintainers = rke2.meta.maintainers;
+
+    nodes = {
+      server1 = { pkgs, ... }: {
+        networking.firewall.enable = false;
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.1";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.rke2 = {
+          enable = true;
+          role = "server";
+          inherit tokenFile;
+          inherit agentTokenFile;
+          nodeName = "${rke2.name}-server1";
+          package = rke2;
+          nodeIP = "192.168.1.1";
+          disable = [
+            "rke2-coredns"
+            "rke2-metrics-server"
+            "rke2-ingress-nginx"
+          ];
+          extraFlags = [
+            "--cluster-reset"
+          ];
+        };
+      };
+
+      server2 = { pkgs, ... }: {
+        networking.firewall.enable = false;
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.2";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.rke2 = {
+          enable = true;
+          role = "server";
+          serverAddr = "https://192.168.1.1:6443";
+          inherit tokenFile;
+          inherit agentTokenFile;
+          nodeName = "${rke2.name}-server2";
+          package = rke2;
+          nodeIP = "192.168.1.2";
+          disable = [
+            "rke2-coredns"
+            "rke2-metrics-server"
+            "rke2-ingress-nginx"
+          ];
+        };
+      };
+
+      agent1 = { pkgs, ... }: {
+        networking.firewall.enable = false;
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.3";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.3"; prefixLength = 24; }
+        ];
+
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.rke2 = {
+          enable = true;
+          role = "agent";
+          tokenFile = agentTokenFile;
+          serverAddr = "https://192.168.1.2:6443";
+          nodeName = "${rke2.name}-agent1";
+          package = rke2;
+          nodeIP = "192.168.1.3";
+        };
+      };
+    };
+
+    testScript = let
+      kubectl = "${pkgs.kubectl}/bin/kubectl --kubeconfig=/etc/rancher/rke2/rke2.yaml";
+      ctr = "${pkgs.containerd}/bin/ctr -a /run/k3s/containerd/containerd.sock";
+      jq = "${pkgs.jq}/bin/jq";
+      ping = "${pkgs.iputils}/bin/ping";
+    in ''
+      machines = [server1, server2, agent1]
+
+      for machine in machines:
+          machine.start()
+          machine.wait_for_unit("rke2")
+
+      # wait for the agent to show up
+      server1.succeed("${kubectl} get node ${rke2.name}-agent1")
+
+      for machine in machines:
+          machine.succeed("${pauseImage} | ${ctr} image import -")
+
+      server1.succeed("${kubectl} cluster-info")
+      server1.wait_until_succeeds("${kubectl} get serviceaccount default")
+
+      # Now create a pod on each node via a daemonset and verify they can talk to each other.
+      server1.succeed("${kubectl} apply -f ${networkTestDaemonset}")
+      server1.wait_until_succeeds(
+          f'[ "$(${kubectl} get ds test -o json | ${jq} .status.numberReady)" -eq {len(machines)} ]'
+      )
+
+      # Get pod IPs
+      pods = server1.succeed("${kubectl} get po -o json | ${jq} '.items[].metadata.name' -r").splitlines()
+      pod_ips = [
+          server1.succeed(f"${kubectl} get po {n} -o json | ${jq} '.status.podIP' -cr").strip() for n in pods
+      ]
+
+      # Verify each server can ping each pod ip
+      for pod_ip in pod_ips:
+          server1.succeed(f"${ping} -c 1 {pod_ip}")
+          agent1.succeed(f"${ping} -c 1 {pod_ip}")
+
+      # Verify the pods can talk to each other
+      resp = server1.wait_until_succeeds(f"${kubectl} exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
+      assert resp.strip() == "server"
+      resp = server1.wait_until_succeeds(f"${kubectl} exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
+      assert resp.strip() == "server"
+
+      # Cleanup
+      server1.succeed("${kubectl} delete -f ${networkTestDaemonset}")
+      for machine in machines:
+          machine.shutdown()
+    '';
+  })
diff --git a/nixos/tests/rke2/single-node.nix b/nixos/tests/rke2/single-node.nix
new file mode 100644
index 0000000000000..5a512eacca0f1
--- /dev/null
+++ b/nixos/tests/rke2/single-node.nix
@@ -0,0 +1,75 @@
+import ../make-test-python.nix ({ pkgs, lib, rke2, ... }:
+  let
+    pauseImage = pkgs.dockerTools.streamLayeredImage {
+      name = "test.local/pause";
+      tag = "local";
+      contents = pkgs.buildEnv {
+        name = "rke2-pause-image-env";
+        paths = with pkgs; [ tini (hiPrio coreutils) busybox ];
+      };
+      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+    };
+    testPodYaml = pkgs.writeText "test.yaml" ''
+      apiVersion: v1
+      kind: Pod
+      metadata:
+        name: test
+      spec:
+        containers:
+        - name: test
+          image: test.local/pause:local
+          imagePullPolicy: Never
+          command: ["sh", "-c", "sleep inf"]
+    '';
+  in
+  {
+    name = "${rke2.name}-single-node";
+    meta.maintainers = rke2.meta.maintainers;
+
+    nodes.machine = { pkgs, ... }: {
+      networking.firewall.enable = false;
+      networking.useDHCP = false;
+      networking.defaultGateway = "192.168.1.1";
+      networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+        { address = "192.168.1.1"; prefixLength = 24; }
+      ];
+
+      virtualisation.memorySize = 1536;
+      virtualisation.diskSize = 4096;
+
+      services.rke2 = {
+        enable = true;
+        role = "server";
+        package = rke2;
+        nodeIP = "192.168.1.1";
+        disable = [
+          "rke2-coredns"
+          "rke2-metrics-server"
+          "rke2-ingress-nginx"
+        ];
+        extraFlags = [
+          "--cluster-reset"
+        ];
+      };
+    };
+
+    testScript = let
+      kubectl = "${pkgs.kubectl}/bin/kubectl --kubeconfig=/etc/rancher/rke2/rke2.yaml";
+      ctr = "${pkgs.containerd}/bin/ctr -a /run/k3s/containerd/containerd.sock";
+    in ''
+      start_all()
+
+      machine.wait_for_unit("rke2")
+      machine.succeed("${kubectl} cluster-info")
+      machine.wait_until_succeeds(
+        "${pauseImage} | ${ctr} -n k8s.io image import -"
+      )
+
+      machine.wait_until_succeeds("${kubectl} get serviceaccount default")
+      machine.succeed("${kubectl} apply -f ${testPodYaml}")
+      machine.succeed("${kubectl} wait --for 'condition=Ready' pod/test")
+      machine.succeed("${kubectl} delete -f ${testPodYaml}")
+
+      machine.shutdown()
+    '';
+  })
diff --git a/nixos/tests/rosenpass.nix b/nixos/tests/rosenpass.nix
index ec4046c8c035b..5ef6e55f53746 100644
--- a/nixos/tests/rosenpass.nix
+++ b/nixos/tests/rosenpass.nix
@@ -44,7 +44,8 @@ in
           enable = true;
           networks."rosenpass" = {
             matchConfig.Name = deviceName;
-            networkConfig.IPForward = true;
+            networkConfig.IPv4Forwarding = true;
+            networkConfig.IPv6Forwarding = true;
             address = [ "${peer.ip}/64" ];
           };
 
@@ -74,10 +75,8 @@ in
           wireguardConfig.ListenPort = server.wg.listen;
           wireguardPeers = [
             {
-              wireguardPeerConfig = {
-                AllowedIPs = [ "::/0" ];
-                PublicKey = client.wg.public;
-              };
+              AllowedIPs = [ "::/0" ];
+              PublicKey = client.wg.public;
             }
           ];
         };
@@ -97,11 +96,9 @@ in
 
         systemd.network.netdevs."10-${deviceName}".wireguardPeers = [
           {
-            wireguardPeerConfig = {
-              AllowedIPs = [ "::/0" ];
-              PublicKey = server.wg.public;
-              Endpoint = "server:${builtins.toString server.wg.listen}";
-            };
+            AllowedIPs = [ "::/0" ];
+            PublicKey = server.wg.public;
+            Endpoint = "server:${builtins.toString server.wg.listen}";
           }
         ];
 
diff --git a/nixos/tests/rtorrent.nix b/nixos/tests/rtorrent.nix
new file mode 100644
index 0000000000000..77e78b549a96f
--- /dev/null
+++ b/nixos/tests/rtorrent.nix
@@ -0,0 +1,25 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  port = 50001;
+in
+{
+  name = "rtorrent";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ thiagokokada ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    services.rtorrent = {
+      inherit port;
+      enable = true;
+    };
+  };
+
+  testScript = /* python */ ''
+    machine.start()
+    machine.wait_for_unit("rtorrent.service")
+    machine.wait_for_open_port(${toString port})
+
+    machine.succeed("nc -z localhost ${toString port}")
+  '';
+})
diff --git a/nixos/tests/samba.nix b/nixos/tests/samba.nix
index 252c3dd9c76e9..2501fea2d3768 100644
--- a/nixos/tests/samba.nix
+++ b/nixos/tests/samba.nix
@@ -1,46 +1,47 @@
-import ./make-test-python.nix ({ pkgs, ... }:
-
-{
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "samba";
 
-  meta.maintainers = [ pkgs.lib.maintainers.eelco ];
+  meta.maintainers = [ lib.maintainers.anthonyroussel ];
 
-  nodes =
-    { client =
-        { pkgs, ... }:
-        { virtualisation.fileSystems =
-            { "/public" = {
-                fsType = "cifs";
-                device = "//server/public";
-                options = [ "guest" ];
-              };
-            };
+  nodes = {
+    client =
+      { ... }:
+      {
+        virtualisation.fileSystems = {
+          "/public" = {
+            fsType = "cifs";
+            device = "//server/public";
+            options = [ "guest" ];
+          };
         };
+      };
 
-      server =
-        { ... }:
-        { services.samba.enable = true;
-          services.samba.openFirewall = true;
-          services.samba.shares.public =
-            { path = "/public";
+    server =
+      { ... }:
+      {
+        services.samba = {
+          enable = true;
+          openFirewall = true;
+          settings = {
+            "public" = {
+              "path" = "/public";
               "read only" = true;
-              browseable = "yes";
+              "browseable" = "yes";
               "guest ok" = "yes";
-              comment = "Public samba share.";
+              "comment" = "Public samba share.";
             };
+          };
         };
-    };
-
-  # client# [    4.542997] mount[777]: sh: systemd-ask-password: command not found
+      };
+  };
 
-  testScript =
-    ''
-      server.start()
-      server.wait_for_unit("samba.target")
-      server.succeed("mkdir -p /public; echo bar > /public/foo")
+  testScript = ''
+    server.start()
+    server.wait_for_unit("samba.target")
+    server.succeed("mkdir -p /public; echo bar > /public/foo")
 
-      client.start()
-      client.wait_for_unit("remote-fs.target")
-      client.succeed("[[ $(cat /public/foo) = bar ]]")
-    '';
+    client.start()
+    client.wait_for_unit("remote-fs.target")
+    client.succeed("[[ $(cat /public/foo) = bar ]]")
+  '';
 })
diff --git a/nixos/tests/scion/freestanding-deployment/default.nix b/nixos/tests/scion/freestanding-deployment/default.nix
index 0c9686fbfbadf..e060f9c312709 100644
--- a/nixos/tests/scion/freestanding-deployment/default.nix
+++ b/nixos/tests/scion/freestanding-deployment/default.nix
@@ -156,17 +156,51 @@ in
     # List of AS instances
     machines = [scion01, scion02, scion03, scion04, scion05]
 
+    # Functions to avoid many for loops
+    def start(allow_reboot=False):
+        for i in machines:
+            i.start(allow_reboot=allow_reboot)
+
+    def wait_for_unit(service_name):
+        for i in machines:
+            i.wait_for_unit(service_name)
+
+    def succeed(command):
+        for i in machines:
+            i.succeed(command)
+
+    def reboot():
+        for i in machines:
+            i.reboot()
+
+    def crash():
+        for i in machines:
+            i.crash()
+
+    # Start all machines, allowing reboot for later
+    start(allow_reboot=True)
+
     # Wait for scion-control.service on all instances
-    for i in machines:
-        i.wait_for_unit("scion-control.service")
+    wait_for_unit("scion-control.service")
 
     # Execute pingAll command on all instances
-    for i in machines:
-        i.succeed("${pingAll} >&2")
-
-    # Restart scion-dispatcher and ping again to test robustness
-    for i in machines:
-        i.succeed("systemctl restart scion-dispatcher >&2")
-        i.succeed("${pingAll} >&2")
+    succeed("${pingAll} >&2")
+
+    # Restart all scion services and ping again to test robustness
+    succeed("systemctl restart scion-* >&2")
+    succeed("${pingAll} >&2")
+
+    # Reboot machines, wait for service, and ping again
+    reboot()
+    wait_for_unit("scion-control.service")
+    succeed("${pingAll} >&2")
+
+    # Crash, start, wait for service, and ping again
+    crash()
+    start()
+    wait_for_unit("scion-control.service")
+    succeed("pkill -9 scion-* >&2")
+    wait_for_unit("scion-control.service")
+    succeed("${pingAll} >&2")
   '';
 })
diff --git a/nixos/tests/searx.nix b/nixos/tests/searx.nix
index 02a88f690db78..0008424f068b2 100644
--- a/nixos/tests/searx.nix
+++ b/nixos/tests/searx.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ...} :
+{ pkgs, ... }:
 
 {
   name = "searx";
@@ -7,108 +7,108 @@ import ./make-test-python.nix ({ pkgs, ...} :
   };
 
   # basic setup: searx running the built-in webserver
-  nodes.base = { ... }: {
-    imports = [ ../modules/profiles/minimal.nix ];
-
-    services.searx = {
-      enable = true;
-      environmentFile = pkgs.writeText "secrets" ''
-        WOLFRAM_API_KEY  = sometoken
-        SEARX_SECRET_KEY = somesecret
-      '';
+  nodes.base =
+    { ... }:
+    {
+      services.searx = {
+        enable = true;
+        environmentFile = pkgs.writeText "secrets" ''
+          WOLFRAM_API_KEY  = sometoken
+          SEARX_SECRET_KEY = somesecret
+        '';
 
-      settings.server =
-        { port = "8080";
+        settings.server = {
+          port = "8080";
           bind_address = "0.0.0.0";
           secret_key = "@SEARX_SECRET_KEY@";
         };
-      settings.engines = [
-        { name = "wolframalpha";
-          api_key = "@WOLFRAM_API_KEY@";
-          engine = "wolframalpha_api";
-        }
-        { name = "startpage";
-          shortcut = "start";
-        }
-      ];
-    };
+        settings.engines = [
+          {
+            name = "wolframalpha";
+            api_key = "@WOLFRAM_API_KEY@";
+            engine = "wolframalpha_api";
+          }
+          {
+            name = "startpage";
+            shortcut = "start";
+          }
+        ];
+      };
 
-  };
+    };
 
   # fancy setup: run in uWSGI and use nginx as proxy
-  nodes.fancy = { config, ... }: {
-    imports = [ ../modules/profiles/minimal.nix ];
-
-    services.searx = {
-      enable = true;
-      # searx refuses to run if unchanged
-      settings.server.secret_key = "somesecret";
-
-      runInUwsgi = true;
-      uwsgiConfig = {
-        # serve using the uwsgi protocol
-        socket = "/run/searx/uwsgi.sock";
-        chmod-socket = "660";
-
-        # use /searx as url "mountpoint"
-        mount = "/searx=searx.webapp:application";
-        module = "";
-        manage-script-name = true;
+  nodes.fancy =
+    { config, ... }:
+    {
+      services.searx = {
+        enable = true;
+        # searx refuses to run if unchanged
+        settings.server.secret_key = "somesecret";
+
+        runInUwsgi = true;
+        uwsgiConfig = {
+          # serve using the uwsgi protocol
+          socket = "/run/searx/uwsgi.sock";
+          chmod-socket = "660";
+
+          # use /searx as url "mountpoint"
+          mount = "/searx=searx.webapp:application";
+          module = "";
+          manage-script-name = true;
+        };
       };
-    };
 
-    # use nginx as reverse proxy
-    services.nginx.enable = true;
-    services.nginx.virtualHosts.localhost = {
-      locations."/searx".extraConfig =
-        ''
+      # use nginx as reverse proxy
+      services.nginx.enable = true;
+      services.nginx.virtualHosts.localhost = {
+        locations."/searx".extraConfig = ''
           include ${pkgs.nginx}/conf/uwsgi_params;
           uwsgi_pass unix:/run/searx/uwsgi.sock;
         '';
-      locations."/searx/static/".alias = "${config.services.searx.package}/share/static/";
-    };
-
-    # allow nginx access to the searx socket
-    users.users.nginx.extraGroups = [ "searx" ];
-
-  };
-
-  testScript =
-    ''
-      base.start()
-
-      with subtest("Settings have been merged"):
-          base.wait_for_unit("searx-init")
-          base.wait_for_file("/run/searx/settings.yml")
-          output = base.succeed(
-              "${pkgs.yq-go}/bin/yq eval"
-              " '.engines[] | select(.name==\"startpage\") | .shortcut'"
-              " /run/searx/settings.yml"
-          ).strip()
-          assert output == "start", "Settings not merged"
+        locations."/searx/static/".alias = "${config.services.searx.package}/share/static/";
+      };
 
-      with subtest("Environment variables have been substituted"):
-          base.succeed("grep -q somesecret /run/searx/settings.yml")
-          base.succeed("grep -q sometoken /run/searx/settings.yml")
-          base.copy_from_vm("/run/searx/settings.yml")
+      # allow nginx access to the searx socket
+      users.users.nginx.extraGroups = [ "searx" ];
 
-      with subtest("Basic setup is working"):
-          base.wait_for_open_port(8080)
-          base.wait_for_unit("searx")
-          base.succeed(
-              "${pkgs.curl}/bin/curl --fail http://localhost:8080"
-          )
-          base.shutdown()
+    };
 
-      with subtest("Nginx+uWSGI setup is working"):
-          fancy.start()
-          fancy.wait_for_open_port(80)
-          fancy.wait_for_unit("uwsgi")
-          fancy.succeed(
-              "${pkgs.curl}/bin/curl --fail http://localhost/searx >&2"
-          )
-          fancy.succeed(
-              "${pkgs.curl}/bin/curl --fail http://localhost/searx/static/themes/simple/js/leaflet.js >&2"
-          )
-    '';
-})
+  testScript = ''
+    base.start()
+
+    with subtest("Settings have been merged"):
+        base.wait_for_unit("searx-init")
+        base.wait_for_file("/run/searx/settings.yml")
+        output = base.succeed(
+            "${pkgs.yq-go}/bin/yq eval"
+            " '.engines[] | select(.name==\"startpage\") | .shortcut'"
+            " /run/searx/settings.yml"
+        ).strip()
+        assert output == "start", "Settings not merged"
+
+    with subtest("Environment variables have been substituted"):
+        base.succeed("grep -q somesecret /run/searx/settings.yml")
+        base.succeed("grep -q sometoken /run/searx/settings.yml")
+        base.copy_from_vm("/run/searx/settings.yml")
+
+    with subtest("Basic setup is working"):
+        base.wait_for_open_port(8080)
+        base.wait_for_unit("searx")
+        base.succeed(
+            "${pkgs.curl}/bin/curl --fail http://localhost:8080"
+        )
+        base.shutdown()
+
+    with subtest("Nginx+uWSGI setup is working"):
+        fancy.start()
+        fancy.wait_for_open_port(80)
+        fancy.wait_for_unit("uwsgi")
+        fancy.succeed(
+            "${pkgs.curl}/bin/curl --fail http://localhost/searx >&2"
+        )
+        fancy.succeed(
+            "${pkgs.curl}/bin/curl --fail http://localhost/searx/static/themes/simple/js/leaflet.js >&2"
+        )
+  '';
+}
diff --git a/nixos/tests/seatd.nix b/nixos/tests/seatd.nix
index 138a6cb1cf44c..9178492fdb0ef 100644
--- a/nixos/tests/seatd.nix
+++ b/nixos/tests/seatd.nix
@@ -39,7 +39,7 @@ in
           dwl -s 'foot touch /tmp/foot_started'
     '';
 
-    hardware.opengl.enable = true;
+    hardware.graphics.enable = true;
     virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
     services.seatd.enable = true;
   };
diff --git a/nixos/tests/sfxr-qt.nix b/nixos/tests/sfxr-qt.nix
index 976b9b11fc66a..cca3e5f3ea765 100644
--- a/nixos/tests/sfxr-qt.nix
+++ b/nixos/tests/sfxr-qt.nix
@@ -10,7 +10,6 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     ];
 
     services.xserver.enable = true;
-    sound.enable = true;
     environment.systemPackages = [ pkgs.sfxr-qt ];
   };
 
diff --git a/nixos/tests/shattered-pixel-dungeon.nix b/nixos/tests/shattered-pixel-dungeon.nix
index b4ac1670b5cad..cabf192c6002f 100644
--- a/nixos/tests/shattered-pixel-dungeon.nix
+++ b/nixos/tests/shattered-pixel-dungeon.nix
@@ -10,7 +10,6 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     ];
 
     services.xserver.enable = true;
-    sound.enable = true;
     environment.systemPackages = [ pkgs.shattered-pixel-dungeon ];
   };
 
diff --git a/nixos/tests/shiori.nix b/nixos/tests/shiori.nix
index d0f68b903f8c3..ba9b42235df28 100644
--- a/nixos/tests/shiori.nix
+++ b/nixos/tests/shiori.nix
@@ -1,80 +1,81 @@
-import ./make-test-python.nix ({ pkgs, lib, ...}:
+import ./make-test-python.nix ({ pkgs, lib, ... }:
 
-{
-  name = "shiori";
-  meta.maintainers = with lib.maintainers; [ minijackson ];
+  {
+    name = "shiori";
+    meta.maintainers = with lib.maintainers; [ minijackson ];
 
-  nodes.machine =
-    { ... }:
-    { services.shiori.enable = true; };
+    nodes.machine = { ... }: { services.shiori.enable = true; };
 
-  testScript = let
-    authJSON = pkgs.writeText "auth.json" (builtins.toJSON {
-      username = "shiori";
-      password = "gopher";
-      owner = true;
-    });
+    testScript = let
+      authJSON = pkgs.writeText "auth.json" (builtins.toJSON {
+        username = "shiori";
+        password = "gopher";
+        owner = true;
+      });
 
-  insertBookmark = {
-    url = "http://example.org";
-    title = "Example Bookmark";
-  };
+      insertBookmark = {
+        url = "http://example.org";
+        title = "Example Bookmark";
+      };
 
-  insertBookmarkJSON = pkgs.writeText "insertBookmark.json" (builtins.toJSON insertBookmark);
-  in ''
-    import json
+      insertBookmarkJSON =
+        pkgs.writeText "insertBookmark.json" (builtins.toJSON insertBookmark);
+    in ''
+      #import json
 
-    machine.wait_for_unit("shiori.service")
-    machine.wait_for_open_port(8080)
-    machine.succeed("curl --fail http://localhost:8080/")
-    machine.succeed("curl --fail --location http://localhost:8080/ | grep -i shiori")
+      machine.wait_for_unit("shiori.service")
+      machine.wait_for_open_port(8080)
+      machine.succeed("curl --fail http://localhost:8080/")
+      machine.succeed("curl --fail --location http://localhost:8080/ | grep -i shiori")
 
-    with subtest("login"):
-        auth_json = machine.succeed(
-            "curl --fail --location http://localhost:8080/api/login "
-            "-X POST -H 'Content-Type:application/json' -d @${authJSON}"
-        )
-        auth_ret = json.loads(auth_json)
-        session_id = auth_ret["session"]
+      # The test code below no longer works because the API authentication has changed.
 
-    with subtest("bookmarks"):
-        with subtest("first use no bookmarks"):
-            bookmarks_json = machine.succeed(
-                (
-                    "curl --fail --location http://localhost:8080/api/bookmarks "
-                    "-H 'X-Session-Id:{}'"
-                ).format(session_id)
-            )
+      #with subtest("login"):
+      #    auth_json = machine.succeed(
+      #        "curl --fail --location http://localhost:8080/api/login "
+      #        "-X POST -H 'Content-Type:application/json' -d @${authJSON}"
+      #    )
+      #    auth_ret = json.loads(auth_json)
+      #    session_id = auth_ret["session"]
 
-            if json.loads(bookmarks_json)["bookmarks"] != []:
-                raise Exception("Shiori have a bookmark on first use")
+      #with subtest("bookmarks"):
+      #    with subtest("first use no bookmarks"):
+      #        bookmarks_json = machine.succeed(
+      #            (
+      #                "curl --fail --location http://localhost:8080/api/bookmarks "
+      #                "-H 'X-Session-Id:{}'"
+      #            ).format(session_id)
+      #        )
 
-        with subtest("insert bookmark"):
-            machine.succeed(
-                (
-                    "curl --fail --location http://localhost:8080/api/bookmarks "
-                    "-X POST -H 'X-Session-Id:{}' "
-                    "-H 'Content-Type:application/json' -d @${insertBookmarkJSON}"
-                ).format(session_id)
-            )
+      #        if json.loads(bookmarks_json)["bookmarks"] != []:
+      #            raise Exception("Shiori have a bookmark on first use")
 
-        with subtest("get inserted bookmark"):
-            bookmarks_json = machine.succeed(
-                (
-                    "curl --fail --location http://localhost:8080/api/bookmarks "
-                    "-H 'X-Session-Id:{}'"
-                ).format(session_id)
-            )
+      #    with subtest("insert bookmark"):
+      #        machine.succeed(
+      #            (
+      #                "curl --fail --location http://localhost:8080/api/bookmarks "
+      #                "-X POST -H 'X-Session-Id:{}' "
+      #                "-H 'Content-Type:application/json' -d @${insertBookmarkJSON}"
+      #            ).format(session_id)
+      #        )
 
-            bookmarks = json.loads(bookmarks_json)["bookmarks"]
-            if len(bookmarks) != 1:
-                raise Exception("Shiori didn't save the bookmark")
+      #    with subtest("get inserted bookmark"):
+      #        bookmarks_json = machine.succeed(
+      #            (
+      #                "curl --fail --location http://localhost:8080/api/bookmarks "
+      #                "-H 'X-Session-Id:{}'"
+      #            ).format(session_id)
+      #        )
 
-            bookmark = bookmarks[0]
-            if (
-                bookmark["url"] != "${insertBookmark.url}"
-                or bookmark["title"] != "${insertBookmark.title}"
-            ):
-                raise Exception("Inserted bookmark doesn't have same URL or title")
-  '';
-})
+      #        bookmarks = json.loads(bookmarks_json)["bookmarks"]
+      #        if len(bookmarks) != 1:
+      #            raise Exception("Shiori didn't save the bookmark")
+
+      #        bookmark = bookmarks[0]
+      #        if (
+      #            bookmark["url"] != "${insertBookmark.url}"
+      #            or bookmark["title"] != "${insertBookmark.title}"
+      #        ):
+      #            raise Exception("Inserted bookmark doesn't have same URL or title")
+    '';
+  })
diff --git a/nixos/tests/simple.nix b/nixos/tests/simple.nix
index c36287b4e843b..afd49d481a65d 100644
--- a/nixos/tests/simple.nix
+++ b/nixos/tests/simple.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ pkgs, ...} : {
   name = "simple";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco ];
+    maintainers = [ ];
   };
 
   nodes.machine = { ... }: {
diff --git a/nixos/tests/slimserver.nix b/nixos/tests/slimserver.nix
index 95cbdcf4a2a15..abc0cd2ef1812 100644
--- a/nixos/tests/slimserver.nix
+++ b/nixos/tests/slimserver.nix
@@ -8,8 +8,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
       enable = true;
       extraArguments = "-s 127.0.0.1 -d slimproto=info";
     };
-    sound.enable = true;
-    boot.initrd.kernelModules = ["snd-dummy"];
+    boot.kernelModules = ["snd-dummy"];
   };
 
   testScript =
diff --git a/nixos/tests/smokeping.nix b/nixos/tests/smokeping.nix
index 04f8139642918..fe1ecad9969b0 100644
--- a/nixos/tests/smokeping.nix
+++ b/nixos/tests/smokeping.nix
@@ -11,7 +11,6 @@ import ./make-test-python.nix ({ pkgs, ...} : {
         networking.domain = "example.com"; # FQDN: sm.example.com
         services.smokeping = {
           enable = true;
-          port = 8081;
           mailHost = "127.0.0.2";
           probeConfig = ''
             + FPing
@@ -25,12 +24,19 @@ import ./make-test-python.nix ({ pkgs, ...} : {
   testScript = ''
     start_all()
     sm.wait_for_unit("smokeping")
-    sm.wait_for_unit("thttpd")
+    sm.wait_for_unit("nginx")
     sm.wait_for_file("/var/lib/smokeping/data/Local/LocalMachine.rrd")
-    sm.succeed("curl -s -f localhost:8081/smokeping.fcgi?target=Local")
+    sm.succeed("curl -s -f localhost/smokeping.fcgi?target=Local")
     # Check that there's a helpful page without explicit path as well.
-    sm.succeed("curl -s -f localhost:8081")
+    sm.succeed("curl -s -f localhost")
     sm.succeed("ls /var/lib/smokeping/cache/Local/LocalMachine_mini.png")
     sm.succeed("ls /var/lib/smokeping/cache/index.html")
+
+    # stop and start the service like nixos-rebuild would do
+    # (see https://github.com/NixOS/nixpkgs/issues/265953)
+    sm.succeed("systemctl stop smokeping")
+    sm.succeed("systemctl start smokeping")
+    # ensure all services restarted properly
+    sm.succeed("systemctl --failed | grep -q '0 loaded units listed'")
   '';
 })
diff --git a/nixos/tests/snapper.nix b/nixos/tests/snapper.nix
index 674523584fdaa..0369419930f15 100644
--- a/nixos/tests/snapper.nix
+++ b/nixos/tests/snapper.nix
@@ -19,7 +19,9 @@ import ./make-test-python.nix ({ ... }:
     services.snapper.filters = "/nix";
   };
 
-  testScript = ''
+  testScript = { nodes, ... }: let
+    inherit (nodes.machine.services.snapper) snapshotRootOnBoot;
+  in ''
     machine.succeed("btrfs subvolume create /home/.snapshots")
     machine.succeed("snapper -c home list")
     machine.succeed("snapper -c home create --description empty")
@@ -31,5 +33,6 @@ import ./make-test-python.nix ({ ... }:
     machine.succeed("snapper -c home delete 2")
     machine.succeed("systemctl --wait start snapper-timeline.service")
     machine.succeed("systemctl --wait start snapper-cleanup.service")
+    machine.${if snapshotRootOnBoot then "succeed" else "fail"}("systemctl cat snapper-boot.service")
   '';
 })
diff --git a/nixos/tests/sogo.nix b/nixos/tests/sogo.nix
index e9059a2ab7734..84d219659bdb2 100644
--- a/nixos/tests/sogo.nix
+++ b/nixos/tests/sogo.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ pkgs, ... }: {
   name = "sogo";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [];
+    maintainers = [ ];
   };
 
   nodes = {
diff --git a/nixos/tests/soju.nix b/nixos/tests/soju.nix
index 23da36f7b3aba..f13c447fb5f6b 100644
--- a/nixos/tests/soju.nix
+++ b/nixos/tests/soju.nix
@@ -8,7 +8,7 @@ let
 in
 {
   name = "soju";
-  meta.maintainers = with lib.maintainers; [ Benjamin-L ];
+  meta.maintainers = [ ];
 
   nodes.machine = { ... }: {
     services.soju = {
diff --git a/nixos/tests/sourcehut/nodes/common.nix b/nixos/tests/sourcehut/nodes/common.nix
index f0a81358f9721..1326a9e900576 100644
--- a/nixos/tests/sourcehut/nodes/common.nix
+++ b/nixos/tests/sourcehut/nodes/common.nix
@@ -44,37 +44,37 @@ in
       pgp-privkey = toString (pkgs.writeText "sourcehut.pgp-privkey" ''
         -----BEGIN PGP PRIVATE KEY BLOCK-----
 
-        lFgEYqDRORYJKwYBBAHaRw8BAQdAehGoy36FUx2OesYm07be2rtLyvR5Pb/ltstd
-        Gk7hYQoAAP9X4oPmxxrHN8LewBpWITdBomNqlHoiP7mI0nz/BOPJHxEktDZuaXhv
+        lFgEZrFBKRYJKwYBBAHaRw8BAQdAS1Ffiytk0h0z0jfaT3qyiDUV/plVIUwOg1Yr
+        AXP2YmsAAP0W6QMC3G2G41rzCGLeSHeGibor1+XuxvcwUpVdW7ge+BH/tDZuaXhv
         cy90ZXN0cy9zb3VyY2VodXQgPHJvb3QraHV0QHNvdXJjZWh1dC5sb2NhbGRvbWFp
-        bj6IlwQTFgoAPxYhBPqjgjnL8RHN4JnADNicgXaYm0jJBQJioNE5AhsDBQkDwmcA
-        BgsJCAcDCgUVCgkICwUWAwIBAAIeBQIXgAAKCRDYnIF2mJtIySVCAP9e2nHsVHSi
-        2B1YGZpVG7Xf36vxljmMkbroQy+0gBPwRwEAq+jaiQqlbGhQ7R/HMFcAxBIVsq8h
-        Aw1rngsUd0o3dAicXQRioNE5EgorBgEEAZdVAQUBAQdAXZV2Sd5ZNBVTBbTGavMv
-        D6ORrUh8z7TI/3CsxCE7+yADAQgHAAD/c1RU9xH+V/uI1fE7HIn/zL0LUPpsuce2
-        cH++g4u3kBgTOYh+BBgWCgAmFiEE+qOCOcvxEc3gmcAM2JyBdpibSMkFAmKg0TkC
-        GwwFCQPCZwAACgkQ2JyBdpibSMlKagD/cTre6p1m8QuJ7kwmCFRSz5tBzIuYMMgN
-        xtT7dmS91csA/35fWsOykSiFRojQ7ccCSUTHL7ApF2EbL968tP/D2hIG
-        =Hjoc
+        bj6IkwQTFgoAOxYhBMISh2Z08FCi969cq9R2wSP9QF2bBQJmsUEpAhsDBQsJCAcC
+        AiICBhUKCQgLAgQWAgMBAh4HAheAAAoJENR2wSP9QF2b4JMA+wQLdxVcod/ppyvH
+        QguGqqhkpk8KquCddOuFnQVAfHFWAQCK5putVk4mGzsoLTbOJCSGRC4pjEktZawQ
+        MTqJmnOuC5xdBGaxQSkSCisGAQQBl1UBBQEBB0Aed6UYJyighTY+KuPNQ439st3x
+        x04T1j58sx3AnKgYewMBCAcAAP9WLB79HO1zFRqTCnk7GIEWWogMFKVpazeBUNu9
+        h9rzCA2+iHgEGBYKACAWIQTCEodmdPBQovevXKvUdsEj/UBdmwUCZrFBKQIbDAAK
+        CRDUdsEj/UBdmwgJAQDVk/px/pSzqreSeDLzxlb6dOo+N1KcicsJ0akhSJUcvwD9
+        EPhpEDZu/UBKchAutOhWwz+y6pyoF4Vt7XG+jbJQtA4=
+        =KaQc
         -----END PGP PRIVATE KEY BLOCK-----
       '');
       pgp-pubkey = pkgs.writeText "sourcehut.pgp-pubkey" ''
         -----BEGIN PGP PUBLIC KEY BLOCK-----
 
-        mDMEYqDRORYJKwYBBAHaRw8BAQdAehGoy36FUx2OesYm07be2rtLyvR5Pb/ltstd
-        Gk7hYQq0Nm5peG9zL3Rlc3RzL3NvdXJjZWh1dCA8cm9vdCtodXRAc291cmNlaHV0
-        LmxvY2FsZG9tYWluPoiXBBMWCgA/FiEE+qOCOcvxEc3gmcAM2JyBdpibSMkFAmKg
-        0TkCGwMFCQPCZwAGCwkIBwMKBRUKCQgLBRYDAgEAAh4FAheAAAoJENicgXaYm0jJ
-        JUIA/17acexUdKLYHVgZmlUbtd/fq/GWOYyRuuhDL7SAE/BHAQCr6NqJCqVsaFDt
-        H8cwVwDEEhWyryEDDWueCxR3Sjd0CLg4BGKg0TkSCisGAQQBl1UBBQEBB0BdlXZJ
-        3lk0FVMFtMZq8y8Po5GtSHzPtMj/cKzEITv7IAMBCAeIfgQYFgoAJhYhBPqjgjnL
-        8RHN4JnADNicgXaYm0jJBQJioNE5AhsMBQkDwmcAAAoJENicgXaYm0jJSmoA/3E6
-        3uqdZvELie5MJghUUs+bQcyLmDDIDcbU+3ZkvdXLAP9+X1rDspEohUaI0O3HAklE
-        xy+wKRdhGy/evLT/w9oSBg==
-        =pJD7
+        mDMEZrFBKRYJKwYBBAHaRw8BAQdAS1Ffiytk0h0z0jfaT3qyiDUV/plVIUwOg1Yr
+        AXP2Ymu0Nm5peG9zL3Rlc3RzL3NvdXJjZWh1dCA8cm9vdCtodXRAc291cmNlaHV0
+        LmxvY2FsZG9tYWluPoiTBBMWCgA7FiEEwhKHZnTwUKL3r1yr1HbBI/1AXZsFAmax
+        QSkCGwMFCwkIBwICIgIGFQoJCAsCBBYCAwECHgcCF4AACgkQ1HbBI/1AXZvgkwD7
+        BAt3FVyh3+mnK8dCC4aqqGSmTwqq4J1064WdBUB8cVYBAIrmm61WTiYbOygtNs4k
+        JIZELimMSS1lrBAxOomac64LuDgEZrFBKRIKKwYBBAGXVQEFAQEHQB53pRgnKKCF
+        Nj4q481Djf2y3fHHThPWPnyzHcCcqBh7AwEIB4h4BBgWCgAgFiEEwhKHZnTwUKL3
+        r1yr1HbBI/1AXZsFAmaxQSkCGwwACgkQ1HbBI/1AXZsICQEA1ZP6cf6Us6q3kngy
+        88ZW+nTqPjdSnInLCdGpIUiVHL8A/RD4aRA2bv1ASnIQLrToVsM/suqcqBeFbe1x
+        vo2yULQO
+        =luxZ
         -----END PGP PUBLIC KEY BLOCK-----
       '';
-      pgp-key-id = "0xFAA38239CBF111CDE099C00CD89C8176989B48C9";
+      pgp-key-id = "0xC212876674F050A2F7AF5CABD476C123FD405D9B";
     };
   };
 
diff --git a/nixos/tests/stalwart-mail.nix b/nixos/tests/stalwart-mail.nix
index 634c0e2e39261..173b4fce4ad5d 100644
--- a/nixos/tests/stalwart-mail.nix
+++ b/nixos/tests/stalwart-mail.nix
@@ -18,8 +18,8 @@ in import ./make-test-python.nix ({ lib, ... }: {
         server.hostname = domain;
 
         certificate."snakeoil" = {
-          cert = "file://${certs.${domain}.cert}";
-          private-key = "file://${certs.${domain}.key}";
+          cert = "%{file:${certs.${domain}.cert}}%";
+          private-key = "%{file:${certs.${domain}.key}}%";
         };
 
         server.tls = {
@@ -40,24 +40,24 @@ in import ./make-test-python.nix ({ lib, ... }: {
           };
         };
 
-        session.auth.mechanisms = [ "PLAIN" ];
-        session.auth.directory = "in-memory";
-        storage.directory = "in-memory";  # shared with imap
+        session.auth.mechanisms = "[plain]";
+        session.auth.directory = "'in-memory'";
+        storage.directory = "in-memory";
 
-        session.rcpt.directory = "in-memory";
-        queue.outbound.next-hop = [ "local" ];
+        session.rcpt.directory = "'in-memory'";
+        queue.outbound.next-hop = "'local'";
 
         directory."in-memory" = {
           type = "memory";
           principals = [
             {
-              type = "individual";
+              class = "individual";
               name = "alice";
               secret = "foobar";
               email = [ "alice@${domain}" ];
             }
             {
-              type = "individual";
+              class = "individual";
               name = "bob";
               secret = "foobar";
               email = [ "bob@${domain}" ];
@@ -115,6 +115,6 @@ in import ./make-test-python.nix ({ lib, ... }: {
   '';
 
   meta = {
-    maintainers = with lib.maintainers; [ happysalada pacien ];
+    maintainers = with lib.maintainers; [ happysalada pacien onny ];
   };
 })
diff --git a/nixos/tests/step-ca.nix b/nixos/tests/step-ca.nix
index a855b590232dd..68364e278d568 100644
--- a/nixos/tests/step-ca.nix
+++ b/nixos/tests/step-ca.nix
@@ -16,7 +16,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
           { config, pkgs, ... }: {
             services.step-ca = {
               enable = true;
-              address = "0.0.0.0";
+              address = "[::]";
               port = 8443;
               openFirewall = true;
               intermediatePasswordFile = "${test-certificates}/intermediate-password-file";
@@ -62,6 +62,24 @@ import ./make-test-python.nix ({ pkgs, ... }:
             };
           };
 
+        caclientcaddy =
+          { config, pkgs, ... }: {
+            security.pki.certificateFiles = [ "${test-certificates}/root_ca.crt" ];
+
+            networking.firewall.allowedTCPPorts = [ 80 443 ];
+
+            services.caddy = {
+              enable = true;
+              virtualHosts."caclientcaddy".extraConfig = ''
+                respond "Welcome to Caddy!"
+
+                tls caddy@example.org {
+                  ca https://caserver:8443/acme/acme/directory
+                }
+              '';
+            };
+          };
+
         catester = { config, pkgs, ... }: {
           security.pki.certificateFiles = [ "${test-certificates}/root_ca.crt" ];
         };
@@ -71,7 +89,12 @@ import ./make-test-python.nix ({ pkgs, ... }:
       ''
         catester.start()
         caserver.wait_for_unit("step-ca.service")
+        caserver.wait_until_succeeds("journalctl -o cat -u step-ca.service | grep '${pkgs.step-ca.version}'")
+
         caclient.wait_for_unit("acme-finished-caclient.target")
         catester.succeed("curl https://caclient/ | grep \"Welcome to nginx!\"")
+
+        caclientcaddy.wait_for_unit("caddy.service")
+        catester.succeed("curl https://caclientcaddy/ | grep \"Welcome to Caddy!\"")
       '';
   })
diff --git a/nixos/tests/stub-ld.nix b/nixos/tests/stub-ld.nix
index 25161301741b7..72b0aebf3e6ce 100644
--- a/nixos/tests/stub-ld.nix
+++ b/nixos/tests/stub-ld.nix
@@ -45,10 +45,10 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
           ${if32 "machine.succeed('test -L /${libDir32}/${ldsoBasename32}')"}
 
       with subtest("Try FHS executable"):
-          machine.copy_from_host('${test-exec.${pkgs.system}}','test-exec')
+          machine.copy_from_host('${test-exec.${pkgs.stdenv.hostPlatform.system}}','test-exec')
           machine.succeed('if test-exec/${exec-name} 2>outfile; then false; else [ $? -eq 127 ];fi')
           machine.succeed('grep -qi nixos outfile')
-          ${if32 "machine.copy_from_host('${test-exec.${pkgs32.system}}','test-exec32')"}
+          ${if32 "machine.copy_from_host('${test-exec.${pkgs32.stdenv.hostPlatform.system}}','test-exec32')"}
           ${if32 "machine.succeed('if test-exec32/${exec-name} 2>outfile32; then false; else [ $? -eq 127 ];fi')"}
           ${if32 "machine.succeed('grep -qi nixos outfile32')"}
 
diff --git a/nixos/tests/swayfx.nix b/nixos/tests/swayfx.nix
index 77844ec80ae1d..fe1caf8fffa8b 100644
--- a/nixos/tests/swayfx.nix
+++ b/nixos/tests/swayfx.nix
@@ -3,7 +3,7 @@ import ./make-test-python.nix (
   {
     name = "swayfx";
     meta = {
-      maintainers = with lib.maintainers; [ eclairevoyant ];
+      maintainers = with lib.maintainers; [ ];
     };
 
     # testScriptWithTypes:49: error: Cannot call function of unknown type
diff --git a/nixos/tests/switch-test.nix b/nixos/tests/switch-test.nix
index 4a7bcd5a82264..84c6e90689b37 100644
--- a/nixos/tests/switch-test.nix
+++ b/nixos/tests/switch-test.nix
@@ -1,6 +1,6 @@
 # Test configuration switching.
 
-import ./make-test-python.nix ({ lib, pkgs, ...} : let
+import ./make-test-python.nix ({ lib, pkgs, ng, ...} : let
 
   # Simple service that can either be socket-activated or that will
   # listen on port 1234 if not socket-activated.
@@ -48,6 +48,8 @@ in {
 
   nodes = {
     machine = { pkgs, lib, ... }: {
+      system.switch.enableNg = ng;
+
       environment.systemPackages = [ pkgs.socat ]; # for the socket activation stuff
       users.mutableUsers = false;
 
@@ -589,6 +591,7 @@ in {
     };
 
     other = {
+      system.switch.enable = true;
       users.mutableUsers = true;
     };
   };
diff --git a/nixos/tests/sx.nix b/nixos/tests/sx.nix
new file mode 100644
index 0000000000000..1cdc4858cf00d
--- /dev/null
+++ b/nixos/tests/sx.nix
@@ -0,0 +1,63 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+  {
+    name = "sx";
+    meta.maintainers = with lib.maintainers; [
+      figsoda
+      thiagokokada
+    ];
+
+    nodes.machine =
+      { ... }:
+      {
+        imports = [ ./common/user-account.nix ];
+
+        environment.systemPackages = with pkgs; [ icewm ];
+
+        services.getty.autologinUser = "alice";
+
+        services.xserver = {
+          enable = true;
+          displayManager.sx.enable = true;
+        };
+
+        # Create sxrc file on login and start sx
+        programs.bash.loginShellInit =
+          # bash
+          ''
+            mkdir -p "$HOME/.config/sx"
+            echo 'exec icewm' > "$HOME/.config/sx/sxrc"
+            chmod +x "$HOME/.config/sx/sxrc"
+
+            sx
+          '';
+      };
+
+    testScript =
+      { nodes, ... }:
+      let
+        user = nodes.machine.users.users.alice;
+      in
+      # python
+      ''
+        start_all()
+
+        machine.wait_for_unit("multi-user.target")
+
+        xauthority = "${user.home}/.local/share/sx/xauthority"
+        machine.wait_for_file(xauthority)
+        machine.succeed(f"xauth merge {xauthority}")
+
+        def icewm_is_visible(_last_try: bool) -> bool:
+            # sx will set DISPLAY as the TTY number we started, in this case
+            # TTY1:
+            # https://github.com/Earnestly/sx/blob/master/sx#L41.
+            # We can't use `machine.wait_for_window` here since we are running
+            # X as alice and not root.
+            return "IceWM" in machine.succeed("DISPLAY=:1 xwininfo -root -tree")
+
+        # Add retry logic to increase reliability
+        retry(icewm_is_visible)
+      '';
+  }
+)
diff --git a/nixos/tests/systemd-analyze.nix b/nixos/tests/systemd-analyze.nix
index 31588e2b41aa5..37c20d5fe5b65 100644
--- a/nixos/tests/systemd-analyze.nix
+++ b/nixos/tests/systemd-analyze.nix
@@ -9,7 +9,6 @@ import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
   nodes.machine =
     { pkgs, lib, ... }:
     { boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest;
-      sound.enable = true; # needed for the factl test, /dev/snd/* exists without them but udev doesn't care then
     };
 
   testScript = ''
diff --git a/nixos/tests/systemd-boot-ovmf-broken-fat-driver.patch b/nixos/tests/systemd-boot-ovmf-broken-fat-driver.patch
deleted file mode 100644
index ef547c02f9187..0000000000000
--- a/nixos/tests/systemd-boot-ovmf-broken-fat-driver.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From d87a7513c6f2f2824203032ef27caeb84892ed7e Mon Sep 17 00:00:00 2001
-From: Will Fancher <elvishjerricco@gmail.com>
-Date: Tue, 30 May 2023 16:53:20 -0400
-Subject: [PATCH] Intentionally break the fat driver
-
----
- FatPkg/EnhancedFatDxe/ReadWrite.c | 5 +++++
- 1 file changed, 5 insertions(+)
-
-diff --git a/FatPkg/EnhancedFatDxe/ReadWrite.c b/FatPkg/EnhancedFatDxe/ReadWrite.c
-index 8f525044d1f1..32c62ff7817b 100644
---- a/FatPkg/EnhancedFatDxe/ReadWrite.c
-+++ b/FatPkg/EnhancedFatDxe/ReadWrite.c
-@@ -216,6 +216,11 @@ FatIFileAccess (
-   Volume = OFile->Volume;
-   Task   = NULL;
- 
-+  if (*BufferSize > (10U * 1024U * 1024U)) {
-+    IFile->Position += 10U * 1024U * 1024U;
-+    return EFI_BAD_BUFFER_SIZE;
-+  }
-+
-   //
-   // Write to a directory is unsupported
-   //
diff --git a/nixos/tests/systemd-boot.nix b/nixos/tests/systemd-boot.nix
index 54c380602bd40..79bfcb84ebd76 100644
--- a/nixos/tests/systemd-boot.nix
+++ b/nixos/tests/systemd-boot.nix
@@ -13,6 +13,7 @@ let
     boot.loader.systemd-boot.enable = true;
     boot.loader.efi.canTouchEfiVariables = true;
     environment.systemPackages = [ pkgs.efibootmgr ];
+    system.switch.enable = true;
   };
 
   commonXbootldr = { config, lib, pkgs, ... }:
@@ -169,10 +170,23 @@ in
       imports = [ common ];
       specialisation.something.configuration = {
         boot.loader.systemd-boot.sortKey = "something";
+
+        # Since qemu will dynamically create a devicetree blob when starting
+        # up, it is not straightforward to create an export of that devicetree
+        # blob without knowing beforehand all the flags we would pass to qemu
+        # (we would then be able to use `dumpdtb`). Thus, the following config
+        # will not boot, but it does allow us to assert that the boot entry has
+        # the correct contents.
+        boot.loader.systemd-boot.installDeviceTree = pkgs.stdenv.hostPlatform.isAarch64;
+        hardware.deviceTree.name = "dummy.dtb";
+        hardware.deviceTree.package = lib.mkForce (pkgs.runCommand "dummy-devicetree-package" { } ''
+          mkdir -p $out
+          cp ${pkgs.emptyFile} $out/dummy.dtb
+        '');
       };
     };
 
-    testScript = ''
+    testScript = { nodes, ... }: ''
       machine.start()
       machine.wait_for_unit("multi-user.target")
 
@@ -185,6 +199,10 @@ in
       machine.succeed(
           "grep 'sort-key something' /boot/loader/entries/nixos-generation-1-specialisation-something.conf"
       )
+    '' + pkgs.lib.optionalString pkgs.stdenv.hostPlatform.isAarch64 ''
+      machine.succeed(
+          r"grep 'devicetree /EFI/nixos/[a-z0-9]\{32\}.*dummy' /boot/loader/entries/nixos-generation-1-specialisation-something.conf"
+      )
     '';
   };
 
@@ -224,22 +242,37 @@ in
     testScript = ''
       machine.succeed("mount -o remount,rw /boot")
 
-      # Replace version inside sd-boot with something older. See magic[] string in systemd src/boot/efi/boot.c
-      machine.succeed(
-          """
-        find /boot -iname '*boot*.efi' -print0 | \
-        xargs -0 -I '{}' sed -i 's/#### LoaderInfo: systemd-boot .* ####/#### LoaderInfo: systemd-boot 000.0-1-notnixos ####/' '{}'
-      """
-      )
-
-      output = machine.succeed("/run/current-system/bin/switch-to-configuration boot")
+      def switch():
+          # Replace version inside sd-boot with something older. See magic[] string in systemd src/boot/efi/boot.c
+          machine.succeed(
+            """
+            find /boot -iname '*boot*.efi' -print0 | \
+            xargs -0 -I '{}' sed -i 's/#### LoaderInfo: systemd-boot .* ####/#### LoaderInfo: systemd-boot 000.0-1-notnixos ####/' '{}'
+            """
+          )
+          return machine.succeed("/run/current-system/bin/switch-to-configuration boot 2>&1")
+
+      output = switch()
       assert "updating systemd-boot from 000.0-1-notnixos to " in output, "Couldn't find systemd-boot update message"
+      assert 'to "/boot/EFI/systemd/systemd-bootx64.efi"' in output, "systemd-boot not copied to to /boot/EFI/systemd/systemd-bootx64.efi"
+      assert 'to "/boot/EFI/BOOT/BOOTX64.EFI"' in output, "systemd-boot not copied to to /boot/EFI/BOOT/BOOTX64.EFI"
+
+      with subtest("Test that updating works with lowercase bootx64.efi"):
+          machine.succeed(
+              # Move to tmp file name first, otherwise mv complains the new location is the same
+              "mv /boot/EFI/BOOT/BOOTX64.EFI /boot/EFI/BOOT/bootx64.efi.new",
+              "mv /boot/EFI/BOOT/bootx64.efi.new /boot/EFI/BOOT/bootx64.efi",
+          )
+          output = switch()
+          assert "updating systemd-boot from 000.0-1-notnixos to " in output, "Couldn't find systemd-boot update message"
+          assert 'to "/boot/EFI/systemd/systemd-bootx64.efi"' in output, "systemd-boot not copied to to /boot/EFI/systemd/systemd-bootx64.efi"
+          assert 'to "/boot/EFI/BOOT/BOOTX64.EFI"' in output, "systemd-boot not copied to to /boot/EFI/BOOT/BOOTX64.EFI"
     '';
   };
 
-  memtest86 = makeTest {
+  memtest86 = with pkgs.lib; optionalAttrs (meta.availableOn { inherit system; } pkgs.memtest86plus) (makeTest {
     name = "systemd-boot-memtest86";
-    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+    meta.maintainers = with maintainers; [ julienmalka ];
 
     nodes.machine = { pkgs, lib, ... }: {
       imports = [ common ];
@@ -250,11 +283,11 @@ in
       machine.succeed("test -e /boot/loader/entries/memtest86.conf")
       machine.succeed("test -e /boot/efi/memtest86/memtest.efi")
     '';
-  };
+  });
 
   netbootxyz = makeTest {
     name = "systemd-boot-netbootxyz";
-    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+    meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
 
     nodes.machine = { pkgs, lib, ... }: {
       imports = [ common ];
@@ -269,7 +302,7 @@ in
 
   memtestSortKey = makeTest {
     name = "systemd-boot-memtest-sortkey";
-    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+    meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
 
     nodes.machine = { pkgs, lib, ... }: {
       imports = [ common ];
@@ -307,7 +340,7 @@ in
 
   extraEntries = makeTest {
     name = "systemd-boot-extra-entries";
-    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+    meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
 
     nodes.machine = { pkgs, lib, ... }: {
       imports = [ common ];
@@ -326,7 +359,7 @@ in
 
   extraFiles = makeTest {
     name = "systemd-boot-extra-files";
-    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+    meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
 
     nodes.machine = { pkgs, lib, ... }: {
       imports = [ common ];
@@ -343,7 +376,7 @@ in
 
   switch-test = makeTest {
     name = "systemd-boot-switch-test";
-    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+    meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
 
     nodes = {
       inherit common;
@@ -428,32 +461,6 @@ in
       '';
   };
 
-  # Some UEFI firmwares fail on large reads. Now that systemd-boot loads initrd
-  # itself, systems with such firmware won't boot without this fix
-  uefiLargeFileWorkaround = makeTest {
-    name = "uefi-large-file-workaround";
-    meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
-    nodes.machine = { pkgs, ... }: {
-      imports = [common];
-      virtualisation.efi.OVMF = pkgs.OVMF.overrideAttrs (old: {
-        # This patch deliberately breaks the FAT driver in EDK2 to
-        # exhibit (part of) the firmware bug that we are testing
-        # for. Files greater than 10MiB will fail to be read in a
-        # single Read() call, so systemd-boot will fail to load the
-        # initrd without a workaround. The number 10MiB was chosen
-        # because if it were smaller than the kernel size, even the
-        # LoadImage call would fail, which is not the failure mode
-        # we're testing for. It needs to be between the kernel size
-        # and the initrd size.
-        patches = old.patches or [] ++ [ ./systemd-boot-ovmf-broken-fat-driver.patch ];
-      });
-    };
-
-    testScript = ''
-      machine.wait_for_unit("multi-user.target")
-    '';
-  };
-
   no-bootspec = makeTest
     {
       name = "systemd-boot-no-bootspec";
diff --git a/nixos/tests/systemd-confinement.nix b/nixos/tests/systemd-confinement.nix
deleted file mode 100644
index bde5b770ea50d..0000000000000
--- a/nixos/tests/systemd-confinement.nix
+++ /dev/null
@@ -1,184 +0,0 @@
-import ./make-test-python.nix {
-  name = "systemd-confinement";
-
-  nodes.machine = { pkgs, lib, ... }: let
-    testServer = pkgs.writeScript "testserver.sh" ''
-      #!${pkgs.runtimeShell}
-      export PATH=${lib.escapeShellArg "${pkgs.coreutils}/bin"}
-      ${lib.escapeShellArg pkgs.runtimeShell} 2>&1
-      echo "exit-status:$?"
-    '';
-
-    testClient = pkgs.writeScriptBin "chroot-exec" ''
-      #!${pkgs.runtimeShell} -e
-      output="$(echo "$@" | nc -NU "/run/test$(< /teststep).sock")"
-      ret="$(echo "$output" | sed -nre '$s/^exit-status:([0-9]+)$/\1/p')"
-      echo "$output" | head -n -1
-      exit "''${ret:-1}"
-    '';
-
-    mkTestStep = num: {
-      testScript,
-      config ? {},
-      serviceName ? "test${toString num}",
-    }: {
-      systemd.sockets.${serviceName} = {
-        description = "Socket for Test Service ${toString num}";
-        wantedBy = [ "sockets.target" ];
-        socketConfig.ListenStream = "/run/test${toString num}.sock";
-        socketConfig.Accept = true;
-      };
-
-      systemd.services."${serviceName}@" = {
-        description = "Confined Test Service ${toString num}";
-        confinement = (config.confinement or {}) // { enable = true; };
-        serviceConfig = (config.serviceConfig or {}) // {
-          ExecStart = testServer;
-          StandardInput = "socket";
-        };
-      } // removeAttrs config [ "confinement" "serviceConfig" ];
-
-      __testSteps = lib.mkOrder num (''
-        machine.succeed("echo ${toString num} > /teststep")
-      '' + testScript);
-    };
-
-  in {
-    imports = lib.imap1 mkTestStep [
-      { config.confinement.mode = "chroot-only";
-        testScript = ''
-          with subtest("chroot-only confinement"):
-              paths = machine.succeed('chroot-exec ls -1 / | paste -sd,').strip()
-              assert_eq(paths, "bin,nix,run")
-              uid = machine.succeed('chroot-exec id -u').strip()
-              assert_eq(uid, "0")
-              machine.succeed("chroot-exec chown 65534 /bin")
-        '';
-      }
-      { testScript = ''
-          with subtest("full confinement with APIVFS"):
-              machine.fail("chroot-exec ls -l /etc")
-              machine.fail("chroot-exec chown 65534 /bin")
-              assert_eq(machine.succeed('chroot-exec id -u').strip(), "0")
-              machine.succeed("chroot-exec chown 0 /bin")
-        '';
-      }
-      { config.serviceConfig.BindReadOnlyPaths = [ "/etc" ];
-        testScript = ''
-          with subtest("check existence of bind-mounted /etc"):
-              passwd = machine.succeed('chroot-exec cat /etc/passwd').strip()
-              assert len(passwd) > 0, "/etc/passwd must not be empty"
-        '';
-      }
-      { config.serviceConfig.User = "chroot-testuser";
-        config.serviceConfig.Group = "chroot-testgroup";
-        testScript = ''
-          with subtest("check if User/Group really runs as non-root"):
-              machine.succeed("chroot-exec ls -l /dev")
-              uid = machine.succeed('chroot-exec id -u').strip()
-              assert uid != "0", "UID of chroot-testuser shouldn't be 0"
-              machine.fail("chroot-exec touch /bin/test")
-        '';
-      }
-      (let
-        symlink = pkgs.runCommand "symlink" {
-          target = pkgs.writeText "symlink-target" "got me\n";
-        } "ln -s \"$target\" \"$out\"";
-      in {
-        config.confinement.packages = lib.singleton symlink;
-        testScript = ''
-          with subtest("check if symlinks are properly bind-mounted"):
-              machine.fail("chroot-exec test -e /etc")
-              text = machine.succeed('chroot-exec cat ${symlink}').strip()
-              assert_eq(text, "got me")
-        '';
-      })
-      { config.serviceConfig.User = "chroot-testuser";
-        config.serviceConfig.Group = "chroot-testgroup";
-        config.serviceConfig.StateDirectory = "testme";
-        testScript = ''
-          with subtest("check if StateDirectory works"):
-              machine.succeed("chroot-exec touch /tmp/canary")
-              machine.succeed('chroot-exec "echo works > /var/lib/testme/foo"')
-              machine.succeed('test "$(< /var/lib/testme/foo)" = works')
-              machine.succeed("test ! -e /tmp/canary")
-        '';
-      }
-      { testScript = ''
-          with subtest("check if /bin/sh works"):
-              machine.succeed(
-                  "chroot-exec test -e /bin/sh",
-                  'test "$(chroot-exec \'/bin/sh -c "echo bar"\')" = bar',
-              )
-        '';
-      }
-      { config.confinement.binSh = null;
-        testScript = ''
-          with subtest("check if suppressing /bin/sh works"):
-              machine.succeed("chroot-exec test ! -e /bin/sh")
-              machine.succeed('test "$(chroot-exec \'/bin/sh -c "echo foo"\')" != foo')
-        '';
-      }
-      { config.confinement.binSh = "${pkgs.hello}/bin/hello";
-        testScript = ''
-          with subtest("check if we can set /bin/sh to something different"):
-              machine.succeed("chroot-exec test -e /bin/sh")
-              machine.succeed('test "$(chroot-exec /bin/sh -g foo)" = foo')
-        '';
-      }
-      { config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
-        testScript = ''
-          with subtest("check if only Exec* dependencies are included"):
-              machine.succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" != eek')
-        '';
-      }
-      { config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
-        config.confinement.fullUnit = true;
-        testScript = ''
-          with subtest("check if all unit dependencies are included"):
-              machine.succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" = eek')
-        '';
-      }
-      { serviceName = "shipped-unitfile";
-        config.confinement.mode = "chroot-only";
-        testScript = ''
-          with subtest("check if shipped unit file still works"):
-              machine.succeed(
-                  'chroot-exec \'kill -9 $$ 2>&1 || :\' | '
-                  'grep -q "Too many levels of symbolic links"'
-              )
-        '';
-      }
-    ];
-
-    options.__testSteps = lib.mkOption {
-      type = lib.types.lines;
-      description = "All of the test steps combined as a single script.";
-    };
-
-    config.environment.systemPackages = lib.singleton testClient;
-    config.systemd.packages = lib.singleton (pkgs.writeTextFile {
-      name = "shipped-unitfile";
-      destination = "/etc/systemd/system/shipped-unitfile@.service";
-      text = ''
-        [Service]
-        SystemCallFilter=~kill
-        SystemCallErrorNumber=ELOOP
-      '';
-    });
-
-    config.users.groups.chroot-testgroup = {};
-    config.users.users.chroot-testuser = {
-      isSystemUser = true;
-      description = "Chroot Test User";
-      group = "chroot-testgroup";
-    };
-  };
-
-  testScript = { nodes, ... }: ''
-    def assert_eq(a, b):
-        assert a == b, f"{a} != {b}"
-
-    machine.wait_for_unit("multi-user.target")
-  '' + nodes.machine.config.__testSteps;
-}
diff --git a/nixos/tests/systemd-confinement/checkperms.py b/nixos/tests/systemd-confinement/checkperms.py
new file mode 100644
index 0000000000000..3c7ba279a3d20
--- /dev/null
+++ b/nixos/tests/systemd-confinement/checkperms.py
@@ -0,0 +1,187 @@
+import errno
+import os
+
+from enum import IntEnum
+from pathlib import Path
+
+
+class Accessibility(IntEnum):
+    """
+    The level of accessibility we have on a file or directory.
+
+    This is needed to assess the attack surface on the file system namespace we
+    have within a confined service. Higher levels mean more permissions for the
+    user and thus a bigger attack surface.
+    """
+    NONE = 0
+
+    # Directories can be listed or files can be read.
+    READABLE = 1
+
+    # This is for special file systems such as procfs and for stuff such as
+    # FIFOs or character special files. The reason why this has a lower value
+    # than WRITABLE is because those files are more restricted on what and how
+    # they can be written to.
+    SPECIAL = 2
+
+    # Another special case are sticky directories, which do allow write access
+    # but restrict deletion. This does *not* apply to sticky directories that
+    # are read-only.
+    STICKY = 3
+
+    # Essentially full permissions, the kind of accessibility we want to avoid
+    # in most cases.
+    WRITABLE = 4
+
+    def assert_on(self, path: Path) -> None:
+        """
+        Raise an AssertionError if the given 'path' allows for more
+        accessibility than 'self'.
+        """
+        actual = self.NONE
+
+        if path.is_symlink():
+            actual = self.READABLE
+        elif path.is_dir():
+            writable = True
+
+            dummy_file = path / 'can_i_write'
+            try:
+                dummy_file.touch()
+            except OSError as e:
+                if e.errno in [errno.EROFS, errno.EACCES]:
+                    writable = False
+                else:
+                    raise
+            else:
+                dummy_file.unlink()
+
+            if writable:
+                # The reason why we test this *after* we made sure it's
+                # writable is because we could have a sticky directory where
+                # the current user doesn't have write access.
+                if path.stat().st_mode & 0o1000 == 0o1000:
+                    actual = self.STICKY
+                else:
+                    actual = self.WRITABLE
+            else:
+                actual = self.READABLE
+        elif path.is_file():
+            try:
+                with path.open('rb') as fp:
+                    fp.read(1)
+                actual = self.READABLE
+            except PermissionError:
+                pass
+
+            writable = True
+            try:
+                with path.open('ab') as fp:
+                    fp.write(b'x')
+                    size = fp.tell()
+                    fp.truncate(size)
+            except PermissionError:
+                writable = False
+            except OSError as e:
+                if e.errno == errno.ETXTBSY:
+                    writable = os.access(path, os.W_OK)
+                elif e.errno == errno.EROFS:
+                    writable = False
+                else:
+                    raise
+
+            # Let's always try to fail towards being writable, so if *either*
+            # access(2) or a real write is successful it's writable. This is to
+            # make sure we don't accidentally introduce no-ops if we have bugs
+            # in the more complicated real write code above.
+            if writable or os.access(path, os.W_OK):
+                actual = self.WRITABLE
+        else:
+            # We need to be very careful when writing to or reading from
+            # special files (e.g. FIFOs), since they can possibly block. So if
+            # it's not a file, just trust that access(2) won't lie.
+            if os.access(path, os.R_OK):
+                actual = self.READABLE
+
+            if os.access(path, os.W_OK):
+                actual = self.SPECIAL
+
+        if actual > self:
+            stat = path.stat()
+            details = ', '.join([
+                f'permissions: {stat.st_mode & 0o7777:o}',
+                f'uid: {stat.st_uid}',
+                f'group: {stat.st_gid}',
+            ])
+
+            raise AssertionError(
+                f'Expected at most {self!r} but got {actual!r} for path'
+                f' {path} ({details}).'
+            )
+
+
+def is_special_fs(path: Path) -> bool:
+    """
+    Check whether the given path truly is a special file system such as procfs
+    or sysfs.
+    """
+    try:
+        if path == Path('/proc'):
+            return (path / 'version').read_text().startswith('Linux')
+        elif path == Path('/sys'):
+            return b'Linux' in (path / 'kernel' / 'notes').read_bytes()
+    except FileNotFoundError:
+        pass
+    return False
+
+
+def is_empty_dir(path: Path) -> bool:
+    try:
+        next(path.iterdir())
+        return False
+    except (StopIteration, PermissionError):
+        return True
+
+
+def _assert_permissions_in_directory(
+    directory: Path,
+    accessibility: Accessibility,
+    subdirs: dict[Path, Accessibility],
+) -> None:
+    accessibility.assert_on(directory)
+
+    for file in directory.iterdir():
+        if is_special_fs(file):
+            msg = f'Got unexpected special filesystem at {file}.'
+            assert subdirs.pop(file) == Accessibility.SPECIAL, msg
+        elif not file.is_symlink() and file.is_dir():
+            subdir_access = subdirs.pop(file, accessibility)
+            if is_empty_dir(file):
+                # For an empty directory, check the constraints of the
+                # current directory against it (unless overridden explicitly
+                # in subdirs): for example, when running as non-root the
+                # current directory's constraint is Accessibility.READABLE,
+                # and we really have to make sure that empty directories are
+                # *never* writable.
+                subdir_access.assert_on(file)
+            else:
+                _assert_permissions_in_directory(file, subdir_access, subdirs)
+        else:
+            subdirs.pop(file, accessibility).assert_on(file)
+
+
+def assert_permissions(subdirs: dict[str, Accessibility]) -> None:
+    """
+    Recursively check whether the file system conforms to the accessibility
+    specification we specified via 'subdirs'.
+    """
+    root = Path('/')
+    absolute_subdirs = {root / p: a for p, a in subdirs.items()}
+    _assert_permissions_in_directory(
+        root,
+        Accessibility.WRITABLE if os.getuid() == 0 else Accessibility.READABLE,
+        absolute_subdirs,
+    )
+    for file in absolute_subdirs.keys():
+        msg = f'Expected {file} to exist, but it was nowhere to be found.'
+        raise AssertionError(msg)
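The helper module above is exercised by the generated test scripts in the default.nix that follows. As a rough, illustrative sketch (not part of the patch) of how the API is meant to be used from inside a confined service, assuming the chroot-only layout where only /bin, /nix and /run are visible:

    from pathlib import Path
    from checkperms import Accessibility, assert_permissions

    # Upper-bound check on a single path: /nix must be at most readable.
    Accessibility.READABLE.assert_on(Path('/nix'))

    # Whole-namespace check: each listed directory may not exceed its stated
    # level; unlisted directories inherit the default chosen by
    # assert_permissions (WRITABLE when running as root, READABLE otherwise).
    assert_permissions({
        'bin': Accessibility.READABLE,
        'nix': Accessibility.READABLE,
        'run': Accessibility.READABLE,
    })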
diff --git a/nixos/tests/systemd-confinement/default.nix b/nixos/tests/systemd-confinement/default.nix
new file mode 100644
index 0000000000000..4ca37b3b9126e
--- /dev/null
+++ b/nixos/tests/systemd-confinement/default.nix
@@ -0,0 +1,274 @@
+import ../make-test-python.nix {
+  name = "systemd-confinement";
+
+  nodes.machine = { pkgs, lib, ... }: let
+    testLib = pkgs.python3Packages.buildPythonPackage {
+      name = "confinement-testlib";
+      unpackPhase = ''
+        cat > setup.py <<EOF
+        from setuptools import setup
+        setup(name='confinement-testlib', py_modules=["checkperms"])
+        EOF
+        cp ${./checkperms.py} checkperms.py
+      '';
+    };
+
+    mkTest = name: testScript: pkgs.writers.writePython3 "${name}.py" {
+      libraries = [ pkgs.python3Packages.pytest testLib ];
+    } ''
+      # This runs our test script by using pytest's assertion rewriting, so
+      # that whenever we use "assert <something>", the actual values are
+      # printed rather than getting a generic AssertionError or the need to
+      # pass an explicit assertion error message.
+      import ast
+      from pathlib import Path
+      from _pytest.assertion.rewrite import rewrite_asserts
+
+      script = Path('${pkgs.writeText "${name}-main.py" ''
+        import errno, os, pytest, signal
+        from subprocess import run
+        from checkperms import Accessibility, assert_permissions
+
+        ${testScript}
+      ''}') # noqa
+      filename = str(script)
+      source = script.read_bytes()
+
+      tree = ast.parse(source, filename=filename)
+      rewrite_asserts(tree, source, filename)
+      exec(compile(tree, filename, 'exec', dont_inherit=True))
+    '';
+
+    mkTestStep = num: {
+      description,
+      testScript,
+      config ? {},
+      serviceName ? "test${toString num}",
+      rawUnit ? null,
+    }: {
+      systemd.packages = lib.optional (rawUnit != null) (pkgs.writeTextFile {
+        name = serviceName;
+        destination = "/etc/systemd/system/${serviceName}.service";
+        text = rawUnit;
+      });
+
+      systemd.services.${serviceName} = {
+        inherit description;
+        requiredBy = [ "multi-user.target" ];
+        confinement = (config.confinement or {}) // { enable = true; };
+        serviceConfig = (config.serviceConfig or {}) // {
+          ExecStart = mkTest serviceName testScript;
+          Type = "oneshot";
+        };
+      } // removeAttrs config [ "confinement" "serviceConfig" ];
+    };
+
+    parametrisedTests = lib.concatMap ({ user, privateTmp }: let
+      withTmp = if privateTmp then "with PrivateTmp" else "without PrivateTmp";
+
+      serviceConfig = if user == "static-user" then {
+        User = "chroot-testuser";
+        Group = "chroot-testgroup";
+      } else if user == "dynamic-user" then {
+        DynamicUser = true;
+      } else {};
+
+    in [
+      { description = "${user}, chroot-only confinement ${withTmp}";
+        config = {
+          confinement.mode = "chroot-only";
+          # Only set PrivateTmp if privateTmp is true, so the other case verifies that the default is false.
+          serviceConfig = serviceConfig // lib.optionalAttrs privateTmp {
+            PrivateTmp = true;
+          };
+        };
+        testScript = if user == "root" then ''
+          assert os.getuid() == 0
+          assert os.getgid() == 0
+
+          assert_permissions({
+            'bin': Accessibility.READABLE,
+            'nix': Accessibility.READABLE,
+            'run': Accessibility.READABLE,
+            ${lib.optionalString privateTmp "'tmp': Accessibility.STICKY,"}
+            ${lib.optionalString privateTmp "'var': Accessibility.READABLE,"}
+            ${lib.optionalString privateTmp "'var/tmp': Accessibility.STICKY,"}
+          })
+        '' else ''
+          assert os.getuid() != 0
+          assert os.getgid() != 0
+
+          assert_permissions({
+            'bin': Accessibility.READABLE,
+            'nix': Accessibility.READABLE,
+            'run': Accessibility.READABLE,
+            ${lib.optionalString privateTmp "'tmp': Accessibility.STICKY,"}
+            ${lib.optionalString privateTmp "'var': Accessibility.READABLE,"}
+            ${lib.optionalString privateTmp "'var/tmp': Accessibility.STICKY,"}
+          })
+        '';
+      }
+      { description = "${user}, full APIVFS confinement ${withTmp}";
+        config = {
+          # Only set PrivateTmp if privateTmp is false, so the other case verifies that the default is true.
+          serviceConfig = serviceConfig // lib.optionalAttrs (!privateTmp) {
+            PrivateTmp = false;
+          };
+        };
+        testScript = if user == "root" then ''
+          assert os.getuid() == 0
+          assert os.getgid() == 0
+
+          assert_permissions({
+            'bin': Accessibility.READABLE,
+            'nix': Accessibility.READABLE,
+            ${lib.optionalString privateTmp "'tmp': Accessibility.STICKY,"}
+            'run': Accessibility.WRITABLE,
+
+            'proc': Accessibility.SPECIAL,
+            'sys': Accessibility.SPECIAL,
+            'dev': Accessibility.WRITABLE,
+
+            ${lib.optionalString privateTmp "'var': Accessibility.READABLE,"}
+            ${lib.optionalString privateTmp "'var/tmp': Accessibility.STICKY,"}
+          })
+        '' else ''
+          assert os.getuid() != 0
+          assert os.getgid() != 0
+
+          assert_permissions({
+            'bin': Accessibility.READABLE,
+            'nix': Accessibility.READABLE,
+            ${lib.optionalString privateTmp "'tmp': Accessibility.STICKY,"}
+            'run': Accessibility.STICKY,
+
+            'proc': Accessibility.SPECIAL,
+            'sys': Accessibility.SPECIAL,
+            'dev': Accessibility.SPECIAL,
+            'dev/shm': Accessibility.STICKY,
+            'dev/mqueue': Accessibility.STICKY,
+
+            ${lib.optionalString privateTmp "'var': Accessibility.READABLE,"}
+            ${lib.optionalString privateTmp "'var/tmp': Accessibility.STICKY,"}
+          })
+        '';
+      }
+    ]) (lib.cartesianProduct {
+      user = [ "root" "dynamic-user" "static-user" ];
+      privateTmp = [ true false ];
+    });
+
+  in {
+    imports = lib.imap1 mkTestStep (parametrisedTests ++ [
+      { description = "existence of bind-mounted /etc";
+        config.serviceConfig.BindReadOnlyPaths = [ "/etc" ];
+        testScript = ''
+          assert Path('/etc/passwd').read_text()
+        '';
+      }
+      (let
+        symlink = pkgs.runCommand "symlink" {
+          target = pkgs.writeText "symlink-target" "got me";
+        } "ln -s \"$target\" \"$out\"";
+      in {
+        description = "check if symlinks are properly bind-mounted";
+        config.confinement.packages = lib.singleton symlink;
+        testScript = ''
+          assert Path('${symlink}').read_text() == 'got me'
+        '';
+      })
+      { description = "check if StateDirectory works";
+        config.serviceConfig.User = "chroot-testuser";
+        config.serviceConfig.Group = "chroot-testgroup";
+        config.serviceConfig.StateDirectory = "testme";
+
+        # We restart on purpose here since we want to check whether the state
+        # directory actually persists.
+        config.serviceConfig.Restart = "on-failure";
+        config.serviceConfig.RestartMode = "direct";
+
+        testScript = ''
+          assert not Path('/tmp/canary').exists()
+          Path('/tmp/canary').touch()
+
+          if (foo := Path('/var/lib/testme/foo')).exists():
+            assert foo.read_text() == 'works'
+          else:
+            foo.write_text('works')
+            print('<4>Exiting with failure to check persistence on restart.')
+            raise SystemExit(1)
+        '';
+      }
+      { description = "check if /bin/sh works";
+        testScript = ''
+          assert Path('/bin/sh').exists()
+
+          result = run(
+            ['/bin/sh', '-c', 'echo -n bar'],
+            capture_output=True,
+            check=True,
+          )
+          assert result.stdout == b'bar'
+        '';
+      }
+      { description = "check if suppressing /bin/sh works";
+        config.confinement.binSh = null;
+        testScript = ''
+          assert not Path('/bin/sh').exists()
+          with pytest.raises(FileNotFoundError):
+            run(['/bin/sh', '-c', 'echo foo'])
+        '';
+      }
+      { description = "check if we can set /bin/sh to something different";
+        config.confinement.binSh = "${pkgs.hello}/bin/hello";
+        testScript = ''
+          assert Path('/bin/sh').exists()
+          result = run(
+            ['/bin/sh', '-g', 'foo'],
+            capture_output=True,
+            check=True,
+          )
+          assert result.stdout == b'foo\n'
+        '';
+      }
+      { description = "check if only Exec* dependencies are included";
+        config.environment.FOOBAR = pkgs.writeText "foobar" "eek";
+        testScript = ''
+          with pytest.raises(FileNotFoundError):
+            Path(os.environ['FOOBAR']).read_text()
+        '';
+      }
+      { description = "check if fullUnit includes all dependencies";
+        config.environment.FOOBAR = pkgs.writeText "foobar" "eek";
+        config.confinement.fullUnit = true;
+        testScript = ''
+          assert Path(os.environ['FOOBAR']).read_text() == 'eek'
+        '';
+      }
+      { description = "check if shipped unit file still works";
+        config.confinement.mode = "chroot-only";
+        rawUnit = ''
+          [Service]
+          SystemCallFilter=~kill
+          SystemCallErrorNumber=ELOOP
+        '';
+        testScript = ''
+          with pytest.raises(OSError) as excinfo:
+            os.kill(os.getpid(), signal.SIGKILL)
+          assert excinfo.value.errno == errno.ELOOP
+        '';
+      }
+    ]);
+
+    config.users.groups.chroot-testgroup = {};
+    config.users.users.chroot-testuser = {
+      isSystemUser = true;
+      description = "Chroot Test User";
+      group = "chroot-testgroup";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+  '';
+}
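For readers unfamiliar with the assertion-rewriting trick that mkTest relies on above, here is the same mechanism in isolation. This is a minimal sketch assuming pytest is installed; the file name and the failing assertion are made up for illustration.

    import ast
    from _pytest.assertion.rewrite import rewrite_asserts

    filename = "example.py"                 # illustrative name
    source = b"x = 2 + 2\nassert x == 5\n"  # illustrative failing assertion
    tree = ast.parse(source, filename=filename)
    rewrite_asserts(tree, source, filename)  # pytest rewrites the assert nodes
    # On failure this now reports "assert 4 == 5" instead of a bare AssertionError.
    exec(compile(tree, filename, "exec", dont_inherit=True))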
diff --git a/nixos/tests/systemd-homed.nix b/nixos/tests/systemd-homed.nix
index ecc92e98eddc7..5e723f6769452 100644
--- a/nixos/tests/systemd-homed.nix
+++ b/nixos/tests/systemd-homed.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ pkgs, lib, ... }:
 let
-  password = "foobar";
-  newPass = "barfoo";
+  password = "foobarfoo";
+  newPass = "barfoobar";
 in
 {
   name = "systemd-homed";
diff --git a/nixos/tests/systemd-initrd-luks-fido2.nix b/nixos/tests/systemd-initrd-luks-fido2.nix
index f9f75ab7f301c..4441ad061ee42 100644
--- a/nixos/tests/systemd-initrd-luks-fido2.nix
+++ b/nixos/tests/systemd-initrd-luks-fido2.nix
@@ -9,8 +9,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
       # Booting off the encrypted disk requires having a Nix store available for the init script
       mountHostNixStore = true;
       useEFIBoot = true;
-      qemu.package = lib.mkForce (pkgs.qemu_test.override { canokeySupport = true; });
-      qemu.options = [ "-device canokey,file=/tmp/canokey-file" ];
+      qemu.options = [ "-device pci-ohci,id=usb-bus" "-device canokey,bus=usb-bus.0,file=/tmp/canokey-file" ];
     };
     boot.loader.systemd-boot.enable = true;
 
diff --git a/nixos/tests/systemd-initrd-luks-unl0kr.nix b/nixos/tests/systemd-initrd-luks-unl0kr.nix
index 0658a098cfa2b..83b52646d112d 100644
--- a/nixos/tests/systemd-initrd-luks-unl0kr.nix
+++ b/nixos/tests/systemd-initrd-luks-unl0kr.nix
@@ -2,8 +2,8 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: let
   passphrase = "secret";
 in {
   name = "systemd-initrd-luks-unl0kr";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ tomfitzhenry ];
+  meta = {
+    maintainers = [];
   };
 
   enableOCR = true;
diff --git a/nixos/tests/systemd-initrd-modprobe.nix b/nixos/tests/systemd-initrd-modprobe.nix
index 0f93492176b44..e563552a645fd 100644
--- a/nixos/tests/systemd-initrd-modprobe.nix
+++ b/nixos/tests/systemd-initrd-modprobe.nix
@@ -4,21 +4,21 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
   nodes.machine = { pkgs, ... }: {
     testing.initrdBackdoor = true;
     boot.initrd.systemd.enable = true;
-    boot.initrd.kernelModules = [ "loop" ]; # Load module in initrd.
+    boot.initrd.kernelModules = [ "tcp_hybla" ]; # Load module in initrd.
     boot.extraModprobeConfig = ''
-      options loop max_loop=42
+      options tcp_hybla rtt0=42
     '';
   };
 
   testScript = ''
     machine.wait_for_unit("initrd.target")
-    max_loop = machine.succeed("cat /sys/module/loop/parameters/max_loop")
-    assert int(max_loop) == 42, "Parameter should be respected for initrd kernel modules"
+    rtt = machine.succeed("cat /sys/module/tcp_hybla/parameters/rtt0")
+    assert int(rtt) == 42, "Parameter should be respected for initrd kernel modules"
 
     # Make sure it sticks in stage 2
     machine.switch_root()
     machine.wait_for_unit("multi-user.target")
-    max_loop = machine.succeed("cat /sys/module/loop/parameters/max_loop")
-    assert int(max_loop) == 42, "Parameter should be respected for initrd kernel modules"
+    rtt = machine.succeed("cat /sys/module/tcp_hybla/parameters/rtt0")
+    assert int(rtt) == 42, "Parameter should be respected for initrd kernel modules"
   '';
 })
diff --git a/nixos/tests/systemd-machinectl.nix b/nixos/tests/systemd-machinectl.nix
index 9d761c6d4d8b8..40ea4905ec67b 100644
--- a/nixos/tests/systemd-machinectl.nix
+++ b/nixos/tests/systemd-machinectl.nix
@@ -16,7 +16,7 @@ let
 
     imports = [ ../modules/profiles/minimal.nix ];
 
-    system.stateVersion = config.system.nixos.version;
+    system.stateVersion = config.system.nixos.release;
   };
 
   containerSystem = (import ../lib/eval-config.nix {
@@ -76,10 +76,23 @@ in
       };
     };
 
+    systemd.nspawn.${containerName} = {
+      filesConfig = {
+        # workaround to fix kernel namespaces; needed for Nix sandbox
+        # https://github.com/systemd/systemd/issues/27994#issuecomment-1704005670
+        Bind = "/proc:/run/proc";
+      };
+    };
+
     systemd.services."systemd-nspawn@${containerName}" = {
       serviceConfig.Environment = [
         # Disable tmpfs for /tmp
         "SYSTEMD_NSPAWN_TMPFS_TMP=0"
+
+        # Force unified cgroup delegation, which would be the default if the host
+        # systemd could detect the capabilities of the systemd inside the container.
+        # See also: https://github.com/NixOS/nixpkgs/pull/198526
+        "SYSTEMD_NSPAWN_UNIFIED_HIERARCHY=1"
       ];
       overrideStrategy = "asDropin";
     };
@@ -121,6 +134,17 @@ in
     machine.succeed("machinectl start ${containerName}");
     machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target");
 
+    # Test systemd-nspawn configured unified cgroup delegation
+    # see also:
+    # https://github.com/systemd/systemd/blob/main/docs/CGROUP_DELEGATION.md#three-different-tree-setups-
+    machine.succeed('systemd-run --pty --wait -M ${containerName} /run/current-system/sw/bin/stat --format="%T" --file-system /sys/fs/cgroup > fstype')
+    machine.succeed('test $(tr -d "\\r" < fstype) = cgroup2fs')
+
+    # Test if systemd-nspawn provides a working environment for nix to build derivations
+    # https://nixos.org/guides/nix-pills/07-working-derivation
+    machine.succeed('systemd-run --pty --wait -M ${containerName} /run/current-system/sw/bin/nix-instantiate --expr \'derivation { name = "myname"; builder = "/bin/sh"; args = [ "-c" "echo foo > $out" ]; system = "${pkgs.system}"; }\' --add-root /tmp/drv')
+    machine.succeed('systemd-run --pty --wait -M ${containerName} /run/current-system/sw/bin/nix-store --option substitute false --realize /tmp/drv')
+
     # Test nss_mymachines without nscd
     machine.succeed('LD_LIBRARY_PATH="/run/current-system/sw/lib" getent -s hosts:mymachines hosts ${containerName}');
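The stat-based check a few lines up verifies that the container sees the unified cgroup hierarchy. As a standalone illustration (assumptions: executed inside the container, coreutils stat available), the same check could be written as:

    import subprocess

    fstype = subprocess.run(
        ["stat", "--format=%T", "--file-system", "/sys/fs/cgroup"],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    assert fstype == "cgroup2fs", f"expected cgroup2fs, got {fstype!r}"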
 
diff --git a/nixos/tests/systemd-networkd-dhcpserver-static-leases.nix b/nixos/tests/systemd-networkd-dhcpserver-static-leases.nix
index f6d5411aa5cad..8c0ebeee97c77 100644
--- a/nixos/tests/systemd-networkd-dhcpserver-static-leases.nix
+++ b/nixos/tests/systemd-networkd-dhcpserver-static-leases.nix
@@ -28,10 +28,8 @@ import ./make-test-python.nix ({ lib, ... }: {
               Address = "10.0.0.1/24";
             };
             dhcpServerStaticLeases = [{
-              dhcpServerStaticLeaseConfig = {
-                MACAddress = "02:de:ad:be:ef:01";
-                Address = "10.0.0.10";
-              };
+              MACAddress = "02:de:ad:be:ef:01";
+              Address = "10.0.0.10";
             }];
           };
         };
@@ -41,20 +39,30 @@ import ./make-test-python.nix ({ lib, ... }: {
     client = {
       virtualisation.vlans = [ 1 ];
       systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      systemd.network = {
+        enable = true;
+        links."10-eth1" = {
+          matchConfig.OriginalName = "eth1";
+          linkConfig.MACAddress = "02:de:ad:be:ef:01";
+        };
+        networks."40-eth1" = {
+          matchConfig.Name = "eth1";
+          networkConfig = {
+            DHCP = "ipv4";
+            IPv6AcceptRA = false;
+          };
+          # This setting is important to have the router assign the
+          # configured lease based on the client's MAC address. Also see:
+          # https://github.com/systemd/systemd/issues/21368#issuecomment-982193546
+          dhcpV4Config.ClientIdentifier = "mac";
+          linkConfig.RequiredForOnline = "routable";
+        };
+      };
       networking = {
-        useNetworkd = true;
         useDHCP = false;
         firewall.enable = false;
-        interfaces.eth1 = {
-          useDHCP = true;
-          macAddress = "02:de:ad:be:ef:01";
-        };
+        interfaces.eth1 = lib.mkForce {};
       };
-
-      # This setting is important to have the router assign the
-      # configured lease based on the client's MAC address. Also see:
-      # https://github.com/systemd/systemd/issues/21368#issuecomment-982193546
-      systemd.network.networks."40-eth1".dhcpV4Config.ClientIdentifier = "mac";
     };
   };
   testScript = ''
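Given the static lease and the fixed client MAC configured above, one way to sanity-check the outcome on the client is sketched below. This is hypothetical and not taken from the test script; it assumes iproute2 is available and that eth1 has already been configured by systemd-networkd.

    import subprocess

    out = subprocess.run(
        ["ip", "-4", "addr", "show", "dev", "eth1"],
        capture_output=True, text=True, check=True,
    ).stdout
    assert "10.0.0.10" in out, f"static lease not applied:\n{out}"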
diff --git a/nixos/tests/systemd-networkd-dhcpserver.nix b/nixos/tests/systemd-networkd-dhcpserver.nix
index 665d8b5a05291..fda0c9d641938 100644
--- a/nixos/tests/systemd-networkd-dhcpserver.nix
+++ b/nixos/tests/systemd-networkd-dhcpserver.nix
@@ -54,7 +54,7 @@ import ./make-test-python.nix ({pkgs, ...}: {
             name = "eth1";
             networkConfig.Bridge = "br0";
             bridgeVLANs = [
-              { bridgeVLANConfig = { PVID = 2; EgressUntagged = 2; }; }
+              { PVID = 2; EgressUntagged = 2; }
             ];
           };
           "02-br0" = {
@@ -69,8 +69,8 @@ import ./make-test-python.nix ({pkgs, ...}: {
               PoolSize = 1;
             };
             bridgeVLANs = [
-              { bridgeVLANConfig = { PVID = 1; EgressUntagged = 1; }; }
-              { bridgeVLANConfig = { VLAN = 2; }; }
+              { PVID = 1; EgressUntagged = 1; }
+              { VLAN = 2; }
             ];
           };
           "02-vlan2" = {
diff --git a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
index 1e55341657bdb..2ea6d0effd536 100644
--- a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
+++ b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
@@ -40,7 +40,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
             address = [
               "2001:DB8::1/64"
             ];
-            networkConfig.IPForward = true;
+            networkConfig.IPv4Forwarding = true;
+            networkConfig.IPv6Forwarding = true;
           };
         };
       };
@@ -66,6 +67,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           settings = {
             interfaces-config.interfaces = [ "eth1" ];
             subnet6 = [ {
+              id = 1;
               interface = "eth1";
               subnet = "2001:DB8::/32";
               pd-pools = [ {
@@ -173,6 +175,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         # for forwarding/input from the configured interfaces so you do not have
         # to manage multiple places
         firewall.enable = false;
+        interfaces.eth1.ipv6.addresses = lib.mkForce [ ];
       };
 
       systemd.network = {
@@ -258,7 +261,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           "01-lo" = {
             name = "lo";
             addresses = [
-              { addressConfig.Address = "FD42::1/128"; }
+              { Address = "FD42::1/128"; }
             ];
           };
         };
@@ -274,6 +277,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       networking = {
         useNetworkd = true;
         useDHCP = false;
+        interfaces.eth1.ipv6.addresses = lib.mkForce [ ];
       };
     };
   };
diff --git a/nixos/tests/systemd-networkd-vrf.nix b/nixos/tests/systemd-networkd-vrf.nix
index d4227526a30d4..a7875bb177faf 100644
--- a/nixos/tests/systemd-networkd-vrf.nix
+++ b/nixos/tests/systemd-networkd-vrf.nix
@@ -16,7 +16,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
         linkConfig.RequiredForOnline = "no";
         networkConfig = {
           Address = "192.168.${toString vlan}.${toString id}/24";
-          IPForward = "yes";
+          IPv4Forwarding = "yes";
+          IPv6Forwarding = "yes";
         };
       };
     };
@@ -57,16 +58,18 @@ in {
 
         networks."10-vrf1" = {
           matchConfig.Name = "vrf1";
-          networkConfig.IPForward = "yes";
+          networkConfig.IPv4Forwarding = "yes";
+          networkConfig.IPv6Forwarding = "yes";
           routes = [
-            { routeConfig = { Destination = "192.168.1.2"; Metric = 100; }; }
+            { Destination = "192.168.1.2"; Metric = 100; }
           ];
         };
         networks."10-vrf2" = {
           matchConfig.Name = "vrf2";
-          networkConfig.IPForward = "yes";
+          networkConfig.IPv4Forwarding = "yes";
+          networkConfig.IPv6Forwarding = "yes";
           routes = [
-            { routeConfig = { Destination = "192.168.2.3"; Metric = 100; }; }
+            { Destination = "192.168.2.3"; Metric = 100; }
           ];
         };
 
@@ -76,7 +79,8 @@ in {
           networkConfig = {
             VRF = "vrf1";
             Address = "192.168.1.1/24";
-            IPForward = "yes";
+            IPv4Forwarding = "yes";
+            IPv6Forwarding = "yes";
           };
         };
         networks."10-eth2" = {
@@ -85,7 +89,8 @@ in {
           networkConfig = {
             VRF = "vrf2";
             Address = "192.168.2.1/24";
-            IPForward = "yes";
+            IPv4Forwarding = "yes";
+            IPv6Forwarding = "yes";
           };
         };
       };
diff --git a/nixos/tests/systemd-networkd.nix b/nixos/tests/systemd-networkd.nix
index 6b241b93d5118..a595fb9cba4ac 100644
--- a/nixos/tests/systemd-networkd.nix
+++ b/nixos/tests/systemd-networkd.nix
@@ -23,13 +23,13 @@ let generateNodeConf = { lib, pkgs, config, privk, pubk, peerId, nodeId, ...}: {
               ListenPort = 51820;
               FirewallMark = 42;
             };
-            wireguardPeers = [ {wireguardPeerConfig={
+            wireguardPeers = [ {
               Endpoint = "192.168.1.${peerId}:51820";
               PublicKey = pubk;
               PresharedKeyFile = pkgs.writeText "psk.key" "yTL3sCOL33Wzi6yCnf9uZQl/Z8laSE+zwpqOHC4HhFU=";
               AllowedIPs = [ "10.0.0.${peerId}/32" ];
               PersistentKeepalive = 15;
-            };}];
+            } ];
           };
         };
         networks = {
@@ -41,8 +41,8 @@ let generateNodeConf = { lib, pkgs, config, privk, pubk, peerId, nodeId, ...}: {
             matchConfig = { Name = "wg0"; };
             address = [ "10.0.0.${nodeId}/32" ];
             routes = [
-              { routeConfig = { Gateway = "10.0.0.${nodeId}"; Destination = "10.0.0.0/24"; }; }
-              { routeConfig = { Gateway = "10.0.0.${nodeId}"; Destination = "10.0.0.0/24"; Table = "custom"; }; }
+              { Gateway = "10.0.0.${nodeId}"; Destination = "10.0.0.0/24"; }
+              { Gateway = "10.0.0.${nodeId}"; Destination = "10.0.0.0/24"; Table = "custom"; }
             ];
           };
           "30-eth1" = {
@@ -52,11 +52,13 @@ let generateNodeConf = { lib, pkgs, config, privk, pubk, peerId, nodeId, ...}: {
               "fe80::${nodeId}/64"
             ];
             routingPolicyRules = [
-              { routingPolicyRuleConfig = { Table = 10; IncomingInterface = "eth1"; Family = "both"; };}
-              { routingPolicyRuleConfig = { Table = 20; OutgoingInterface = "eth1"; };}
-              { routingPolicyRuleConfig = { Table = 30; From = "192.168.1.1"; To = "192.168.1.2"; SourcePort = 666 ; DestinationPort = 667; };}
-              { routingPolicyRuleConfig = { Table = 40; IPProtocol = "tcp"; InvertRule = true; };}
-              { routingPolicyRuleConfig = { Table = 50; IncomingInterface = "eth1"; Family = "ipv4"; };}
+              { Table = 10; IncomingInterface = "eth1"; Family = "both"; }
+              { Table = 20; OutgoingInterface = "eth1"; }
+              { Table = 30; From = "192.168.1.1"; To = "192.168.1.2"; SourcePort = 666; DestinationPort = 667; }
+              { Table = 40; IPProtocol = "tcp"; InvertRule = true; }
+              { Table = 50; IncomingInterface = "eth1"; Family = "ipv4"; }
+              { Table = 60; FirewallMark = 4; }
+              { Table = 70; FirewallMark = "16/0x1f"; }
             ];
           };
         };
@@ -119,5 +121,9 @@ testScript = ''
     )
     # IPProtocol + InvertRule
     node1.succeed("sudo ip rule | grep 'not from all ipproto tcp lookup 40'")
+    # FirewallMark without a mask
+    node1.succeed("sudo ip rule | grep 'from all fwmark 0x4 lookup 60'")
+    # FirewallMark with a mask
+    node1.succeed("sudo ip rule | grep 'from all fwmark 0x10/0x1f lookup 70'")
 '';
 })
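A short note on why the new greps expect 0x4 and 0x10: ip(8) prints firewall marks in hexadecimal, so the decimal mark 16 configured as "16/0x1f" is rendered as 0x10/0x1f. The trivial check below is purely illustrative.

    # ip rule renders fwmark values in hex: decimal 4 -> 0x4, decimal 16 -> 0x10.
    assert hex(4) == "0x4"
    assert hex(16) == "0x10"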
diff --git a/nixos/tests/systemd-resolved.nix b/nixos/tests/systemd-resolved.nix
new file mode 100644
index 0000000000000..3eedc17f4b34f
--- /dev/null
+++ b/nixos/tests/systemd-resolved.nix
@@ -0,0 +1,75 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "systemd-resolved";
+  meta.maintainers = [ lib.maintainers.elvishjerricco ];
+
+  nodes.server = { lib, config, ... }: let
+    exampleZone = pkgs.writeTextDir "example.com.zone" ''
+      @ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
+      @       A       ${(lib.head config.networking.interfaces.eth1.ipv4.addresses).address}
+      @       AAAA    ${(lib.head config.networking.interfaces.eth1.ipv6.addresses).address}
+    '';
+  in {
+    networking.firewall.enable = false;
+    networking.useDHCP = false;
+
+    networking.interfaces.eth1.ipv6.addresses = lib.mkForce [
+      { address = "fd00::1"; prefixLength = 64; }
+    ];
+
+    services.knot = {
+      enable = true;
+      settings = {
+        server.listen = [
+          "0.0.0.0@53"
+          "::@53"
+        ];
+        template.default.storage = exampleZone;
+        zone."example.com".file = "example.com.zone";
+      };
+    };
+  };
+
+  nodes.client = { nodes, ... }: let
+    inherit (lib.head nodes.server.networking.interfaces.eth1.ipv4.addresses) address;
+  in {
+    networking.nameservers = [ address ];
+    networking.interfaces.eth1.ipv6.addresses = lib.mkForce [
+      { address = "fd00::2"; prefixLength = 64; }
+    ];
+    services.resolved.enable = true;
+    services.resolved.fallbackDns = [ ];
+    networking.useNetworkd = true;
+    networking.useDHCP = false;
+    systemd.network.networks."40-eth0".enable = false;
+
+    testing.initrdBackdoor = true;
+    boot.initrd = {
+      systemd.enable = true;
+      systemd.initrdBin = [ pkgs.iputils ];
+      network.enable = true;
+      services.resolved.enable = true;
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    address4 = (lib.head nodes.server.networking.interfaces.eth1.ipv4.addresses).address;
+    address6 = (lib.head nodes.server.networking.interfaces.eth1.ipv6.addresses).address;
+  in ''
+    start_all()
+    server.wait_for_unit("multi-user.target")
+
+    def test_client():
+        query = client.succeed("resolvectl query example.com")
+        assert "${address4}" in query
+        assert "${address6}" in query
+        client.succeed("ping -4 -c 1 example.com")
+        client.succeed("ping -6 -c 1 example.com")
+
+    client.wait_for_unit("initrd.target")
+    test_client()
+    client.switch_root()
+
+    client.wait_for_unit("multi-user.target")
+    test_client()
+  '';
+})
diff --git a/nixos/tests/systemd-sysusers-immutable.nix b/nixos/tests/systemd-sysusers-immutable.nix
index 42cbf84d175e4..4d65b52a0d336 100644
--- a/nixos/tests/systemd-sysusers-immutable.nix
+++ b/nixos/tests/systemd-sysusers-immutable.nix
@@ -2,8 +2,8 @@
 
 let
   rootPassword = "$y$j9T$p6OI0WN7.rSfZBOijjRdR.$xUOA2MTcB48ac.9Oc5fz8cxwLv1mMqabnn333iOzSA6";
-  normaloPassword = "$y$j9T$3aiOV/8CADAK22OK2QT3/0$67OKd50Z4qTaZ8c/eRWHLIM.o3ujtC1.n9ysmJfv639";
-  newNormaloPassword = "mellow";
+  sysuserPassword = "$y$j9T$3aiOV/8CADAK22OK2QT3/0$67OKd50Z4qTaZ8c/eRWHLIM.o3ujtC1.n9ysmJfv639";
+  newSysuserPassword = "mellow";
 in
 
 {
@@ -16,49 +16,48 @@ in
     systemd.sysusers.enable = true;
     users.mutableUsers = false;
 
-    # Override the empty root password set by the test instrumentation
-    users.users.root.hashedPasswordFile = lib.mkForce null;
-    users.users.root.initialHashedPassword = rootPassword;
-    users.users.normalo = {
-      isNormalUser = true;
-      initialHashedPassword = normaloPassword;
+
+    # Read this password file at runtime from outside the Nix store.
+    environment.etc."rootpw.secret".text = rootPassword;
+    # Override the empty root password set by the test instrumentation.
+    users.users.root.hashedPasswordFile = lib.mkForce "/etc/rootpw.secret";
+
+    users.users.sysuser = {
+      isSystemUser = true;
+      group = "wheel";
+      home = "/sysuser";
+      initialHashedPassword = sysuserPassword;
     };
 
     specialisation.new-generation.configuration = {
-      users.users.new-normalo = {
-        isNormalUser = true;
-        initialPassword = newNormaloPassword;
+      users.users.new-sysuser = {
+        isSystemUser = true;
+        group = "wheel";
+        home = "/new-sysuser";
+        initialPassword = newSysuserPassword;
       };
     };
   };
 
   testScript = ''
-    with subtest("Users are not created with systemd-sysusers"):
-      machine.fail("systemctl status systemd-sysusers.service")
-      machine.fail("ls /etc/sysusers.d")
-
-    with subtest("Correct mode on the password files"):
-      assert machine.succeed("stat -c '%a' /etc/passwd") == "644\n"
-      assert machine.succeed("stat -c '%a' /etc/group") == "644\n"
-      assert machine.succeed("stat -c '%a' /etc/shadow") == "0\n"
-      assert machine.succeed("stat -c '%a' /etc/gshadow") == "0\n"
-
     with subtest("root user has correct password"):
       print(machine.succeed("getent passwd root"))
       assert "${rootPassword}" in machine.succeed("getent shadow root"), "root user password is not correct"
 
-    with subtest("normalo user is created"):
-      print(machine.succeed("getent passwd normalo"))
-      assert machine.succeed("stat -c '%U' /home/normalo") == "normalo\n"
-      assert "${normaloPassword}" in machine.succeed("getent shadow normalo"), "normalo user password is not correct"
+    with subtest("sysuser user is created"):
+      print(machine.succeed("getent passwd sysuser"))
+      assert machine.succeed("stat -c '%U' /sysuser") == "sysuser\n"
+      assert "${sysuserPassword}" in machine.succeed("getent shadow sysuser"), "sysuser user password is not correct"
+
+    with subtest("Fail to add new user manually"):
+      machine.fail("useradd manual-sysuser")
 
 
     machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
 
 
-    with subtest("new-normalo user is created after switching to new generation"):
-      print(machine.succeed("getent passwd new-normalo"))
-      print(machine.succeed("getent shadow new-normalo"))
-      assert machine.succeed("stat -c '%U' /home/new-normalo") == "new-normalo\n"
+    with subtest("new-sysuser user is created after switching to new generation"):
+      print(machine.succeed("getent passwd new-sysuser"))
+      assert machine.succeed("stat -c '%U' /new-sysuser") == "new-sysuser\n"
   '';
 }
diff --git a/nixos/tests/systemd-sysusers-mutable.nix b/nixos/tests/systemd-sysusers-mutable.nix
index e69cfe23a59a1..9871a91cca971 100644
--- a/nixos/tests/systemd-sysusers-mutable.nix
+++ b/nixos/tests/systemd-sysusers-mutable.nix
@@ -2,8 +2,8 @@
 
 let
   rootPassword = "$y$j9T$p6OI0WN7.rSfZBOijjRdR.$xUOA2MTcB48ac.9Oc5fz8cxwLv1mMqabnn333iOzSA6";
-  normaloPassword = "hello";
-  newNormaloPassword = "$y$j9T$p6OI0WN7.rSfZBOijjRdR.$xUOA2MTcB48ac.9Oc5fz8cxwLv1mMqabnn333iOzSA6";
+  sysuserPassword = "hello";
+  newSysuserPassword = "$y$j9T$p6OI0WN7.rSfZBOijjRdR.$xUOA2MTcB48ac.9Oc5fz8cxwLv1mMqabnn333iOzSA6";
 in
 
 {
@@ -24,15 +24,19 @@ in
     # Override the empty root password set by the test instrumentation
     users.users.root.hashedPasswordFile = lib.mkForce null;
     users.users.root.initialHashedPassword = rootPassword;
-    users.users.normalo = {
-      isNormalUser = true;
-      initialPassword = normaloPassword;
+    users.users.sysuser = {
+      isSystemUser = true;
+      group = "wheel";
+      home = "/sysuser";
+      initialPassword = sysuserPassword;
     };
 
     specialisation.new-generation.configuration = {
-      users.users.new-normalo = {
-        isNormalUser = true;
-        initialHashedPassword = newNormaloPassword;
+      users.users.new-sysuser = {
+        isSystemUser = true;
+        group = "wheel";
+        home = "/new-sysuser";
+        initialHashedPassword = newSysuserPassword;
       };
     };
   };
@@ -43,7 +47,7 @@ in
     with subtest("systemd-sysusers.service contains the credentials"):
       sysusers_service = machine.succeed("systemctl cat systemd-sysusers.service")
       print(sysusers_service)
-      assert "SetCredential=passwd.plaintext-password.normalo:${normaloPassword}" in sysusers_service
+      assert "SetCredential=passwd.plaintext-password.sysuser:${sysuserPassword}" in sysusers_service
 
     with subtest("Correct mode on the password files"):
       assert machine.succeed("stat -c '%a' /etc/passwd") == "644\n"
@@ -55,17 +59,20 @@ in
       print(machine.succeed("getent passwd root"))
       assert "${rootPassword}" in machine.succeed("getent shadow root"), "root user password is not correct"
 
-    with subtest("normalo user is created"):
-      print(machine.succeed("getent passwd normalo"))
-      assert machine.succeed("stat -c '%U' /home/normalo") == "normalo\n"
+    with subtest("sysuser user is created"):
+      print(machine.succeed("getent passwd sysuser"))
+      assert machine.succeed("stat -c '%U' /sysuser") == "sysuser\n"
+
+    with subtest("Manually add new user"):
+      machine.succeed("useradd manual-sysuser")
 
 
     machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
 
 
-    with subtest("new-normalo user is created after switching to new generation"):
-      print(machine.succeed("getent passwd new-normalo"))
-      assert machine.succeed("stat -c '%U' /home/new-normalo") == "new-normalo\n"
-      assert "${newNormaloPassword}" in machine.succeed("getent shadow new-normalo"), "new-normalo user password is not correct"
+    with subtest("new-sysuser user is created after switching to new generation"):
+      print(machine.succeed("getent passwd new-sysuser"))
+      assert machine.succeed("stat -c '%U' /new-sysuser") == "new-sysuser\n"
+      assert "${newSysuserPassword}" in machine.succeed("getent shadow new-sysuser"), "new-sysuser user password is not correct"
   '';
 }
diff --git a/nixos/tests/systemd.nix b/nixos/tests/systemd.nix
index 4b087d403f37d..3430eb9398cb4 100644
--- a/nixos/tests/systemd.nix
+++ b/nixos/tests/systemd.nix
@@ -204,8 +204,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         assert "0B read, 0B written" not in output
 
     with subtest("systemd per-unit accounting works"):
-        assert "IP traffic received: 84B" in output_ping
-        assert "IP traffic sent: 84B" in output_ping
+        assert "IP traffic received: 84B sent: 84B" in output_ping
 
     with subtest("systemd environment is properly set"):
         machine.systemctl("daemon-reexec")  # Rewrites /proc/1/environ
diff --git a/nixos/tests/tandoor-recipes-script-name.nix b/nixos/tests/tandoor-recipes-script-name.nix
new file mode 100644
index 0000000000000..6216d67b8084a
--- /dev/null
+++ b/nixos/tests/tandoor-recipes-script-name.nix
@@ -0,0 +1,95 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+  {
+    name = "tandoor-recipes-script-name";
+
+    nodes.machine =
+      { pkgs, nodes, ... }:
+      {
+        services.tandoor-recipes = {
+          enable = true;
+          extraConfig = {
+            SCRIPT_NAME = "/any/path";
+            STATIC_URL = "${nodes.machine.services.tandoor-recipes.extraConfig.SCRIPT_NAME}/static/";
+          };
+        };
+      };
+
+    testScript =
+      { nodes, ... }:
+      let
+        inherit (nodes.machine.services.tandoor-recipes) address port;
+        inherit (nodes.machine.services.tandoor-recipes.extraConfig) SCRIPT_NAME;
+      in
+      ''
+        from html.parser import HTMLParser
+
+        origin_url = "http://${address}:${toString port}"
+        base_url = f"{origin_url}${SCRIPT_NAME}"
+        login_path = "/admin/login/"
+        login_url = f"{base_url}{login_path}"
+
+        cookie_jar_path = "/tmp/cookies.txt"
+        curl = f"curl --cookie {cookie_jar_path} --cookie-jar {cookie_jar_path} --fail --header 'Origin: {origin_url}' --show-error --silent"
+
+        print("Wait for the service to respond")
+        machine.wait_for_unit("tandoor-recipes.service")
+        machine.wait_until_succeeds(f"{curl} {login_url}")
+
+        username = "username"
+        password = "password"
+
+        print("Create admin user")
+        machine.succeed(
+            f"DJANGO_SUPERUSER_PASSWORD='{password}' /var/lib/tandoor-recipes/tandoor-recipes-manage createsuperuser --no-input --username='{username}' --email=nobody@example.com"
+        )
+
+        print("Get CSRF token for later requests")
+        csrf_token = machine.succeed(f"grep csrftoken {cookie_jar_path} | cut --fields=7").rstrip()
+
+        print("Log in as admin user")
+        machine.succeed(
+            f"{curl} --data 'csrfmiddlewaretoken={csrf_token}' --data 'username={username}' --data 'password={password}' {login_url}"
+        )
+
+        print("Get the contents of the logged in main page")
+        logged_in_page = machine.succeed(f"{curl} --location {base_url}")
+
+        class UrlParser(HTMLParser):
+            def __init__(self):
+                super().__init__()
+
+                self.urls: list[str] = []
+
+            def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
+                if tag == "form":
+                    for name, value in attrs:
+                        if name == "action" and value is not None:
+                            assert not value.endswith(login_path)
+                            break
+
+                if tag != "a":
+                    return
+
+                for name, value in attrs:
+                    if name == "href" and value is not None:
+                        if value.startswith(base_url):
+                            self.urls.append(value)
+                        elif value.startswith("/"):
+                            self.urls.append(f"{origin_url}{value}")
+                        else:
+                            print(f"Ignoring non-path URL: {value}")
+
+                        break
+
+        parser = UrlParser()
+        parser.feed(logged_in_page)
+
+        for url in parser.urls:
+            with subtest(f"Verify that {url} can be reached"):
+                machine.succeed(f"{curl} {url}")
+      '';
+
+    meta.maintainers = with lib.maintainers; [ l0b0 ];
+  }
+)
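For context on the csrftoken extraction in the test above: curl writes cookies in the Netscape cookie-jar format, where the cookie value is the seventh TAB-separated field, which is exactly what `cut --fields=7` selects. The line below is an illustrative example of that format, not real test data.

    # Netscape cookie-jar fields: domain, subdomain flag, path, secure, expiry, name, value.
    line = "machine\tFALSE\t/\tFALSE\t0\tcsrftoken\tabc123"
    assert line.split("\t")[6] == "abc123"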
diff --git a/nixos/tests/taskchampion-sync-server.nix b/nixos/tests/taskchampion-sync-server.nix
new file mode 100644
index 0000000000000..42dfb0cbeca30
--- /dev/null
+++ b/nixos/tests/taskchampion-sync-server.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix (
+  { ... }:
+  {
+    name = "taskchampion-sync-server";
+
+    nodes = {
+      server = {
+        services.taskchampion-sync-server.enable = true;
+        services.taskchampion-sync-server.openFirewall = true;
+      };
+      client =
+        { pkgs, ... }:
+        {
+          environment.systemPackages = [ pkgs.taskwarrior3 ];
+        };
+    };
+    testScript =
+      { nodes, ... }:
+      let
+        cfg = nodes.server.services.taskchampion-sync-server;
+        port = builtins.toString cfg.port;
+        # Generated with uuidgen
+        uuid = "bf01376e-04a4-435a-9263-608567531af3";
+        password = "nixos-test";
+      in
+      ''
+        # Explicitly start the VMs instead of relying on implicit start-up.
+        server.start()
+        client.start()
+
+        server.wait_for_unit("taskchampion-sync-server.service")
+        server.wait_for_open_port(${port})
+
+        # See man task-sync(5)
+        client.succeed("mkdir ~/.task")
+        client.succeed("touch ~/.taskrc")
+        client.succeed("echo sync.server.origin=http://server:${port} >> ~/.taskrc")
+        client.succeed("echo sync.server.client_id=${uuid} >> ~/.taskrc")
+        client.succeed("echo sync.encryption_secret=${password} >> ~/.taskrc")
+        client.succeed("task add hello world")
+        client.succeed("task sync")
+
+        # Useful for debugging
+        client.copy_from_vm("/root/.task", "client")
+        server.copy_from_vm("${cfg.dataDir}", "server")
+      '';
+  }
+)
diff --git a/nixos/tests/taskserver.nix b/nixos/tests/taskserver.nix
index 254bc8822f89d..caf48db77c44a 100644
--- a/nixos/tests/taskserver.nix
+++ b/nixos/tests/taskserver.nix
@@ -70,7 +70,7 @@ in {
         anotherOrganisation.users = [ "bob" ];
       };
 
-      specialisation.manual-config.configuration = {
+      specialisation.manual_config.configuration = {
         services.taskserver.pki.manual = {
           ca.cert = snakeOil.cacert;
           server.cert = snakeOil.cert;
@@ -81,7 +81,7 @@ in {
     };
 
     client1 = { pkgs, ... }: {
-      environment.systemPackages = [ pkgs.taskwarrior pkgs.gnutls ];
+      environment.systemPackages = [ pkgs.taskwarrior2 pkgs.gnutls ];
       users.users.alice.isNormalUser = true;
       users.users.bob.isNormalUser = true;
       users.users.foo.isNormalUser = true;
@@ -95,7 +95,7 @@ in {
     cfg = nodes.server.config.services.taskserver;
     portStr = toString cfg.listenPort;
     specialisations = "${nodes.server.system.build.toplevel}/specialisation";
-    newServerSystem = "${specialisations}/manual-config";
+    newServerSystem = "${specialisations}/manual_config";
     switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test";
   in ''
     from shlex import quote
diff --git a/nixos/tests/tayga.nix b/nixos/tests/tayga.nix
index 4aade67d74d0d..e3c57b7d58feb 100644
--- a/nixos/tests/tayga.nix
+++ b/nixos/tests/tayga.nix
@@ -55,10 +55,11 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
             "100.64.0.2/24"
           ];
           routes = [
-            { routeConfig = { Destination = "192.0.2.0/24"; Gateway = "100.64.0.1"; }; }
+            { Destination = "192.0.2.0/24"; Gateway = "100.64.0.1"; }
           ];
         };
       };
+      programs.mtr.enable = true;
     };
 
     # The router is configured with static IPv4 addresses towards the server
@@ -120,6 +121,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
             prefixLength = 96;
           };
         };
+        mappings = {
+          "192.0.2.42" = "2001:db8::2";
+        };
       };
     };
 
@@ -171,6 +175,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
             prefixLength = 96;
           };
         };
+        mappings = {
+          "192.0.2.42" = "2001:db8::2";
+        };
       };
     };
 
@@ -195,11 +202,11 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
             "2001:db8::2/64"
           ];
           routes = [
-            { routeConfig = { Destination = "64:ff9b::/96"; Gateway = "2001:db8::1"; }; }
+            { Destination = "64:ff9b::/96"; Gateway = "2001:db8::1"; }
           ];
         };
       };
-      environment.systemPackages = [ pkgs.mtr ];
+      programs.mtr.enable = true;
     };
   };
 
@@ -225,10 +232,16 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       with subtest("Wait for tayga"):
         router.wait_for_unit("tayga.service")
 
-      with subtest("Test ICMP"):
+      with subtest("Test ICMP server -> client"):
+        server.wait_until_succeeds("ping -c 3 192.0.2.42 >&2")
+
+      with subtest("Test ICMP and show a traceroute server -> client"):
+        server.wait_until_succeeds("mtr --show-ips --report-wide 192.0.2.42 >&2")
+
+      with subtest("Test ICMP client -> server"):
         client.wait_until_succeeds("ping -c 3 64:ff9b::100.64.0.2 >&2")
 
-      with subtest("Test ICMP and show a traceroute"):
+      with subtest("Test ICMP and show a traceroute client -> server"):
         client.wait_until_succeeds("mtr --show-ips --report-wide 64:ff9b::100.64.0.2 >&2")
 
       router.log(router.execute("systemd-analyze security tayga.service")[1])
diff --git a/nixos/tests/telegraf.nix b/nixos/tests/telegraf.nix
index c3cdb1645213a..2ccad7af3f555 100644
--- a/nixos/tests/telegraf.nix
+++ b/nixos/tests/telegraf.nix
@@ -19,6 +19,12 @@ import ./make-test-python.nix ({ pkgs, ...} : {
         timeout = "5s";
         data_format = "influx";
       };
+      inputs.ping = {
+        urls = ["127.0.0.1"];
+        count = 4;
+        interval = "10s";
+        timeout = 1.0;
+      };
       outputs.file.files = ["/tmp/metrics.out"];
       outputs.file.data_format = "influx";
     };
@@ -29,5 +35,6 @@ import ./make-test-python.nix ({ pkgs, ...} : {
 
     machine.wait_for_unit("telegraf.service")
     machine.wait_until_succeeds("grep -q example /tmp/metrics.out")
+    machine.wait_until_succeeds("grep -q ping /tmp/metrics.out")
   '';
 })
diff --git a/nixos/tests/teleport.nix b/nixos/tests/teleport.nix
index 2fb347155759a..0d0b9a713065a 100644
--- a/nixos/tests/teleport.nix
+++ b/nixos/tests/teleport.nix
@@ -9,8 +9,8 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
 let
   packages = with pkgs; {
     "default" = teleport;
-    "13" = teleport_13;
     "14" = teleport_14;
+    "15" = teleport_15;
   };
 
   minimal = package: {
diff --git a/nixos/tests/teleports.nix b/nixos/tests/teleports.nix
new file mode 100644
index 0000000000000..a4293f954a455
--- /dev/null
+++ b/nixos/tests/teleports.nix
@@ -0,0 +1,48 @@
+{ pkgs, lib, ... }:
+{
+  name = "teleports-standalone";
+  meta.maintainers = lib.teams.lomiri.members;
+
+  nodes.machine =
+    { config, pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+
+      services.xserver.enable = true;
+
+      environment = {
+        systemPackages = with pkgs.lomiri; [
+          suru-icon-theme
+          teleports
+        ];
+        variables = {
+          UITK_ICON_THEME = "suru";
+        };
+      };
+
+      i18n.supportedLocales = [ "all" ];
+
+      fonts.packages = with pkgs; [
+        # Intended font & helps with OCR
+        ubuntu-classic
+      ];
+    };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.wait_for_x()
+
+    with subtest("teleports launches"):
+        machine.execute("teleports >&2 &")
+        machine.wait_for_text(r"(TELEports|Phone Number)")
+        machine.screenshot("teleports_open")
+
+    machine.succeed("pkill -f teleports")
+
+    with subtest("teleports localisation works"):
+        machine.execute("env LANG=de_DE.UTF-8 teleports >&2 &")
+        machine.wait_for_text("Telefonnummer")
+        machine.screenshot("teleports_localised")
+  '';
+}
diff --git a/nixos/tests/terminal-emulators.nix b/nixos/tests/terminal-emulators.nix
index 3c1188ca88c99..9195f619c8d21 100644
--- a/nixos/tests/terminal-emulators.nix
+++ b/nixos/tests/terminal-emulators.nix
@@ -42,7 +42,7 @@ let tests = {
 
       germinal.pkg = p: p.germinal;
 
-      gnome-terminal.pkg = p: p.gnome.gnome-terminal;
+      gnome-terminal.pkg = p: p.gnome-terminal;
 
       guake.pkg = p: p.guake;
       guake.cmd = "SHELL=$command guake --show";
@@ -62,6 +62,9 @@ let tests = {
       konsole.pkg = p: p.plasma5Packages.konsole;
 
       lomiri-terminal-app.pkg = p: p.lomiri.lomiri-terminal-app;
+      # After a recent Mesa change, software rendering under icewm seems broken on x86_64:
+      # colours show up as BGR on x86_64 but RGB on aarch64, so the colour test is disabled.
+      lomiri-terminal-app.colourTest = false;
 
       lxterminal.pkg = p: p.lxterminal;
 
@@ -117,6 +120,8 @@ let tests = {
       xfce4-terminal.pkg = p: p.xfce.xfce4-terminal;
 
       xterm.pkg = p: p.xterm;
+
+      zutty.pkg = p: p.zutty;
     };
 in mapAttrs (name: { pkg, executable ? name, cmd ? "SHELL=$command ${executable}", colourTest ? true, pinkValue ? "#FF0087", kill ? false }: makeTest
 {
diff --git a/nixos/tests/prometheus.nix b/nixos/tests/thanos.nix
index 0111273893775..5bdfab7b3573f 100644
--- a/nixos/tests/prometheus.nix
+++ b/nixos/tests/thanos.nix
@@ -212,8 +212,6 @@ in import ./make-test-python.nix {
   };
 
   testScript = { nodes, ... } : ''
-    import json
-
     # Before starting the other machines we first make sure that our S3 service is online
     # and has a bucket added for thanos:
     s3.start()
@@ -289,61 +287,5 @@ in import ./make-test-python.nix {
         + "jq .thanos.labels.some_label | "
         + "grep 'required by thanos'"
     )
-
-    # Check if switching to a NixOS configuration that changes the prometheus
-    # configuration reloads (instead of restarts) prometheus before the switch
-    # finishes successfully:
-    with subtest("config change reloads prometheus"):
-        # We check if prometheus has finished reloading by looking for the message
-        # "Completed loading of configuration file" in the journal between the start
-        # and finish of switching to the new NixOS configuration.
-        #
-        # To mark the start we record the journal cursor before starting the switch:
-        cursor_before_switching = json.loads(
-            prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
-        )["__CURSOR"]
-
-        # Now we switch:
-        prometheus_config_change = prometheus.succeed(
-            "readlink /run/current-system/specialisation/prometheus-config-change"
-        ).strip()
-        prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")
-
-        # Next we retrieve all logs since the start of switching:
-        logs_after_starting_switching = prometheus.succeed(
-            """
-              journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
-            """.format(
-                cursor_before_switching=cursor_before_switching
-            )
-        )
-
-        # Finally we check if the message "Completed loading of configuration file"
-        # occurs before the "finished switching to system configuration" message:
-        finished_switching_msg = (
-            "finished switching to system configuration " + prometheus_config_change
-        )
-        reloaded_before_switching_finished = False
-        finished_switching = False
-        for log_line in logs_after_starting_switching.split("\n"):
-            msg = json.loads(log_line)["MESSAGE"]
-            if "Completed loading of configuration file" in msg:
-                reloaded_before_switching_finished = True
-            if msg == finished_switching_msg:
-                finished_switching = True
-                break
-
-        assert reloaded_before_switching_finished
-        assert finished_switching
-
-        # Check if the reloaded config includes the new s3-node_exporter job:
-        prometheus.succeed(
-          """
-            curl -sf http://127.0.0.1:${toString queryPort}/api/v1/status/config \
-              | jq -r .data.yaml \
-              | yq '.scrape_configs | any(.job_name == "s3-node_exporter")' \
-              | grep true
-          """
-        )
   '';
 }
diff --git a/nixos/tests/tigervnc.nix b/nixos/tests/tigervnc.nix
index ed575682d9338..79c4f19178d5e 100644
--- a/nixos/tests/tigervnc.nix
+++ b/nixos/tests/tigervnc.nix
@@ -7,7 +7,7 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
 makeTest {
   name = "tigervnc";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ lheckemann ];
+    maintainers = [ ];
   };
 
   nodes = {
@@ -38,16 +38,18 @@ makeTest {
     server.succeed("Xvnc -geometry 720x576 :1 -PasswordFile vncpasswd >&2 &")
     server.wait_until_succeeds("nc -z localhost 5901", timeout=10)
     server.succeed("DISPLAY=:1 xwininfo -root | grep 720x576")
-    server.execute("DISPLAY=:1 display -size 360x200 -font sans -gravity south label:'HELLO VNC WORLD' >&2 &")
+    server.execute("DISPLAY=:1 display -size 360x200 -font sans -gravity south label:'HELLO VNC' >&2 &")
 
     client.wait_for_x()
     client.execute("vncviewer server:1 -PasswordFile vncpasswd >&2 &")
     client.wait_for_window(r"VNC")
     client.screenshot("screenshot")
     text = client.get_screen_text()
+
     # Displayed text
-    assert 'HELLO VNC WORLD' in text
+    assert 'HELLO VNC' in text
     # Client window title
-    assert 'TigerVNC' in text
+    # get_screen_text cannot reliably extract this string from the screenshot
+    # assert 'TigerVNC' in text
   '';
 }
diff --git a/nixos/tests/tika.nix b/nixos/tests/tika.nix
new file mode 100644
index 0000000000000..61a3a6ad22aed
--- /dev/null
+++ b/nixos/tests/tika.nix
@@ -0,0 +1,21 @@
+{ lib, ... }:
+
+{
+  name = "tika-server";
+
+  nodes = {
+    machine = { pkgs, ... }: {
+      services.tika = {
+        enable = true;
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("tika.service")
+    machine.wait_for_open_port(9998)
+  '';
+
+  meta.maintainers = [ lib.maintainers.drupol ];
+}
diff --git a/nixos/tests/timezone.nix b/nixos/tests/timezone.nix
index 7fc9a5058eee9..5d0318e33daab 100644
--- a/nixos/tests/timezone.nix
+++ b/nixos/tests/timezone.nix
@@ -1,6 +1,6 @@
 import ./make-test-python.nix ({ pkgs, ...} : {
   name = "timezone";
-  meta.maintainers = with pkgs.lib.maintainers; [ lheckemann ];
+  meta.maintainers = with pkgs.lib.maintainers; [ ];
 
   nodes = {
     node_eutz = { pkgs, ... }: {
diff --git a/nixos/tests/tinywl.nix b/nixos/tests/tinywl.nix
index 9199866b57af7..2dc354812a75e 100644
--- a/nixos/tests/tinywl.nix
+++ b/nixos/tests/tinywl.nix
@@ -16,7 +16,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
         systemPackages = with pkgs; [ tinywl foot wayland-utils ];
       };
 
-      hardware.opengl.enable = true;
+      hardware.graphics.enable = true;
 
       # Automatically start TinyWL when logging in on tty1:
       programs.bash.loginShellInit = ''
diff --git a/nixos/tests/tomcat.nix b/nixos/tests/tomcat.nix
index df5cb033b78f0..c5e6e65ac600e 100644
--- a/nixos/tests/tomcat.nix
+++ b/nixos/tests/tomcat.nix
@@ -5,23 +5,24 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
   nodes.machine = { pkgs, ... }: {
     services.tomcat = {
       enable = true;
+      port = 8001;
       axis2.enable = true;
     };
   };
 
   testScript = ''
     machine.wait_for_unit("tomcat.service")
-    machine.wait_for_open_port(8080)
+    machine.wait_for_open_port(8001)
     machine.wait_for_file("/var/tomcat/webapps/examples");
 
     machine.succeed(
-        "curl -sS --fail http://localhost:8080/examples/servlets/servlet/HelloWorldExample | grep 'Hello World!'"
+        "curl -sS --fail http://localhost:8001/examples/servlets/servlet/HelloWorldExample | grep 'Hello World!'"
     )
     machine.succeed(
-        "curl -sS --fail http://localhost:8080/examples/jsp/jsp2/simpletag/hello.jsp | grep 'Hello, world!'"
+        "curl -sS --fail http://localhost:8001/examples/jsp/jsp2/simpletag/hello.jsp | grep 'Hello, world!'"
     )
     machine.succeed(
-        "curl -sS --fail http://localhost:8080/axis2/axis2-web/HappyAxis.jsp | grep 'Found Axis2'"
+        "curl -sS --fail http://localhost:8001/axis2/axis2-web/HappyAxis.jsp | grep 'Found Axis2'"
     )
   '';
 })
diff --git a/nixos/tests/traefik.nix b/nixos/tests/traefik.nix
index ce808e6ec95a0..f26b79a0fa4d6 100644
--- a/nixos/tests/traefik.nix
+++ b/nixos/tests/traefik.nix
@@ -23,7 +23,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
             "traefik.http.routers.nginx.rule=Host(`nginx.traefik.test`)"
           ];
           image = "nginx-container";
-          imageFile = pkgs.dockerTools.examples.nginx;
+          imageStream = pkgs.dockerTools.examples.nginxStream;
         };
       };
 
diff --git a/nixos/tests/turbovnc-headless-server.nix b/nixos/tests/turbovnc-headless-server.nix
index a155f9f907b25..c4384a4fc95c9 100644
--- a/nixos/tests/turbovnc-headless-server.nix
+++ b/nixos/tests/turbovnc-headless-server.nix
@@ -7,7 +7,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
   nodes.machine = { pkgs, ... }: {
 
     environment.systemPackages = with pkgs; [
-      glxinfo
+      mesa-demos
       procps # for `pkill`, `pidof` in the test
       scrot # for screenshotting Xorg
       turbovnc
@@ -24,11 +24,14 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     };
 
     # So that we can ssh into the VM, see e.g.
-    # http://blog.patapon.info/nixos-local-vm/#accessing-the-vm-with-ssh
+    # https://nixos.org/manual/nixos/stable/#sec-nixos-test-port-forwarding
     services.openssh.enable = true;
-    services.openssh.settings.PermitRootLogin = "yes";
-    users.extraUsers.root.password = "";
     users.mutableUsers = false;
+    # `test-instrumentation.nix` already sets an empty root password.
+    # The following have to all be set to allow an empty SSH login password.
+    services.openssh.settings.PermitRootLogin = "yes";
+    services.openssh.settings.PermitEmptyPasswords = "yes";
+    security.pam.services.sshd.allowNullPassword = true; # the default `UsePAM yes` makes this necessary
   };
 
   testScript = ''
@@ -124,7 +127,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         machine.wait_until_succeeds("test -f /tmp/glxgears-should-fail.stderr")
         wait_until_terminated_or_succeeds(
             termination_check_shell_command="pidof glxgears",
-            success_check_shell_command="grep 'libGL error: failed to load driver: swrast' /tmp/glxgears-should-fail.stderr",
+            success_check_shell_command="grep 'MESA-LOADER: failed to open swrast' /tmp/glxgears-should-fail.stderr",
             get_detail_message_fn=lambda: "Contents of /tmp/glxgears-should-fail.stderr:\n"
             + machine.succeed("cat /tmp/glxgears-should-fail.stderr"),
         )
diff --git a/nixos/tests/turn-rs.nix b/nixos/tests/turn-rs.nix
new file mode 100644
index 0000000000000..750a141c224a3
--- /dev/null
+++ b/nixos/tests/turn-rs.nix
@@ -0,0 +1,65 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+  {
+    name = "turn-rs";
+
+    nodes = {
+      server = {
+        virtualisation.vlans = [ 1 ];
+
+        networking = {
+          useNetworkd = true;
+          useDHCP = false;
+          firewall.enable = false;
+        };
+
+        systemd.network.networks."01-eth1" = {
+          name = "eth1";
+          networkConfig.Address = "10.0.0.1/24";
+        };
+
+        services.turn-rs = {
+          enable = true;
+          secretFile = pkgs.writeText "secret" ''
+            USER_1_CREDS="foobar"
+          '';
+          settings = {
+            turn = {
+              realm = "localhost";
+              interfaces = [
+                {
+                  transport = "udp";
+                  bind = "127.0.0.1:3478";
+                  external = "127.0.0.1:3478";
+                }
+                {
+                  transport = "tcp";
+                  bind = "127.0.0.1:3478";
+                  external = "127.0.0.1:3478";
+                }
+              ];
+            };
+
+            auth.static_credentials.user1 = "$USER_1_CREDS";
+          };
+        };
+      };
+    };
+
+    testScript = # python
+      ''
+        import json
+
+        start_all()
+        server.wait_for_unit('turn-rs.service')
+        server.wait_for_open_port(3000, "127.0.0.1")
+
+        info = server.succeed('curl http://localhost:3000/info')
+        jsonInfo = json.loads(info)
+        assert len(jsonInfo['interfaces']) == 2, f'The interfaces list doesn\'t contain two entries:\n{json.dumps(jsonInfo, indent=2)}'
+
+        config = server.succeed('cat /run/turn-rs/config.toml')
+        assert 'foobar' in config, f'Secrets are not properly injected:\n{config}'
+      '';
+  }
+)
diff --git a/nixos/tests/udisks2.nix b/nixos/tests/udisks2.nix
index 8cc148750c7bc..b934f0b951562 100644
--- a/nixos/tests/udisks2.nix
+++ b/nixos/tests/udisks2.nix
@@ -2,6 +2,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
 
 let
 
+  # FIXME: this URL 404s
   stick = pkgs.fetchurl {
     url = "https://nixos.org/~eelco/nix/udisks-test.img.xz";
     sha256 = "0was1xgjkjad91nipzclaz5biv3m4b2nk029ga6nk7iklwi19l8b";
@@ -12,7 +13,7 @@ in
 {
   name = "udisks2";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eelco ];
+    maintainers = [ ];
   };
 
   nodes.machine =
diff --git a/nixos/tests/user-activation-scripts.nix b/nixos/tests/user-activation-scripts.nix
index ebd96b019e920..2e1840a8460f6 100644
--- a/nixos/tests/user-activation-scripts.nix
+++ b/nixos/tests/user-activation-scripts.nix
@@ -3,6 +3,7 @@ import ./make-test-python.nix ({ lib, ... }: {
   meta = with lib.maintainers; { maintainers = [ chkno ]; };
 
   nodes.machine = {
+    system.switch.enable = true;
     system.userActivationScripts.foo = "mktemp ~/user-activation-ran.XXXXXX";
     users.users.alice = {
       initialPassword = "pass1";
diff --git a/nixos/tests/user-home-mode.nix b/nixos/tests/user-home-mode.nix
index 070cb0b75cc9d..2d6d1af3f391b 100644
--- a/nixos/tests/user-home-mode.nix
+++ b/nixos/tests/user-home-mode.nix
@@ -12,6 +12,12 @@ import ./make-test-python.nix ({ lib, ... }: {
       isNormalUser = true;
       homeMode = "750";
     };
+    users.users.carol = {
+      initialPassword = "pass3";
+      isNormalUser = true;
+      createHome = true;
+      home = "/users/carol";
+    };
   };
 
   testScript = ''
@@ -23,5 +29,7 @@ import ./make-test-python.nix ({ lib, ... }: {
     machine.send_chars("pass1\n")
     machine.succeed('[ "$(stat -c %a /home/alice)" == "700" ]')
     machine.succeed('[ "$(stat -c %a /home/bob)" == "750" ]')
+    machine.succeed('[ "$(stat -c %a /users)" == "755" ]')
+    machine.succeed('[ "$(stat -c %a /users/carol)" == "700" ]')
   '';
 })
diff --git a/nixos/tests/userborn-immutable-etc.nix b/nixos/tests/userborn-immutable-etc.nix
new file mode 100644
index 0000000000000..e95fba23063ba
--- /dev/null
+++ b/nixos/tests/userborn-immutable-etc.nix
@@ -0,0 +1,70 @@
+{ lib, ... }:
+
+let
+  normaloHashedPassword = "$y$j9T$IEWqhKtWg.r.8fVkSEF56.$iKNxdMC6hOAQRp6eBtYvBk4c7BGpONXeZMqc8I/LM46";
+
+  common = {
+    services.userborn.enable = true;
+    boot.initrd.systemd.enable = true;
+    system.etc.overlay = {
+      enable = true;
+      mutable = false;
+    };
+  };
+in
+
+{
+
+  name = "userborn-immutable-etc";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine =
+    { config, ... }:
+    {
+      imports = [ common ];
+
+      users = {
+        users = {
+          normalo = {
+            isNormalUser = true;
+            hashedPassword = normaloHashedPassword;
+          };
+        };
+      };
+
+      specialisation.new-generation = {
+        inheritParentConfig = false;
+        configuration = {
+          nixpkgs = {
+            inherit (config.nixpkgs) hostPlatform;
+          };
+          imports = [ common ];
+
+          users.users = {
+            new-normalo = {
+              isNormalUser = true;
+            };
+          };
+        };
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("userborn.service")
+
+    with subtest("normalo user is created"):
+      assert "${normaloHashedPassword}" in machine.succeed("getent shadow normalo"), "normalo user password is not correct"
+
+
+    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+
+    with subtest("normalo user is disabled"):
+      print(machine.succeed("getent shadow normalo"))
+      assert "!*" in machine.succeed("getent shadow normalo"), "normalo user is not disabled"
+
+    with subtest("new-normalo user is created after switching to new generation"):
+      print(machine.succeed("getent passwd new-normalo"))
+  '';
+}
diff --git a/nixos/tests/userborn-immutable-users.nix b/nixos/tests/userborn-immutable-users.nix
new file mode 100644
index 0000000000000..887d2d312eb7c
--- /dev/null
+++ b/nixos/tests/userborn-immutable-users.nix
@@ -0,0 +1,75 @@
+{ lib, ... }:
+
+let
+  normaloHashedPassword = "$y$j9T$IEWqhKtWg.r.8fVkSEF56.$iKNxdMC6hOAQRp6eBtYvBk4c7BGpONXeZMqc8I/LM46";
+
+  common = {
+    services.userborn.enable = true;
+    users.mutableUsers = false;
+  };
+in
+
+{
+
+  name = "userborn-immutable-users";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine =
+    { config, ... }:
+    {
+      imports = [ common ];
+
+      users = {
+        users = {
+          normalo = {
+            isNormalUser = true;
+            hashedPassword = normaloHashedPassword;
+          };
+        };
+      };
+
+      specialisation.new-generation = {
+        inheritParentConfig = false;
+        configuration = {
+          nixpkgs = {
+            inherit (config.nixpkgs) hostPlatform;
+          };
+          imports = [ common ];
+
+          users.users = {
+            new-normalo = {
+              isNormalUser = true;
+            };
+          };
+        };
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("userborn.service")
+
+    with subtest("normalo user is created"):
+      assert "${normaloHashedPassword}" in machine.succeed("getent shadow normalo"), "normalo user password is not correct"
+
+    with subtest("Fail to add new user manually"):
+      machine.fail("useradd manual-normalo")
+
+    with subtest("Fail to delete user manually"):
+      machine.fail("userdel normalo")
+
+
+    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+
+    with subtest("normalo user is disabled"):
+      print(machine.succeed("getent shadow normalo"))
+      assert "!*" in machine.succeed("getent shadow normalo"), "normalo user is not disabled"
+
+    with subtest("new-normalo user is created after switching to new generation"):
+      print(machine.succeed("getent passwd new-normalo"))
+
+    with subtest("Still fail to add new user manually"):
+      machine.fail("useradd again-normalo")
+  '';
+}
diff --git a/nixos/tests/userborn-mutable-etc.nix b/nixos/tests/userborn-mutable-etc.nix
new file mode 100644
index 0000000000000..6199b84ce71df
--- /dev/null
+++ b/nixos/tests/userborn-mutable-etc.nix
@@ -0,0 +1,70 @@
+{ lib, ... }:
+
+let
+  normaloHashedPassword = "$y$j9T$IEWqhKtWg.r.8fVkSEF56.$iKNxdMC6hOAQRp6eBtYvBk4c7BGpONXeZMqc8I/LM46";
+
+  common = {
+    services.userborn.enable = true;
+    boot.initrd.systemd.enable = true;
+    system.etc.overlay = {
+      enable = true;
+      mutable = true;
+    };
+  };
+in
+
+{
+
+  name = "userborn-mutable-etc";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine =
+    { config, ... }:
+    {
+      imports = [ common ];
+
+      users = {
+        users = {
+          normalo = {
+            isNormalUser = true;
+            hashedPassword = normaloHashedPassword;
+          };
+        };
+      };
+
+      specialisation.new-generation = {
+        inheritParentConfig = false;
+        configuration = {
+          nixpkgs = {
+            inherit (config.nixpkgs) hostPlatform;
+          };
+          imports = [ common ];
+
+          users.users = {
+            new-normalo = {
+              isNormalUser = true;
+            };
+          };
+        };
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("userborn.service")
+
+    with subtest("normalo user is created"):
+      assert "${normaloHashedPassword}" in machine.succeed("getent shadow normalo"), "normalo user password is not correct"
+
+
+    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+
+    with subtest("normalo user is disabled"):
+      print(machine.succeed("getent shadow normalo"))
+      assert "!*" in machine.succeed("getent shadow normalo"), "normalo user is not disabled"
+
+    with subtest("new-normalo user is created after switching to new generation"):
+      print(machine.succeed("getent passwd new-normalo"))
+  '';
+}
diff --git a/nixos/tests/userborn-mutable-users.nix b/nixos/tests/userborn-mutable-users.nix
new file mode 100644
index 0000000000000..e2b9c3df49539
--- /dev/null
+++ b/nixos/tests/userborn-mutable-users.nix
@@ -0,0 +1,76 @@
+{ lib, ... }:
+
+let
+  normaloHashedPassword = "$y$j9T$IEWqhKtWg.r.8fVkSEF56.$iKNxdMC6hOAQRp6eBtYvBk4c7BGpONXeZMqc8I/LM46";
+
+  common = {
+    services.userborn.enable = true;
+    users.mutableUsers = true;
+  };
+in
+
+{
+
+  name = "userborn-mutable-users";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine =
+    { config, ... }:
+    {
+      imports = [ common ];
+
+      users = {
+        mutableUsers = true;
+        users = {
+          normalo = {
+            isNormalUser = true;
+            hashedPassword = normaloHashedPassword;
+          };
+        };
+      };
+
+      specialisation.new-generation = {
+        inheritParentConfig = false;
+        configuration = {
+          nixpkgs = {
+            inherit (config.nixpkgs) hostPlatform;
+          };
+          imports = [ common ];
+
+          users.users = {
+            new-normalo = {
+              isNormalUser = true;
+            };
+          };
+        };
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("userborn.service")
+
+    with subtest("normalo user is created"):
+      assert 1000 == int(machine.succeed("id --user normalo")), "normalo user doesn't have UID 1000"
+      assert "${normaloHashedPassword}" in machine.succeed("getent shadow normalo"), "normalo user password is not correct"
+
+    with subtest("Add new user manually"):
+      machine.succeed("useradd manual-normalo")
+      assert 1001 == int(machine.succeed("id --user manual-normalo")), "manual-normalo user doesn't have UID 1001"
+
+    with subtest("Delete manual-normalo user manually"):
+      machine.succeed("userdel manual-normalo")
+
+
+    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+
+    with subtest("normalo user is disabled"):
+      print(machine.succeed("getent shadow normalo"))
+      assert "!*" in machine.succeed("getent shadow normalo"), "normalo user is not disabled"
+
+    with subtest("new-normalo user is created after switching to new generation"):
+      print(machine.succeed("getent passwd new-normalo"))
+      assert 1001 == int(machine.succeed("id --user new-normalo")), "new-normalo user doesn't have UID 1001"
+  '';
+}
diff --git a/nixos/tests/userborn.nix b/nixos/tests/userborn.nix
new file mode 100644
index 0000000000000..2c4f44b93ca54
--- /dev/null
+++ b/nixos/tests/userborn.nix
@@ -0,0 +1,135 @@
+{ lib, ... }:
+
+let
+  # All passwords are "test"
+  rootHashedPasswordFile = "$y$j9T$6ueoTO5y7vvFsGvpQJEEa.$vubxgBiMnkTCtRtPD3hNiZHa7Nm1WsJeE9QomYqSRXB";
+  updatedRootHashedPassword = "$y$j9T$pBCO9N1FRF1rSl6V15n9n/$1JmRLEYPO7TRCx43cvLO19u59WA/oqTEhmSR4wrhzr.";
+
+  normaloPassword = "test";
+  updatedNormaloHashedPassword = "$y$j9T$IEWqhKtWg.r.8fVkSEF56.$iKNxdMC6hOAQRp6eBtYvBk4c7BGpONXeZMqc8I/LM46";
+
+  sysuserInitialHashedPassword = "$y$j9T$Kb6jGrk41hudTZpNjazf11$iw7fZXrewC6JxRaGPz7/gPXDZ.Z1VWsupvy81Hi1XiD";
+  updatedSysuserInitialHashedPassword = "$y$j9T$kUBVhgOdSjymSfwfRVja70$eqCwWzVsz0fI0Uc6JsdD2CYMCpfJcErqnIqva2JCi1D";
+
+  newNormaloHashedPassword = "$y$j9T$UFBMWbGjjVola0YE9YCcV/$jRSi5S6lzkcifbuqjMcyXLTwgOGm9BTQk/G/jYaxroC";
+in
+
+{
+
+  name = "userborn";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = {
+    services.userborn.enable = true;
+
+    # Read this password file at runtime from outside the Nix store.
+    environment.etc."rootpw.secret".text = rootHashedPasswordFile;
+
+    users = {
+      users = {
+        root = {
+          # Override the empty root password set by the test instrumentation.
+          hashedPasswordFile = lib.mkForce "/etc/rootpw.secret";
+        };
+        normalo = {
+          isNormalUser = true;
+          password = normaloPassword;
+        };
+        sysuser = {
+          isSystemUser = true;
+          group = "sysusers";
+          initialHashedPassword = sysuserInitialHashedPassword;
+        };
+      };
+      groups = {
+        sysusers = { };
+      };
+    };
+
+    specialisation.new-generation.configuration = {
+      users = {
+        users = {
+          root = {
+            # Forcing this to null simulates removing the config value in a new
+            # generation.
+            hashedPasswordFile = lib.mkOverride 9 null;
+            hashedPassword = updatedRootHashedPassword;
+          };
+          normalo = {
+            hashedPassword = updatedNormaloHashedPassword;
+          };
+          sysuser = {
+            initialHashedPassword = lib.mkForce updatedSysuserInitialHashedPassword;
+          };
+          new-normalo = {
+            isNormalUser = true;
+            hashedPassword = newNormaloHashedPassword;
+          };
+        };
+        groups = {
+          new-group = { };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("userborn.service")
+
+    with subtest("Correct mode on the password files"):
+      assert machine.succeed("stat -c '%a' /etc/passwd") == "644\n"
+      assert machine.succeed("stat -c '%a' /etc/group") == "644\n"
+      assert machine.succeed("stat -c '%a' /etc/shadow") == "0\n"
+
+    with subtest("root user has correct password"):
+      print(machine.succeed("getent passwd root"))
+      assert "${rootHashedPasswordFile}" in machine.succeed("getent shadow root"), "root user password is not correct"
+
+    with subtest("normalo user is created"):
+      print(machine.succeed("getent passwd normalo"))
+      assert 1000 <= int(machine.succeed("id --user normalo")), "normalo user doesn't have a normal UID"
+      assert machine.succeed("stat -c '%U' /home/normalo") == "normalo\n"
+
+    with subtest("system user is created with correct password"):
+      print(machine.succeed("getent passwd sysuser"))
+      assert 1000 > int(machine.succeed("id --user sysuser")), "sysuser user doesn't have a system UID"
+      assert "${sysuserInitialHashedPassword}" in machine.succeed("getent shadow sysuser"), "system user password is not correct"
+
+    with subtest("sysusers group is created"):
+      print(machine.succeed("getent group sysusers"))
+
+    with subtest("Check files"):
+      print(machine.succeed("grpck -r"))
+      print(machine.succeed("pwck -r"))
+
+
+    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+
+    with subtest("root user password is updated"):
+      print(machine.succeed("getent passwd root"))
+      assert "${updatedRootHashedPassword}" in machine.succeed("getent shadow root"), "root user password is not updated"
+
+    with subtest("normalo user password is updated"):
+      print(machine.succeed("getent passwd normalo"))
+      assert "${updatedNormaloHashedPassword}" in machine.succeed("getent shadow normalo"), "normalo user password is not updated"
+
+    with subtest("system user password is NOT updated"):
+      print(machine.succeed("getent passwd sysuser"))
+      assert "${sysuserInitialHashedPassword}" in machine.succeed("getent shadow sysuser"), "sysuser user password is not updated"
+
+    with subtest("new-normalo user is created after switching to new generation"):
+      print(machine.succeed("getent passwd new-normalo"))
+      assert 1000 <= int(machine.succeed("id --user new-normalo")), "new-normalo user doesn't have a normal UID"
+      assert machine.succeed("stat -c '%U' /home/new-normalo") == "new-normalo\n"
+      assert "${newNormaloHashedPassword}" in machine.succeed("getent shadow new-normalo"), "new-normalo user password is not correct"
+
+    with subtest("new-group group is created after switching to new generation"):
+      print(machine.succeed("getent group new-group"))
+
+    with subtest("Check files"):
+      print(machine.succeed("grpck -r"))
+      print(machine.succeed("pwck -r"))
+  '';
+}
diff --git a/nixos/tests/vaultwarden.nix b/nixos/tests/vaultwarden.nix
index 28ff170e36107..b51a147be99d3 100644
--- a/nixos/tests/vaultwarden.nix
+++ b/nixos/tests/vaultwarden.nix
@@ -1,38 +1,94 @@
-{ system ? builtins.currentSystem
-, config ? { }
-, pkgs ? import ../.. { inherit system config; }
-}:
-
 # These tests will:
 #  * Set up a vaultwarden server
-#  * Have Firefox use the web vault to create an account, log in, and save a password to the valut
+#  * Have Firefox use the web vault to create an account, log in, and save a password to the vault
 #  * Have the bw cli log in and read that password from the vault
 #
 # Note that Firefox must be on the same machine as the server for WebCrypto APIs to be available (or HTTPS must be configured)
 #
 # The same tests should work without modification on the official bitwarden server, if we ever package that.
 
-with import ../lib/testing-python.nix { inherit system pkgs; };
-with pkgs.lib;
 let
-  backends = [ "sqlite" "mysql" "postgresql" ];
-
-  dbPassword = "please_dont_hack";
-
-  userEmail = "meow@example.com";
-  userPassword = "also_super_secret_ZJWpBKZi668QGt"; # Must be complex to avoid interstitial warning on the signup page
-
-  storedPassword = "seeeecret";
+  makeVaultwardenTest = name: {
+    backend ? name,
+    withClient ? true,
+    testScript ? null,
+  }: import ./make-test-python.nix ({ lib, pkgs, ... }: let
+    dbPassword = "please_dont_hack";
+    userEmail = "meow@example.com";
+    userPassword = "also_super_secret_ZJWpBKZi668QGt"; # Must be complex to avoid interstitial warning on the signup page
+    storedPassword = "seeeecret";
+
+    testRunner = pkgs.writers.writePython3Bin "test-runner" {
+      libraries = [ pkgs.python3Packages.selenium ];
+      flakeIgnore = [ "E501" ];
+    } ''
+
+      from selenium.webdriver.common.by import By
+      from selenium.webdriver import Firefox
+      from selenium.webdriver.firefox.options import Options
+      from selenium.webdriver.support.ui import WebDriverWait
+      from selenium.webdriver.support import expected_conditions as EC
+
+      options = Options()
+      options.add_argument('--headless')
+      driver = Firefox(options=options)
+
+      driver.implicitly_wait(20)
+      driver.get('http://localhost:8080/#/register')
+
+      wait = WebDriverWait(driver, 10)
+
+      wait.until(EC.title_contains("Vaultwarden Web"))
+
+      driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_email').send_keys(
+          '${userEmail}'
+      )
+      driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_name').send_keys(
+          'A Cat'
+      )
+      driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_master-password').send_keys(
+          '${userPassword}'
+      )
+      driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_confirm-master-password').send_keys(
+          '${userPassword}'
+      )
+      if driver.find_element(By.CSS_SELECTOR, 'input#checkForBreaches').is_selected():
+          driver.find_element(By.CSS_SELECTOR, 'input#checkForBreaches').click()
+
+      driver.find_element(By.XPATH, "//button[contains(., 'Create account')]").click()
+
+      wait.until_not(EC.title_contains("Create account"))
+
+      driver.find_element(By.XPATH, "//button[contains(., 'Continue')]").click()
+
+      driver.find_element(By.CSS_SELECTOR, 'input#login_input_master-password').send_keys(
+          '${userPassword}'
+      )
+      driver.find_element(By.XPATH, "//button[contains(., 'Log in')]").click()
+
+      wait.until(EC.title_contains("Vaults"))
+
+      driver.find_element(By.XPATH, "//button[contains(., 'New item')]").click()
+
+      driver.find_element(By.CSS_SELECTOR, 'input#name').send_keys(
+          'secrets'
+      )
+      driver.find_element(By.CSS_SELECTOR, 'input#loginPassword').send_keys(
+          '${storedPassword}'
+      )
+
+      driver.find_element(By.XPATH, "//button[contains(., 'Save')]").click()
+    '';
+  in {
+    inherit name;
 
-  makeVaultwardenTest = backend: makeTest {
-    name = "vaultwarden-${backend}";
     meta = {
-      maintainers = with pkgs.lib.maintainers; [ jjjollyjim ];
+      maintainers = with pkgs.lib.maintainers; [ dotlambda SuperSandro2000 ];
     };
 
     nodes = {
-      server = { pkgs, ... }:
-        let backendConfig = {
+      server = { pkgs, ... }: lib.mkMerge [
+        {
           mysql = {
             services.mysql = {
               enable = true;
@@ -53,119 +109,53 @@ let
           postgresql = {
             services.postgresql = {
               enable = true;
-              initialScript = pkgs.writeText "postgresql-init.sql" ''
-                CREATE USER bitwardenuser WITH PASSWORD '${dbPassword}';
-                CREATE DATABASE bitwarden WITH OWNER bitwardenuser;
-              '';
+              ensureDatabases = [ "vaultwarden" ];
+              ensureUsers = [{
+                name = "vaultwarden";
+                ensureDBOwnership = true;
+              }];
             };
 
-            services.vaultwarden.config.databaseUrl = "postgresql://bitwardenuser:${dbPassword}@localhost/bitwarden";
+            services.vaultwarden.config.databaseUrl = "postgresql:///vaultwarden?host=/run/postgresql";
 
             systemd.services.vaultwarden.after = [ "postgresql.service" ];
           };
 
-          sqlite = { };
-        };
-        in
-        mkMerge [
-          backendConfig.${backend}
-          {
-            services.vaultwarden = {
-              enable = true;
-              dbBackend = backend;
-              config = {
-                rocketAddress = "0.0.0.0";
-                rocketPort = 80;
-              };
-            };
+          sqlite = {
+            services.vaultwarden.backupDir = "/srv/backups/vaultwarden";
+
+            environment.systemPackages = [ pkgs.sqlite ];
+          };
+        }.${backend}
 
-            networking.firewall.allowedTCPPorts = [ 80 ];
-
-            environment.systemPackages =
-              let
-                testRunner = pkgs.writers.writePython3Bin "test-runner"
-                  {
-                    libraries = [ pkgs.python3Packages.selenium ];
-                    flakeIgnore = [
-                      "E501"
-                    ];
-                  } ''
-
-                  from selenium.webdriver.common.by import By
-                  from selenium.webdriver import Firefox
-                  from selenium.webdriver.firefox.options import Options
-                  from selenium.webdriver.support.ui import WebDriverWait
-                  from selenium.webdriver.support import expected_conditions as EC
-
-                  options = Options()
-                  options.add_argument('--headless')
-                  driver = Firefox(options=options)
-
-                  driver.implicitly_wait(20)
-                  driver.get('http://localhost/#/register')
-
-                  wait = WebDriverWait(driver, 10)
-
-                  wait.until(EC.title_contains("Vaultwarden Web"))
-
-                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_email').send_keys(
-                      '${userEmail}'
-                  )
-                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_name').send_keys(
-                      'A Cat'
-                  )
-                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_master-password').send_keys(
-                      '${userPassword}'
-                  )
-                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_confirm-master-password').send_keys(
-                      '${userPassword}'
-                  )
-                  if driver.find_element(By.CSS_SELECTOR, 'input#checkForBreaches').is_selected():
-                      driver.find_element(By.CSS_SELECTOR, 'input#checkForBreaches').click()
-
-                  driver.find_element(By.XPATH, "//button[contains(., 'Create account')]").click()
-
-                  wait.until_not(EC.title_contains("Create account"))
-
-                  driver.find_element(By.XPATH, "//button[contains(., 'Continue')]").click()
-
-                  driver.find_element(By.CSS_SELECTOR, 'input#login_input_master-password').send_keys(
-                      '${userPassword}'
-                  )
-                  driver.find_element(By.XPATH, "//button[contains(., 'Log in')]").click()
-
-                  wait.until(EC.title_contains("Vaults"))
-
-                  driver.find_element(By.XPATH, "//button[contains(., 'New item')]").click()
-
-                  driver.find_element(By.CSS_SELECTOR, 'input#name').send_keys(
-                      'secrets'
-                  )
-                  driver.find_element(By.CSS_SELECTOR, 'input#loginPassword').send_keys(
-                      '${storedPassword}'
-                  )
-
-                  driver.find_element(By.XPATH, "//button[contains(., 'Save')]").click()
-                '';
-              in
-              [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
-
-          }
-        ];
-
-      client = { pkgs, ... }:
         {
-          environment.systemPackages = [ pkgs.bitwarden-cli ];
-        };
+          services.vaultwarden = {
+            enable = true;
+            dbBackend = backend;
+            config = {
+              rocketAddress = "::";
+              rocketPort = 8080;
+            };
+          };
+
+          networking.firewall.allowedTCPPorts = [ 8080 ];
+
+          environment.systemPackages = [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
+        }
+      ];
+    } // lib.optionalAttrs withClient {
+      client = { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.bitwarden-cli ];
+      };
     };
 
-    testScript = ''
+    testScript = if testScript != null then testScript else ''
       start_all()
       server.wait_for_unit("vaultwarden.service")
-      server.wait_for_open_port(80)
+      server.wait_for_open_port(8080)
 
       with subtest("configure the cli"):
-          client.succeed("bw --nointeraction config server http://server")
+          client.succeed("bw --nointeraction config server http://server:8080")
 
       with subtest("can't login to nonexistent account"):
           client.fail(
@@ -184,15 +174,43 @@ let
           client.succeed(f"bw --nointeraction --raw --session {key} sync -f")
 
       with subtest("get the password with the cli"):
-          password = client.succeed(
-              f"bw --nointeraction --raw --session {key} list items | ${pkgs.jq}/bin/jq -r .[].login.password"
+          password = client.wait_until_succeeds(
+              f"bw --nointeraction --raw --session {key} list items | ${pkgs.jq}/bin/jq -r .[].login.password",
+              timeout=60
           )
           assert password.strip() == "${storedPassword}"
+
+      with subtest("Check systemd unit hardening"):
+          server.log(server.succeed("systemd-analyze security vaultwarden.service | grep -v ✓"))
     '';
-  };
+  });
 in
-builtins.listToAttrs (
-  map
-    (backend: { name = backend; value = makeVaultwardenTest backend; })
-    backends
-)
+builtins.mapAttrs (k: v: makeVaultwardenTest k v) {
+  mysql = {};
+  postgresql = {};
+  sqlite = {};
+  sqlite-backup = {
+    backend = "sqlite";
+    withClient = false;
+
+    testScript = ''
+      start_all()
+      server.wait_for_unit("vaultwarden.service")
+      server.wait_for_open_port(8080)
+
+      with subtest("Set up vaultwarden"):
+          server.succeed("PYTHONUNBUFFERED=1 test-runner | systemd-cat -t test-runner")
+
+      with subtest("Run the backup script"):
+          server.start_job("backup-vaultwarden.service")
+
+      with subtest("Check that backup exists"):
+          server.succeed('[ -d "/srv/backups/vaultwarden" ]')
+          server.succeed('[ -f "/srv/backups/vaultwarden/db.sqlite3" ]')
+          server.succeed('[ -d "/srv/backups/vaultwarden/attachments" ]')
+          server.succeed('[ -f "/srv/backups/vaultwarden/rsa_key.pem" ]')
+          # Ensure that only the database created by the backup command exists, not the other SQLite db files (e.g. db.sqlite3-shm).
+          server.succeed('[ ! -f "/srv/backups/vaultwarden/db.sqlite3-shm" ]')
+    '';
+  };
+}
diff --git a/nixos/tests/vector.nix b/nixos/tests/vector.nix
deleted file mode 100644
index a55eb4e012c5b..0000000000000
--- a/nixos/tests/vector.nix
+++ /dev/null
@@ -1,37 +0,0 @@
-{ system ? builtins.currentSystem, config ? { }
-, pkgs ? import ../.. { inherit system config; } }:
-
-with import ../lib/testing-python.nix { inherit system pkgs; };
-with pkgs.lib;
-
-{
-  test1 = makeTest {
-    name = "vector-test1";
-    meta.maintainers = [ pkgs.lib.maintainers.happysalada ];
-
-    nodes.machine = { config, pkgs, ... }: {
-      services.vector = {
-        enable = true;
-        journaldAccess = true;
-        settings = {
-          sources.journald.type = "journald";
-
-          sinks = {
-            file = {
-              type = "file";
-              inputs = [ "journald" ];
-              path = "/var/lib/vector/logs.log";
-              encoding = { codec = "json"; };
-            };
-          };
-        };
-      };
-    };
-
-    # ensure vector is forwarding the messages appropriately
-    testScript = ''
-      machine.wait_for_unit("vector.service")
-      machine.wait_for_file("/var/lib/vector/logs.log")
-    '';
-  };
-}
diff --git a/nixos/tests/vector/api.nix b/nixos/tests/vector/api.nix
new file mode 100644
index 0000000000000..8aa3a0c1b771f
--- /dev/null
+++ b/nixos/tests/vector/api.nix
@@ -0,0 +1,39 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "vector-api";
+  meta.maintainers = [ pkgs.lib.maintainers.happysalada ];
+
+  nodes.machineapi = { config, pkgs, ... }: {
+    services.vector = {
+      enable = true;
+      journaldAccess = false;
+      settings = {
+        api.enabled = true;
+
+        sources = {
+          demo_logs = {
+            type = "demo_logs";
+            format = "json";
+          };
+        };
+
+        sinks = {
+          file = {
+            type = "file";
+            inputs = [ "demo_logs" ];
+            path = "/var/lib/vector/logs.log";
+            encoding = { codec = "json"; };
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    machineapi.wait_for_unit("vector")
+    machineapi.wait_for_open_port(8686)
+    machineapi.succeed("journalctl -o cat -u vector.service | grep 'API server running'")
+    machineapi.wait_until_succeeds("curl -sSf http://localhost:8686/health")
+  '';
+})
diff --git a/nixos/tests/vector/default.nix b/nixos/tests/vector/default.nix
new file mode 100644
index 0000000000000..dc3747da74216
--- /dev/null
+++ b/nixos/tests/vector/default.nix
@@ -0,0 +1,12 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
+
+{
+  file-sink = import ./file-sink.nix { inherit system pkgs; };
+  api = import ./api.nix { inherit system pkgs; };
+  dnstap = import ./dnstap.nix { inherit system pkgs; };
+  nginx-clickhouse = import ./nginx-clickhouse.nix { inherit system pkgs; };
+  syslog-quickwit = import ./syslog-quickwit.nix { inherit system pkgs; };
+}
diff --git a/nixos/tests/vector/dnstap.nix b/nixos/tests/vector/dnstap.nix
new file mode 100644
index 0000000000000..5143fd938fdef
--- /dev/null
+++ b/nixos/tests/vector/dnstap.nix
@@ -0,0 +1,118 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+  dnstapSocket = "/var/run/vector/dnstap.sock";
+in
+{
+  name = "vector-dnstap";
+  meta.maintainers = [ pkgs.lib.maintainers.happysalada ];
+
+  nodes = {
+    unbound = { config, pkgs, ... }: {
+      networking.firewall.allowedUDPPorts = [ 53 ];
+
+      services.vector = {
+        enable = true;
+
+        settings = {
+          sources = {
+            dnstap = {
+              type = "dnstap";
+              multithreaded = true;
+              mode = "unix";
+              lowercase_hostnames = true;
+              socket_file_mode = 504;
+              socket_path = "${dnstapSocket}";
+            };
+          };
+
+          sinks = {
+            file = {
+              type = "file";
+              inputs = [ "dnstap" ];
+              path = "/var/lib/vector/logs.log";
+              encoding = { codec = "json"; };
+            };
+          };
+        };
+      };
+
+      systemd.services.vector.serviceConfig = {
+        RuntimeDirectory = "vector";
+        RuntimeDirectoryMode = "0770";
+      };
+
+      services.unbound = {
+        enable = true;
+        enableRootTrustAnchor = false;
+        package = pkgs.unbound-full;
+        settings = {
+          server = {
+            interface = [ "0.0.0.0" "::" ];
+            access-control = [ "192.168.0.0/24 allow" "::/0 allow" ];
+
+            domain-insecure = "local";
+            private-domain = "local";
+
+            local-zone = "local. static";
+            local-data = [
+              ''"test.local. 10800 IN A 192.168.123.5"''
+            ];
+          };
+
+          dnstap = {
+            dnstap-enable = "yes";
+            dnstap-socket-path = "${dnstapSocket}";
+            dnstap-send-identity = "yes";
+            dnstap-send-version = "yes";
+            dnstap-log-client-query-messages = "yes";
+            dnstap-log-client-response-messages = "yes";
+          };
+        };
+      };
+
+      systemd.services.unbound = {
+        after = [ "vector.service" ];
+        wants = [ "vector.service" ];
+        serviceConfig = {
+          # DNSTAP access
+          ReadWritePaths = [ "/var/run/vector" ];
+          SupplementaryGroups = [ "vector" ];
+        };
+      };
+    };
+
+    dnsclient = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.dig ];
+    };
+  };
+
+  testScript = ''
+    unbound.wait_for_unit("unbound")
+    unbound.wait_for_unit("vector")
+
+    unbound.wait_until_succeeds(
+      "journalctl -o cat -u vector.service | grep 'Socket permissions updated to 0o770'"
+    )
+    unbound.wait_until_succeeds(
+      "journalctl -o cat -u vector.service | grep 'component_type=dnstap' | grep 'Listening... path=\"${dnstapSocket}\"'"
+    )
+
+    unbound.wait_for_file("${dnstapSocket}")
+    unbound.succeed("test 770 -eq $(stat -c '%a' ${dnstapSocket})")
+
+    dnsclient.wait_for_unit("network-online.target")
+    dnsclient.succeed(
+      "dig @unbound test.local"
+    )
+
+    unbound.wait_for_file("/var/lib/vector/logs.log")
+
+    unbound.wait_until_succeeds(
+      "grep ClientQuery /var/lib/vector/logs.log | grep '\"domainName\":\"test.local.\"' | grep '\"rcodeName\":\"NoError\"'"
+    )
+    unbound.wait_until_succeeds(
+      "grep ClientResponse /var/lib/vector/logs.log | grep '\"domainName\":\"test.local.\"' | grep '\"rData\":\"192.168.123.5\"'"
+    )
+  '';
+})
diff --git a/nixos/tests/vector/file-sink.nix b/nixos/tests/vector/file-sink.nix
new file mode 100644
index 0000000000000..2220d20ac55c3
--- /dev/null
+++ b/nixos/tests/vector/file-sink.nix
@@ -0,0 +1,49 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "vector-test1";
+  meta.maintainers = [ pkgs.lib.maintainers.happysalada ];
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.vector = {
+      enable = true;
+      journaldAccess = true;
+      settings = {
+        sources = {
+          journald.type = "journald";
+
+          vector_metrics.type = "internal_metrics";
+
+          vector_logs.type = "internal_logs";
+        };
+
+        sinks = {
+          file = {
+            type = "file";
+            inputs = [ "journald" "vector_logs" ];
+            path = "/var/lib/vector/logs.log";
+            encoding = { codec = "json"; };
+          };
+
+          prometheus_exporter = {
+            type = "prometheus_exporter";
+            inputs = [ "vector_metrics" ];
+            address = "[::]:9598";
+          };
+        };
+      };
+    };
+  };
+
+  # ensure vector is forwarding the messages appropriately
+  testScript = ''
+    machine.wait_for_unit("vector.service")
+    machine.wait_for_open_port(9598)
+    machine.wait_until_succeeds("journalctl -o cat -u vector.service | grep 'version=\"${pkgs.vector.version}\"'")
+    machine.wait_until_succeeds("journalctl -o cat -u vector.service | grep 'API is disabled'")
+    machine.wait_until_succeeds("curl -sSf http://localhost:9598/metrics | grep vector_build_info")
+    machine.wait_until_succeeds("curl -sSf http://localhost:9598/metrics | grep vector_component_received_bytes_total | grep journald")
+    machine.wait_until_succeeds("curl -sSf http://localhost:9598/metrics | grep vector_utilization | grep prometheus_exporter")
+    machine.wait_for_file("/var/lib/vector/logs.log")
+  '';
+})
diff --git a/nixos/tests/vector/nginx-clickhouse.nix b/nixos/tests/vector/nginx-clickhouse.nix
new file mode 100644
index 0000000000000..3d99bac6ac161
--- /dev/null
+++ b/nixos/tests/vector/nginx-clickhouse.nix
@@ -0,0 +1,168 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "vector-nginx-clickhouse";
+  meta.maintainers = [ pkgs.lib.maintainers.happysalada ];
+
+  nodes = {
+    clickhouse = { config, pkgs, ... }: {
+      virtualisation.memorySize = 4096;
+
+      # Clickhouse module can't listen on a non-loopback IP.
+      networking.firewall.allowedTCPPorts = [ 6000 ];
+      services.clickhouse.enable = true;
+
+      # Exercise Vector sink->source for now.
+      services.vector = {
+        enable = true;
+
+        settings = {
+          sources = {
+            vector_source = {
+              type = "vector";
+              address = "[::]:6000";
+            };
+          };
+
+          sinks = {
+            clickhouse = {
+              type = "clickhouse";
+              inputs = [ "vector_source" ];
+              endpoint = "http://localhost:8123";
+              database = "nginxdb";
+              table = "access_logs";
+              skip_unknown_fields = true;
+            };
+          };
+        };
+      };
+    };
+
+    nginx = { config, pkgs, ... }: {
+      services.nginx = {
+        enable = true;
+        virtualHosts.localhost = {};
+      };
+
+      services.vector = {
+        enable = true;
+
+        settings = {
+          sources = {
+            nginx_logs = {
+              type = "file";
+              include = [ "/var/log/nginx/access.log" ];
+              read_from = "end";
+            };
+          };
+
+          sinks = {
+            vector_sink = {
+              type = "vector";
+              inputs = [ "nginx_logs" ];
+              address = "clickhouse:6000";
+            };
+          };
+        };
+      };
+
+      systemd.services.vector.serviceConfig = {
+        SupplementaryGroups = [ "nginx" ];
+      };
+    };
+  };
+
+  testScript =
+  let
+    # Work around the quoting/substitution complexity introduced by Nix, Perl, bash and SQL.
+    databaseDDL = pkgs.writeText "database.sql" "CREATE DATABASE IF NOT EXISTS nginxdb";
+
+    tableDDL = pkgs.writeText "table.sql" ''
+      CREATE TABLE IF NOT EXISTS nginxdb.access_logs (
+        message String
+      )
+      ENGINE = MergeTree()
+      ORDER BY tuple()
+    '';
+
+    # Graciously taken from https://clickhouse.com/docs/en/integrations/vector
+    tableView = pkgs.writeText "table-view.sql" ''
+      CREATE MATERIALIZED VIEW nginxdb.access_logs_view
+      (
+        RemoteAddr String,
+        Client String,
+        RemoteUser String,
+        TimeLocal DateTime,
+        RequestMethod String,
+        Request String,
+        HttpVersion String,
+        Status Int32,
+        BytesSent Int64,
+        UserAgent String
+      )
+      ENGINE = MergeTree()
+      ORDER BY RemoteAddr
+      POPULATE AS
+      WITH
+       splitByWhitespace(message) as split,
+       splitByRegexp('\S \d+ "([^"]*)"', message) as referer
+      SELECT
+        split[1] AS RemoteAddr,
+        split[2] AS Client,
+        split[3] AS RemoteUser,
+        parseDateTimeBestEffort(replaceOne(trim(LEADING '[' FROM split[4]), ':', ' ')) AS TimeLocal,
+        trim(LEADING '"' FROM split[6]) AS RequestMethod,
+        split[7] AS Request,
+        trim(TRAILING '"' FROM split[8]) AS HttpVersion,
+        split[9] AS Status,
+        split[10] AS BytesSent,
+        trim(BOTH '"' from referer[2]) AS UserAgent
+      FROM
+        (SELECT message FROM nginxdb.access_logs)
+    '';
+
+    selectQuery = pkgs.writeText "select.sql" "SELECT * from nginxdb.access_logs_view";
+  in
+  ''
+    clickhouse.wait_for_unit("clickhouse")
+    clickhouse.wait_for_open_port(8123)
+
+    clickhouse.wait_until_succeeds(
+      "journalctl -o cat -u clickhouse.service | grep 'Started ClickHouse server'"
+    )
+
+    clickhouse.wait_for_unit("vector")
+    clickhouse.wait_for_open_port(6000)
+
+    clickhouse.succeed(
+      "cat ${databaseDDL} | clickhouse-client"
+    )
+
+    clickhouse.succeed(
+      "cat ${tableDDL} | clickhouse-client"
+    )
+
+    clickhouse.succeed(
+      "cat ${tableView} | clickhouse-client"
+    )
+
+    nginx.wait_for_unit("nginx")
+    nginx.wait_for_open_port(80)
+    nginx.wait_for_unit("vector")
+    nginx.wait_until_succeeds(
+      "journalctl -o cat -u vector.service | grep 'Starting file server'"
+    )
+
+    nginx.succeed("curl http://localhost/")
+    nginx.succeed("curl http://localhost/")
+
+    nginx.wait_for_file("/var/log/nginx/access.log")
+    nginx.wait_until_succeeds(
+      "journalctl -o cat -u vector.service | grep 'Found new file to watch. file=/var/log/nginx/access.log'"
+    )
+
+    clickhouse.wait_until_succeeds(
+      "cat ${selectQuery} | clickhouse-client | grep 'curl'"
+    )
+  '';
+})
diff --git a/nixos/tests/vector/syslog-quickwit.nix b/nixos/tests/vector/syslog-quickwit.nix
new file mode 100644
index 0000000000000..cb6e04e00eae4
--- /dev/null
+++ b/nixos/tests/vector/syslog-quickwit.nix
@@ -0,0 +1,156 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+# Based on https://quickwit.io/docs/log-management/send-logs/using-vector
+
+{
+  name = "vector-syslog-quickwit";
+  meta.maintainers = [ pkgs.lib.maintainers.happysalada ];
+
+  nodes = {
+    quickwit = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+
+      networking.firewall.allowedTCPPorts = [ 7280 ];
+
+      services.quickwit = {
+        enable = true;
+        settings = {
+          listen_address = "::";
+        };
+      };
+    };
+
+    syslog = { config, pkgs, ... }: {
+      services.vector = {
+        enable = true;
+
+        settings = {
+          sources = {
+            generate_syslog = {
+              type = "demo_logs";
+              format = "syslog";
+              interval = 0.5;
+            };
+          };
+
+          transforms = {
+            remap_syslog = {
+              inputs = ["generate_syslog"];
+              type = "remap";
+              source = ''
+                structured = parse_syslog!(.message)
+                .timestamp_nanos = to_unix_timestamp!(structured.timestamp, unit: "nanoseconds")
+                .body = structured
+                .service_name = structured.appname
+                .resource_attributes.source_type = .source_type
+                .resource_attributes.host.hostname = structured.hostname
+                .resource_attributes.service.name = structured.appname
+                .attributes.syslog.procid = structured.procid
+                .attributes.syslog.facility = structured.facility
+                .attributes.syslog.version = structured.version
+                .severity_text = if includes(["emerg", "err", "crit", "alert"], structured.severity) {
+                  "ERROR"
+                } else if structured.severity == "warning" {
+                  "WARN"
+                } else if structured.severity == "debug" {
+                  "DEBUG"
+                } else if includes(["info", "notice"], structured.severity) {
+                  "INFO"
+                } else {
+                 structured.severity
+                }
+                .scope_name = structured.msgid
+                del(.message)
+                del(.host)
+                del(.timestamp)
+                del(.service)
+                del(.source_type)
+              '';
+            };
+          };
+
+          sinks = {
+            #emit_syslog = {
+            #  inputs = ["remap_syslog"];
+            #  type = "console";
+            #  encoding.codec = "json";
+            #};
+            quickwit_logs = {
+              type = "http";
+              method = "post";
+              inputs = [ "remap_syslog" ];
+              encoding.codec = "json";
+              framing.method = "newline_delimited";
+              uri = "http://quickwit:7280/api/v1/otel-logs-v0_7/ingest";
+            };
+          };
+        };
+      };
+    };
+  };
+
+  testScript =
+  let
+    aggregationQuery = pkgs.writeText "aggregation-query.json" ''
+      {
+        "query": "*",
+        "max_hits": 0,
+        "aggs": {
+          "count_per_minute": {
+            "histogram": {
+                "field": "timestamp_nanos",
+                "interval": 60000000
+            },
+            "aggs": {
+              "severity_text_count": {
+                "terms": {
+                  "field": "severity_text"
+                }
+              }
+            }
+          }
+        }
+      }
+    '';
+  in
+  ''
+    quickwit.wait_for_unit("quickwit")
+    quickwit.wait_for_open_port(7280)
+    quickwit.wait_for_open_port(7281)
+
+    quickwit.wait_until_succeeds(
+      "journalctl -o cat -u quickwit.service | grep 'transitioned to ready state'"
+    )
+
+    syslog.wait_for_unit("vector")
+    syslog.wait_until_succeeds(
+      "journalctl -o cat -u vector.service | grep 'Vector has started'"
+    )
+
+    quickwit.wait_until_succeeds(
+      "journalctl -o cat -u quickwit.service | grep 'publish-new-splits'"
+    )
+
+    # Wait for logs to be generated;
+    # the test below aggregates them per minute.
+    syslog.sleep(60 * 2)
+
+    quickwit.wait_until_succeeds(
+      "curl -sSf -XGET http://127.0.0.1:7280/api/v1/otel-logs-v0_7/search?query=severity_text:ERROR |"
+      + " jq '.num_hits' | grep -v '0'"
+    )
+
+    quickwit.wait_until_succeeds(
+      "journalctl -o cat -u quickwit.service | grep 'SearchRequest'"
+    )
+
+    quickwit.wait_until_succeeds(
+      "curl -sSf -XPOST -H 'Content-Type: application/json' http://127.0.0.1:7280/api/v1/otel-logs-v0_7/search --data @${aggregationQuery} |"
+      + " jq '.num_hits' | grep -v '0'"
+    )
+
+    quickwit.wait_until_succeeds(
+      "journalctl -o cat -u quickwit.service | grep 'count_per_minute'"
+    )
+  '';
+})
diff --git a/nixos/tests/virtualbox.nix b/nixos/tests/virtualbox.nix
index 3c2a391233dbd..5fce3ba548123 100644
--- a/nixos/tests/virtualbox.nix
+++ b/nixos/tests/virtualbox.nix
@@ -98,7 +98,6 @@ let
     cfg = (import ../lib/eval-config.nix {
       system = if use64bitGuest then "x86_64-linux" else "i686-linux";
       modules = [
-        ../modules/profiles/minimal.nix
         (testVMConfig vmName vmScript)
       ];
     }).config;
diff --git a/nixos/tests/vscode-remote-ssh.nix b/nixos/tests/vscode-remote-ssh.nix
index de7cc6badc9a2..278f2308cc165 100644
--- a/nixos/tests/vscode-remote-ssh.nix
+++ b/nixos/tests/vscode-remote-ssh.nix
@@ -14,7 +14,11 @@ import ./make-test-python.nix ({ lib, ... }@args: let
   inherit (pkgs.vscode.passthru) rev vscodeServer;
 in {
   name = "vscode-remote-ssh";
-  meta.maintainers = with lib.maintainers; [ Enzime ];
+
+  meta = {
+    maintainers = [ ];
+    timeout = 600;
+  };
 
   nodes = let
     serverAddress = "192.168.0.2";
@@ -70,7 +74,11 @@ in {
       client.succeed("sudo -u alice code --remote=ssh-remote+root@server /root")
       client.wait_for_window("Visual Studio Code")
 
-      client.wait_for_text("Do you trust the authors" if should_succeed else "Disconnected from SSH")
+      if should_succeed:
+        ocr_text = "Do you trust"
+      else:
+        ocr_text = "Could not establish connection"
+      client.wait_for_text(ocr_text)
       client.screenshot(screenshot)
 
       if should_succeed:
diff --git a/nixos/tests/web-apps/mastodon/default.nix b/nixos/tests/web-apps/mastodon/default.nix
index 178590d13b63c..7f925b9ad4ed2 100644
--- a/nixos/tests/web-apps/mastodon/default.nix
+++ b/nixos/tests/web-apps/mastodon/default.nix
@@ -1,9 +1,9 @@
-{ system ? builtins.currentSystem, handleTestOn }:
+{ system ? builtins.currentSystem, pkgs, handleTestOn, ... }:
 let
   supportedSystems = [ "x86_64-linux" "i686-linux" "aarch64-linux" ];
 
 in
 {
-  standard = handleTestOn supportedSystems ./standard.nix { inherit system; };
-  remote-databases = handleTestOn supportedSystems ./remote-databases.nix { inherit system; };
+  standard = handleTestOn supportedSystems ./standard.nix { inherit system pkgs; };
+  remote-databases = handleTestOn supportedSystems ./remote-databases.nix { inherit system pkgs; };
 }
diff --git a/nixos/tests/web-apps/mastodon/remote-databases.nix b/nixos/tests/web-apps/mastodon/remote-databases.nix
index 55243658ec6a9..8dc754fe9eb09 100644
--- a/nixos/tests/web-apps/mastodon/remote-databases.nix
+++ b/nixos/tests/web-apps/mastodon/remote-databases.nix
@@ -10,6 +10,9 @@ let
     192.168.2.103 mastodon.local
   '';
 
+  postgresqlPassword = "thisisnotasecret";
+  redisPassword = "thisisnotasecrettoo";
+
 in
 {
   name = "mastodon-remote-postgresql";
@@ -19,9 +22,7 @@ in
     databases = { config, ... }: {
       environment = {
         etc = {
-          "redis/password-redis-db".text = ''
-            ogjhJL8ynrP7MazjYOF6
-          '';
+          "redis/password-redis-db".text = redisPassword;
         };
       };
       networking = {
@@ -46,16 +47,19 @@ in
 
       services.postgresql = {
         enable = true;
-        # TODO remove once https://github.com/NixOS/nixpkgs/pull/266270 is resolved.
-        package = pkgs.postgresql_14;
         enableTCPIP = true;
         authentication = ''
-          hostnossl mastodon_local mastodon_test 192.168.2.201/32 md5
+          hostnossl mastodon mastodon 192.168.2.201/32 md5
         '';
+        ensureDatabases = [ "mastodon" ];
+        ensureUsers = [
+          {
+            name = "mastodon";
+            ensureDBOwnership = true;
+          }
+        ];
         initialScript = pkgs.writeText "postgresql_init.sql" ''
-          CREATE ROLE mastodon_test LOGIN PASSWORD 'SoDTZcISc3f1M1LJsRLT';
-          CREATE DATABASE mastodon_local TEMPLATE template0 ENCODING UTF8;
-          GRANT ALL PRIVILEGES ON DATABASE mastodon_local TO mastodon_test;
+          CREATE ROLE mastodon LOGIN PASSWORD '${postgresqlPassword}';
         '';
       };
     };
@@ -100,12 +104,8 @@ in
 
       environment = {
         etc = {
-          "mastodon/password-redis-db".text = ''
-            ogjhJL8ynrP7MazjYOF6
-          '';
-          "mastodon/password-posgressql-db".text = ''
-            SoDTZcISc3f1M1LJsRLT
-          '';
+          "mastodon/password-redis-db".text = redisPassword;
+          "mastodon/password-posgressql-db".text = postgresqlPassword;
         };
       };
 
@@ -138,8 +138,8 @@ in
           createLocally = false;
           host = "192.168.2.102";
           port = 5432;
-          name = "mastodon_local";
-          user = "mastodon_test";
+          name = "mastodon";
+          user = "mastodon";
           passwordFile = "/etc/mastodon/password-posgressql-db";
         };
         smtp = {
diff --git a/nixos/tests/web-apps/mastodon/standard.nix b/nixos/tests/web-apps/mastodon/standard.nix
index ddc764e2168c9..cd720ce9f2bfc 100644
--- a/nixos/tests/web-apps/mastodon/standard.nix
+++ b/nixos/tests/web-apps/mastodon/standard.nix
@@ -34,9 +34,6 @@ in
         pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
       };
 
-      # TODO remove once https://github.com/NixOS/nixpkgs/pull/266270 is resolved.
-      services.postgresql.package = pkgs.postgresql_14;
-
       services.mastodon = {
         enable = true;
         configureNginx = true;
diff --git a/nixos/tests/web-apps/nextjs-ollama-llm-ui.nix b/nixos/tests/web-apps/nextjs-ollama-llm-ui.nix
new file mode 100644
index 0000000000000..3bb9d1e62aefe
--- /dev/null
+++ b/nixos/tests/web-apps/nextjs-ollama-llm-ui.nix
@@ -0,0 +1,22 @@
+{ lib, ... }:
+
+{
+  name = "nextjs-ollama-llm-ui";
+  meta.maintainers = with lib.maintainers; [ malteneuss ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      services.nextjs-ollama-llm-ui = {
+        enable = true;
+        port = 8080;
+      };
+    };
+
+  testScript = ''
+    # Ensure the service is started and reachable
+    machine.wait_for_unit("nextjs-ollama-llm-ui.service")
+    machine.wait_for_open_port(8080)
+    machine.succeed("curl --fail http://127.0.0.1:8080")
+  '';
+}
diff --git a/nixos/tests/web-apps/pixelfed/standard.nix b/nixos/tests/web-apps/pixelfed/standard.nix
index 9260e27af960d..c575ee0b0f76c 100644
--- a/nixos/tests/web-apps/pixelfed/standard.nix
+++ b/nixos/tests/web-apps/pixelfed/standard.nix
@@ -1,7 +1,6 @@
-import ../../make-test-python.nix ({pkgs, ...}:
-{
+import ../../make-test-python.nix {
   name = "pixelfed-standard";
-  meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+  meta.maintainers = [ ];
 
   nodes = {
     server = { pkgs, ... }: {
@@ -35,4 +34,4 @@ import ../../make-test-python.nix ({pkgs, ...}:
     # server.succeed("pixelfed-manage passport:client --personal")
     # server.succeed("curl -H 'Host: pixefed.local' -H 'Accept: application/json' -H 'Authorization: Bearer secret' -F'status'='test' http://localhost/api/v1/statuses")
   '';
-})
+}
diff --git a/nixos/tests/web-apps/pretalx.nix b/nixos/tests/web-apps/pretalx.nix
index a226639b076b4..cbb6580aa0515 100644
--- a/nixos/tests/web-apps/pretalx.nix
+++ b/nixos/tests/web-apps/pretalx.nix
@@ -5,13 +5,16 @@
   meta.maintainers = lib.teams.c3d2.members;
 
   nodes = {
-    pretalx = {
+    pretalx = { config, ... }: {
       networking.extraHosts = ''
         127.0.0.1 talks.local
       '';
 
       services.pretalx = {
         enable = true;
+        plugins = with config.services.pretalx.package.plugins; [
+          pages
+        ];
         nginx.domain = "talks.local";
         settings = {
           site.url = "http://talks.local";
@@ -27,5 +30,9 @@
     pretalx.wait_for_unit("pretalx-worker.service")
 
     pretalx.wait_until_succeeds("curl -q --fail http://talks.local/orga/")
+
+    pretalx.succeed("pretalx-manage --help")
+
+    pretalx.log(pretalx.succeed("systemd-analyze security pretalx-web.service"))
   '';
 }
diff --git a/nixos/tests/web-apps/pretix.nix b/nixos/tests/web-apps/pretix.nix
index 559316f9b85cb..ac89a7b3fec30 100644
--- a/nixos/tests/web-apps/pretix.nix
+++ b/nixos/tests/web-apps/pretix.nix
@@ -20,6 +20,7 @@
         plugins = with pkgs.pretix.plugins; [
           passbook
           pages
+          zugferd
         ];
         settings = {
           pretix = {
diff --git a/nixos/tests/web-apps/weblate.nix b/nixos/tests/web-apps/weblate.nix
new file mode 100644
index 0000000000000..40d60f7e5f996
--- /dev/null
+++ b/nixos/tests/web-apps/weblate.nix
@@ -0,0 +1,104 @@
+import ../make-test-python.nix (
+  { pkgs, ... }:
+
+  let
+    certs = import ../common/acme/server/snakeoil-certs.nix;
+
+    serverDomain = certs.domain;
+
+    admin = {
+      username = "admin";
+      password = "snakeoilpass";
+    };
+    # An API token that we manually insert into the db as a valid one.
+    apiToken = "OVJh65sXaAfQMZ4NTcIGbFZIyBZbEZqWTi7azdDf";
+  in
+  {
+    name = "weblate";
+    meta.maintainers = with pkgs.lib.maintainers; [ erictapen ];
+
+    nodes.server =
+      { pkgs, lib, ... }:
+      {
+        virtualisation.memorySize = 2048;
+
+        services.weblate = {
+          enable = true;
+          localDomain = "${serverDomain}";
+          djangoSecretKeyFile = pkgs.writeText "weblate-django-secret" "thisissnakeoilsecretwithmorethan50characterscorrecthorsebatterystaple";
+          extraConfig = ''
+            # Weblate tries to fetch Avatars from the network
+            ENABLE_AVATARS = False
+          '';
+        };
+
+        services.nginx.virtualHosts."${serverDomain}" = {
+          enableACME = lib.mkForce false;
+          sslCertificate = certs."${serverDomain}".cert;
+          sslCertificateKey = certs."${serverDomain}".key;
+        };
+
+        security.pki.certificateFiles = [ certs.ca.cert ];
+
+        networking.hosts."::1" = [ "${serverDomain}" ];
+        networking.firewall.allowedTCPPorts = [
+          80
+          443
+        ];
+
+        users.users.weblate.shell = pkgs.bashInteractive;
+      };
+
+    nodes.client =
+      { pkgs, nodes, ... }:
+      {
+        environment.systemPackages = [ pkgs.wlc ];
+
+        environment.etc."xdg/weblate".text = ''
+          [weblate]
+          url = https://${serverDomain}/api/
+          key = ${apiToken}
+        '';
+
+        networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ "${serverDomain}" ];
+
+        security.pki.certificateFiles = [ certs.ca.cert ];
+      };
+
+    testScript = ''
+      import json
+
+      start_all()
+      server.wait_for_unit("weblate.socket")
+      server.wait_until_succeeds("curl -f https://${serverDomain}/")
+      server.succeed("sudo -iu weblate -- weblate createadmin --username ${admin.username} --password ${admin.password} --email weblate@example.org")
+
+      # It's easier to replace the generated API token with a predefined one than
+      # to extract it at runtime.
+      server.succeed("sudo -iu weblate -- psql -d weblate -c \"UPDATE authtoken_token SET key = '${apiToken}' WHERE user_id = (SELECT id FROM weblate_auth_user WHERE username = 'admin');\"")
+
+      client.wait_for_unit("multi-user.target")
+
+      # Test the official Weblate client wlc.
+      client.wait_until_succeeds("REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt wlc --debug list-projects")
+
+      def call_wl_api(arg):
+          (rv, result) = client.execute("curl -H \"Content-Type: application/json\" -H \"Authorization: Token ${apiToken}\" https://${serverDomain}/api/{}".format(arg))
+          assert rv == 0
+          print(result)
+
+      call_wl_api("users/ --data '{}'".format(
+        json.dumps(
+          {"username": "test1",
+            "full_name": "test1",
+            "email": "test1@example.org"
+          })))
+
+      # TODO: Check sending and receiving email.
+      # server.wait_for_unit("postfix.service")
+
+      # TODO: The goal is for this to succeed, but there are still some checks failing.
+      # server.succeed("sudo -iu weblate -- weblate check --deploy")
+    '';
+  }
+)
diff --git a/nixos/tests/web-servers/stargazer.nix b/nixos/tests/web-servers/stargazer.nix
index f56d1b8c94545..b687f2046a044 100644
--- a/nixos/tests/web-servers/stargazer.nix
+++ b/nixos/tests/web-servers/stargazer.nix
@@ -1,9 +1,9 @@
 { pkgs, lib, ... }:
 let
-  test_script = pkgs.stdenv.mkDerivation rec {
+  test_script = pkgs.stdenv.mkDerivation {
     pname = "stargazer-test-script";
     inherit (pkgs.stargazer) version src;
-    buildInputs = with pkgs; [ (python3.withPackages (ps: with ps; [ cryptography ])) ];
+    buildInputs = with pkgs; [ (python3.withPackages (ps: with ps; [ cryptography urllib3 ])) ];
     dontBuild = true;
     doCheck = false;
     installPhase = ''
@@ -11,7 +11,7 @@ let
       cp scripts/gemini-diagnostics $out/bin/test
     '';
   };
-  test_env = pkgs.stdenv.mkDerivation rec {
+  test_env = pkgs.stdenv.mkDerivation {
     pname = "stargazer-test-env";
     inherit (pkgs.stargazer) version src;
     buildPhase = ''
@@ -23,7 +23,7 @@ let
       cp -r * $out/
     '';
   };
-  scgi_server = pkgs.stdenv.mkDerivation rec {
+  scgi_server = pkgs.stdenv.mkDerivation {
     pname = "stargazer-test-scgi-server";
     inherit (pkgs.stargazer) version src;
     buildInputs = with pkgs; [ python3 ];
@@ -100,7 +100,12 @@ in
           }
           {
             route = "localhost:/no-exist";
-            root = "./does_not_exist";
+            root = "${test_env}/does_not_exist";
+          }
+          {
+            route = "localhost=/rss.xml";
+            root = "${test_env}/test_data/test_site";
+            mime-override = "application/atom+xml";
           }
         ];
       };
@@ -112,16 +117,41 @@ in
         };
       };
     };
+    cgiTestServer = { ... }: {
+      users.users.cgi = {
+        isSystemUser = true;
+        group = "cgi";
+      };
+      users.groups.cgi = { };
+      services.stargazer = {
+        enable = true;
+        connectionLogging = false;
+        requestTimeout = 1;
+        allowCgiUser = true;
+        routes = [
+          {
+            route = "localhost:/cgi-bin";
+            root = "${test_env}/test_data";
+            cgi = true;
+            cgi-timeout = 5;
+            cgi-user = "cgi";
+          }
+        ];
+      };
+    };
   };
 
   testScript = { nodes, ... }: ''
     geminiserver.wait_for_unit("scgi_server")
     geminiserver.wait_for_open_port(1099)
     geminiserver.wait_for_unit("stargazer")
-    geminiserver.wait_for_open_port(1965)
+    cgiTestServer.wait_for_open_port(1965)
 
     with subtest("stargazer test suite"):
       response = geminiserver.succeed("sh -c 'cd ${test_env}; ${test_script}/bin/test'")
       print(response)
+    with subtest("stargazer cgi-user test"):
+      response = cgiTestServer.succeed("sh -c 'cd ${test_env}; ${test_script}/bin/test --checks CGIVars'")
+      print(response)
   '';
 }
diff --git a/nixos/tests/wg-access-server.nix b/nixos/tests/wg-access-server.nix
new file mode 100644
index 0000000000000..84fdf43e7943b
--- /dev/null
+++ b/nixos/tests/wg-access-server.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, lib, kernelPackages ? null, ... }:
+{
+  name = "wg-access-server";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ xanderio ];
+  };
+
+  nodes = {
+    server = {
+      services.wg-access-server = {
+        enable = true;
+        settings = {
+          adminUsername = "admin";
+        };
+        secretsFile = (pkgs.writers.writeYAML "secrets.yaml" {
+          adminPassword = "hunter2";
+        });
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("wg-access-server.service")
+  '';
+}
+)
diff --git a/nixos/tests/wireguard/default.nix b/nixos/tests/wireguard/default.nix
index c30f1b74770b8..fc22f06b778b1 100644
--- a/nixos/tests/wireguard/default.nix
+++ b/nixos/tests/wireguard/default.nix
@@ -1,7 +1,8 @@
 { system ? builtins.currentSystem
 , config ? { }
 , pkgs ? import ../../.. { inherit system config; }
-, kernelVersionsToTest ? [ "5.4" "latest" ]
+  # Test the current default (LTS) kernel and the latest kernel
+, kernelVersionsToTest ? [ (pkgs.lib.versions.majorMinor pkgs.linuxPackages.kernel.version) "latest" ]
 }:
 
 with pkgs.lib;
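
The new default derives the kernel branch from whatever pkgs.linuxPackages currently points at; a small illustration of lib.versions.majorMinor (the version string below is hypothetical):

  # e.g. in nix repl:
  lib.versions.majorMinor "6.6.32"   # => "6.6", so the list becomes [ "6.6" "latest" ]
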
diff --git a/nixos/tests/wordpress.nix b/nixos/tests/wordpress.nix
index 592af9a094f1d..a2cd480302986 100644
--- a/nixos/tests/wordpress.nix
+++ b/nixos/tests/wordpress.nix
@@ -11,7 +11,7 @@ rec {
   };
 
   nodes = lib.foldl (a: version: let
-    package = pkgs."wordpress${version}";
+    package = pkgs."wordpress_${version}";
   in a // {
     "wp${version}_httpd" = _: {
       services.httpd.adminAddr = "webmaster@site.local";
@@ -67,7 +67,7 @@ rec {
       networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
     };
   }) {} [
-    "6_3" "6_4"
+    "6_5" "6_6"
   ];
 
   testScript = ''
diff --git a/nixos/tests/wpa_supplicant.nix b/nixos/tests/wpa_supplicant.nix
index 5e3b39f27ecf3..7ca70864b8371 100644
--- a/nixos/tests/wpa_supplicant.nix
+++ b/nixos/tests/wpa_supplicant.nix
@@ -1,17 +1,18 @@
-import ./make-test-python.nix ({ pkgs, lib, ...}:
-{
-  name = "wpa_supplicant";
+{ pkgs, runTest }:
+
+let
+
+  inherit (pkgs) lib;
+
   meta = with lib.maintainers; {
     maintainers = [ oddlama rnhmjoj ];
   };
 
-  nodes = let
-    machineWithHostapd = extraConfigModule: { ... }: {
-      imports = [
-        ../modules/profiles/minimal.nix
-        extraConfigModule
-      ];
+  runConnectionTest = name: extraConfig: runTest {
+    name = "wpa_supplicant-${name}";
+    inherit meta;
 
+    nodes.machine = {
       # add a virtual wlan interface
       boot.kernelModules = [ "mac80211_hwsim" ];
 
@@ -20,6 +21,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...}:
         enable = true;
         radios.wlan0 = {
           band = "2g";
+          channel = 6;
           countryCode = "US";
           networks = {
             wlan0 = {
@@ -53,27 +55,49 @@ import ./make-test-python.nix ({ pkgs, lib, ...}:
       };
 
       # wireless client
-      networking.wireless = {
-        # the override is needed because the wifi is
-        # disabled with mkVMOverride in qemu-vm.nix.
-        enable = lib.mkOverride 0 true;
-        userControlled.enable = true;
-        interfaces = [ "wlan1" ];
-        fallbackToWPA2 = lib.mkDefault true;
+      networking.wireless = lib.mkMerge [
+        {
+          # the override is needed because the wifi is
+          # disabled with mkVMOverride in qemu-vm.nix.
+          enable = lib.mkOverride 0 true;
+          userControlled.enable = true;
+          interfaces = [ "wlan1" ];
+          fallbackToWPA2 = lib.mkDefault true;
+
+          # secrets
+          secretsFile = pkgs.writeText "wpa-secrets" ''
+            psk_nixos_test=reproducibility
+          '';
+        }
+        extraConfig
+      ];
+    };
 
-        # networks will be added on-demand below for the specific
-        # network that should be tested
+    testScript = ''
+      # save hostapd config file for manual inspection
+      machine.wait_for_unit("hostapd.service")
+      machine.copy_from_vm("/run/hostapd/wlan0.hostapd.conf")
 
-        # secrets
-        environmentFile = pkgs.writeText "wpa-secrets" ''
-          PSK_NIXOS_TEST="reproducibility"
-        '';
-      };
-    };
-  in {
-    basic = { ... }: {
-      imports = [ ../modules/profiles/minimal.nix ];
+      with subtest("Daemon can connect to the access point"):
+          machine.wait_for_unit("wpa_supplicant-wlan1.service")
+          machine.wait_until_succeeds(
+            "wpa_cli -i wlan1 status | grep -q wpa_state=COMPLETED"
+          )
+    '';
+  };
+
+in
 
+{
+  # Test the basic setup:
+  #   - automatic interface discovery
+  #   - WPA2 fallbacks
+  #   - connecting to the daemon
+  basic = runTest {
+    name = "wpa_supplicant-basic";
+    inherit meta;
+
+    nodes.machine = {
       # add a virtual wlan interface
       boot.kernelModules = [ "mac80211_hwsim" ];
 
@@ -83,7 +107,6 @@ import ./make-test-python.nix ({ pkgs, lib, ...}:
         # disabled with mkVMOverride in qemu-vm.nix.
         enable = lib.mkOverride 0 true;
         userControlled.enable = true;
-        interfaces = [ "wlan1" ];
         fallbackToWPA2 = true;
 
         networks = {
@@ -96,28 +119,36 @@ import ./make-test-python.nix ({ pkgs, lib, ...}:
             psk = "password";
             authProtocols = [ "SAE" ];
           };
-
-          # secrets substitution test cases
-          test1.psk = "@PSK_VALID@";              # should be replaced
-          test2.psk = "@PSK_SPECIAL@";            # should be replaced
-          test3.psk = "@PSK_MISSING@";            # should not be replaced
-          test4.psk = "P@ssowrdWithSome@tSymbol"; # should not be replaced
-          test5.psk = "@PSK_AWK_REGEX@";          # should be replaced
         };
-
-        # secrets
-        environmentFile = pkgs.writeText "wpa-secrets" ''
-          PSK_VALID="S0m3BadP4ssw0rd";
-          # taken from https://github.com/minimaxir/big-list-of-naughty-strings
-          PSK_SPECIAL=",./;'[]\/\-= <>?:\"{}|_+ !@#$%^&*()`~";
-          PSK_AWK_REGEX="PassowrdWith&symbol";
-        '';
       };
     };
 
-    imperative = { ... }: {
-      imports = [ ../modules/profiles/minimal.nix ];
+    testScript = ''
+      with subtest("Daemon is running and accepting connections"):
+          machine.wait_for_unit("wpa_supplicant.service")
+          status = machine.wait_until_succeeds("wpa_cli status")
+          assert "Failed to connect" not in status, \
+                 "Failed to connect to the daemon"
+
+      # get the configuration file
+      cmdline = machine.succeed("cat /proc/$(pgrep wpa)/cmdline").split('\x00')
+      config_file = cmdline[cmdline.index("-c") + 1]
 
+      with subtest("WPA2 fallbacks have been generated"):
+          assert int(machine.succeed(f"grep -c sae-only {config_file}")) == 1
+          assert int(machine.succeed(f"grep -c mixed-wpa {config_file}")) == 2
+
+      # save file for manual inspection
+      machine.copy_from_vm(config_file)
+    '';
+  };
+
+  # Test configuring the daemon imperatively
+  imperative = runTest {
+    name = "wpa_supplicant-imperative";
+    inherit meta;
+
+    nodes.machine = {
       # add a virtual wlan interface
       boot.kernelModules = [ "mac80211_hwsim" ];
 
@@ -130,108 +161,55 @@ import ./make-test-python.nix ({ pkgs, lib, ...}:
       };
     };
 
-    # Test connecting to the SAE-only hotspot using SAE
-    machineSae = machineWithHostapd {
-      networking.wireless = {
-        fallbackToWPA2 = false;
-        networks.nixos-test-sae = {
-          psk = "@PSK_NIXOS_TEST@";
-          authProtocols = [ "SAE" ];
-        };
-      };
-    };
-
-    # Test connecting to the SAE and WPA2 mixed hotspot using SAE
-    machineMixedUsingSae = machineWithHostapd {
-      networking.wireless = {
-        fallbackToWPA2 = false;
-        networks.nixos-test-mixed = {
-          psk = "@PSK_NIXOS_TEST@";
-          authProtocols = [ "SAE" ];
-        };
-      };
-    };
-
-    # Test connecting to the SAE and WPA2 mixed hotspot using WPA2
-    machineMixedUsingWpa2 = machineWithHostapd {
-      networking.wireless = {
-        fallbackToWPA2 = true;
-        networks.nixos-test-mixed = {
-          psk = "@PSK_NIXOS_TEST@";
-          authProtocols = [ "WPA-PSK-SHA256" ];
-        };
-      };
-    };
-
-    # Test connecting to the WPA2 legacy hotspot using WPA2
-    machineWpa2 = machineWithHostapd {
-      networking.wireless = {
-        fallbackToWPA2 = true;
-        networks.nixos-test-wpa2 = {
-          psk = "@PSK_NIXOS_TEST@";
-          authProtocols = [ "WPA-PSK-SHA256" ];
-        };
-      };
-    };
-  };
-
-  testScript =
-    ''
-      config_file = "/run/wpa_supplicant/wpa_supplicant.conf"
-
-      with subtest("Configuration file is inaccessible to other users"):
-          basic.wait_for_file(config_file)
-          basic.fail(f"sudo -u nobody ls {config_file}")
-
-      with subtest("Secrets variables have been substituted"):
-          basic.fail(f"grep -q @PSK_VALID@ {config_file}")
-          basic.fail(f"grep -q @PSK_SPECIAL@ {config_file}")
-          basic.succeed(f"grep -q @PSK_MISSING@ {config_file}")
-          basic.succeed(f"grep -q P@ssowrdWithSome@tSymbol {config_file}")
-          basic.succeed(f"grep -q 'PassowrdWith&symbol' {config_file}")
-
-      with subtest("WPA2 fallbacks have been generated"):
-          assert int(basic.succeed(f"grep -c sae-only {config_file}")) == 1
-          assert int(basic.succeed(f"grep -c mixed-wpa {config_file}")) == 2
-
-      # save file for manual inspection
-      basic.copy_from_vm(config_file)
-
+    testScript = ''
       with subtest("Daemon is running and accepting connections"):
-          basic.wait_for_unit("wpa_supplicant-wlan1.service")
-          status = basic.succeed("wpa_cli -i wlan1 status")
+          machine.wait_for_unit("wpa_supplicant-wlan1.service")
+          status = machine.wait_until_succeeds("wpa_cli -i wlan1 status")
           assert "Failed to connect" not in status, \
                  "Failed to connect to the daemon"
 
       with subtest("Daemon can be configured imperatively"):
-          imperative.wait_for_unit("wpa_supplicant-wlan1.service")
-          imperative.wait_until_succeeds("wpa_cli -i wlan1 status")
-          imperative.succeed("wpa_cli -i wlan1 add_network")
-          imperative.succeed("wpa_cli -i wlan1 set_network 0 ssid '\"nixos-test\"'")
-          imperative.succeed("wpa_cli -i wlan1 set_network 0 psk '\"reproducibility\"'")
-          imperative.succeed("wpa_cli -i wlan1 save_config")
-          imperative.succeed("grep -q nixos-test /etc/wpa_supplicant.conf")
-
-      machineSae.wait_for_unit("hostapd.service")
-      machineSae.copy_from_vm("/run/hostapd/wlan0.hostapd.conf")
-      with subtest("Daemon can connect to the SAE access point using SAE"):
-          machineSae.wait_until_succeeds(
-            "wpa_cli -i wlan1 status | grep -q wpa_state=COMPLETED"
-          )
+          machine.succeed("wpa_cli -i wlan1 add_network")
+          machine.succeed("wpa_cli -i wlan1 set_network 0 ssid '\"nixos-test\"'")
+          machine.succeed("wpa_cli -i wlan1 set_network 0 psk '\"reproducibility\"'")
+          machine.succeed("wpa_cli -i wlan1 save_config")
+          machine.succeed("grep -q nixos-test /etc/wpa_supplicant.conf")
+    '';
+  };
 
-      with subtest("Daemon can connect to the SAE and WPA2 mixed access point using SAE"):
-          machineMixedUsingSae.wait_until_succeeds(
-            "wpa_cli -i wlan1 status | grep -q wpa_state=COMPLETED"
-          )
+  # Test connecting to a SAE-only hotspot using SAE
+  saeOnly = runConnectionTest "sae-only" {
+    fallbackToWPA2 = false;
+    networks.nixos-test-sae = {
+      pskRaw = "ext:psk_nixos_test";
+      authProtocols = [ "SAE" ];
+    };
+  };
 
-      with subtest("Daemon can connect to the SAE and WPA2 mixed access point using WPA2"):
-          machineMixedUsingWpa2.wait_until_succeeds(
-            "wpa_cli -i wlan1 status | grep -q wpa_state=COMPLETED"
-          )
+  # Test connecting to a mixed SAE/WPA2 hotspot using SAE
+  mixedUsingSae = runConnectionTest "mixed-using-sae" {
+    fallbackToWPA2 = false;
+    networks.nixos-test-mixed = {
+      pskRaw = "ext:psk_nixos_test";
+      authProtocols = [ "SAE" ];
+    };
+  };
 
-      with subtest("Daemon can connect to the WPA2 access point using WPA2"):
-          machineWpa2.wait_until_succeeds(
-            "wpa_cli -i wlan1 status | grep -q wpa_state=COMPLETED"
-          )
-    '';
-})
+  # Test connecting to a mixed SAE/WPA2 hotspot using WPA2
+  mixedUsingWpa2 = runConnectionTest "mixed-using-wpa2" {
+    fallbackToWPA2 = true;
+    networks.nixos-test-mixed = {
+      pskRaw = "ext:psk_nixos_test";
+      authProtocols = [ "WPA-PSK-SHA256" ];
+    };
+  };
+
+  # Test connecting to a legacy WPA2-only hotspot using WPA2
+  legacy = runConnectionTest "legacy" {
+    fallbackToWPA2 = true;
+    networks.nixos-test-wpa2 = {
+      pskRaw = "ext:psk_nixos_test";
+      authProtocols = [ "WPA-PSK-SHA256" ];
+    };
+  };
+}
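
The rewrite above replaces environmentFile with @VAR@ substitution by secretsFile together with pskRaw = "ext:<name>", the mechanism the patch itself uses via psk_nixos_test. A minimal sketch of that pattern outside the test, with a placeholder path, SSID and secret name:

  networking.wireless = {
    enable = true;
    # secretsFile holds one name=value entry per line, e.g.
    #   psk_home=<passphrase>
    secretsFile = "/run/secrets/wireless.conf";
    networks."home-ssid".pskRaw = "ext:psk_home";
  };
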
diff --git a/nixos/tests/wstunnel.nix b/nixos/tests/wstunnel.nix
new file mode 100644
index 0000000000000..7a0a8ce3496ad
--- /dev/null
+++ b/nixos/tests/wstunnel.nix
@@ -0,0 +1,93 @@
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+in
+
+{
+  name = "wstunnel";
+
+  nodes = {
+    server = {
+      virtualisation.vlans = [ 1 ];
+
+      security.pki.certificateFiles = [ certs.ca.cert ];
+
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.1/24";
+      };
+
+      services.wstunnel = {
+        enable = true;
+        servers.my-server = {
+          listen = {
+            host = "10.0.0.1";
+            port = 443;
+          };
+          tlsCertificate = certs.${domain}.cert;
+          tlsKey = certs.${domain}.key;
+        };
+      };
+    };
+
+    client = {
+      virtualisation.vlans = [ 1 ];
+
+      security.pki.certificateFiles = [ certs.ca.cert ];
+
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        extraHosts = ''
+          10.0.0.1 ${domain}
+        '';
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.2/24";
+      };
+
+      services.wstunnel = {
+        enable = true;
+        clients.my-client = {
+          autoStart = false;
+          connectTo = "wss://${domain}:443";
+          localToRemote = [ "tcp://8080:localhost:2080" ];
+          remoteToLocal = [ "tcp://2081:localhost:8081" ];
+        };
+      };
+    };
+  };
+
+  testScript = # python
+    ''
+      start_all()
+      server.wait_for_unit("wstunnel-server-my-server.service")
+      client.wait_for_open_port(443, "10.0.0.1")
+
+      client.systemctl("start wstunnel-client-my-client.service")
+      client.wait_for_unit("wstunnel-client-my-client.service")
+
+      with subtest("connection from client to server"):
+        server.succeed("nc -l 2080 >/tmp/msg &")
+        client.sleep(1)
+        client.succeed('nc -w1 localhost 8080 <<<"Hello from client"')
+        server.succeed('grep "Hello from client" /tmp/msg')
+
+      with subtest("connection from server to client"):
+        client.succeed("nc -l 8081 >/tmp/msg &")
+        server.sleep(1)
+        server.succeed('nc -w1 localhost 2081 <<<"Hello from server"')
+        client.succeed('grep "Hello from server" /tmp/msg')
+
+      client.systemctl("stop wstunnel-client-my-client.service")
+    '';
+}
diff --git a/nixos/tests/xfce.nix b/nixos/tests/xfce.nix
index d97f07d752712..1cef8daf3d49f 100644
--- a/nixos/tests/xfce.nix
+++ b/nixos/tests/xfce.nix
@@ -19,9 +19,6 @@ import ./make-test-python.nix ({ pkgs, ...} : {
 
       services.xserver.desktopManager.xfce.enable = true;
       environment.systemPackages = [ pkgs.xfce.xfce4-whiskermenu-plugin ];
-
-      hardware.pulseaudio.enable = true; # needed for the factl test, /dev/snd/* exists without them but udev doesn't care then
-
     };
 
   enableOCR = true;
diff --git a/nixos/tests/ydotool.nix b/nixos/tests/ydotool.nix
new file mode 100644
index 0000000000000..7a739392aa565
--- /dev/null
+++ b/nixos/tests/ydotool.nix
@@ -0,0 +1,184 @@
+{
+  system ? builtins.currentSystem,
+  config ? { },
+  pkgs ? import ../.. { inherit system config; },
+  lib ? pkgs.lib,
+}:
+let
+  makeTest = import ./make-test-python.nix;
+  textInput = "This works.";
+  inputBoxText = "Enter input";
+  inputBox = pkgs.writeShellScript "zenity-input" ''
+    ${lib.getExe pkgs.zenity} --entry --text '${inputBoxText}:' > /tmp/output &
+  '';
+  asUser = ''
+    def as_user(cmd: str):
+        """
+        Return a shell command for running a shell command as a specific user.
+        """
+        return f"sudo -u alice -i {cmd}"
+  '';
+in
+{
+  headless = makeTest {
+    name = "headless";
+
+    enableOCR = true;
+
+    nodes.machine = {
+      imports = [ ./common/user-account.nix ];
+
+      users.users.alice.extraGroups = [ "ydotool" ];
+
+      programs.ydotool.enable = true;
+
+      services.getty.autologinUser = "alice";
+    };
+
+    testScript =
+      asUser
+      + ''
+        start_all()
+
+        machine.wait_for_unit("multi-user.target")
+        machine.wait_for_text("alice")
+        machine.succeed(as_user("ydotool type 'echo ${textInput} > /tmp/output'")) # text input
+        machine.succeed(as_user("ydotool key 28:1 28:0")) # text input
+        machine.screenshot("headless_input")
+        machine.wait_for_file("/tmp/output")
+        machine.wait_until_succeeds("grep '${textInput}' /tmp/output") # text input
+      '';
+
+    meta.maintainers = with lib.maintainers; [
+      OPNA2608
+      quantenzitrone
+    ];
+  };
+
+  x11 = makeTest {
+    name = "x11";
+
+    enableOCR = true;
+
+    nodes.machine = {
+      imports = [
+        ./common/user-account.nix
+        ./common/auto.nix
+        ./common/x11.nix
+      ];
+
+      users.users.alice.extraGroups = [ "ydotool" ];
+
+      programs.ydotool.enable = true;
+
+      test-support.displayManager.auto = {
+        enable = true;
+        user = "alice";
+      };
+
+      services.xserver.windowManager.dwm.enable = true;
+      services.displayManager.defaultSession = lib.mkForce "none+dwm";
+    };
+
+    testScript =
+      asUser
+      + ''
+        start_all()
+
+        machine.wait_for_x()
+        machine.execute(as_user("${inputBox}"))
+        machine.wait_for_text("${inputBoxText}")
+        machine.succeed(as_user("ydotool type '${textInput}'")) # text input
+        machine.screenshot("x11_input")
+        machine.succeed(as_user("ydotool mousemove -a 400 110")) # mouse input
+        machine.succeed(as_user("ydotool click 0xC0")) # mouse input
+        machine.wait_for_file("/tmp/output")
+        machine.wait_until_succeeds("grep '${textInput}' /tmp/output") # text input
+      '';
+
+    meta.maintainers = with lib.maintainers; [
+      OPNA2608
+      quantenzitrone
+    ];
+  };
+
+  wayland = makeTest {
+    name = "wayland";
+
+    enableOCR = true;
+
+    nodes.machine = {
+      imports = [ ./common/user-account.nix ];
+
+      services.cage = {
+        enable = true;
+        user = "alice";
+      };
+
+      programs.ydotool.enable = true;
+
+      services.cage.program = inputBox;
+    };
+
+    testScript = ''
+      start_all()
+
+      machine.wait_for_unit("graphical.target")
+      machine.wait_for_text("${inputBoxText}")
+      machine.succeed("ydotool type '${textInput}'") # text input
+      machine.screenshot("wayland_input")
+      machine.succeed("ydotool mousemove -a 100 100") # mouse input
+      machine.succeed("ydotool click 0xC0") # mouse input
+      machine.wait_for_file("/tmp/output")
+      machine.wait_until_succeeds("grep '${textInput}' /tmp/output") # text input
+    '';
+
+    meta.maintainers = with lib.maintainers; [
+      OPNA2608
+      quantenzitrone
+    ];
+  };
+
+  customGroup =
+    let
+      name = "customGroup";
+      nodeName = "${name}Node";
+      insideGroupUsername = "ydotool-user";
+      outsideGroupUsername = "other-user";
+      groupName = "custom-group";
+    in
+    makeTest {
+      inherit name;
+
+      nodes."${nodeName}" = {
+        programs.ydotool = {
+          enable = true;
+          group = groupName;
+        };
+
+        users.users = {
+          "${insideGroupUsername}" = {
+            isNormalUser = true;
+            extraGroups = [ groupName ];
+          };
+          "${outsideGroupUsername}".isNormalUser = true;
+        };
+      };
+
+      testScript = ''
+        start_all()
+
+        # Wait for the machine to boot and the ydotoold service to start
+        ${nodeName}.wait_for_unit("multi-user.target")
+        ${nodeName}.wait_for_unit("ydotoold.service")
+
+        # Verify that user with the configured group can use the service
+        ${nodeName}.succeed("sudo --login --user=${insideGroupUsername} ydotool type 'Hello, World!'")
+
+        # Verify that user without the configured group can't use the service
+        ${nodeName}.fail("sudo --login --user=${outsideGroupUsername} ydotool type 'Hello, World!'")
+      '';
+
+      meta.maintainers = with lib.maintainers; [ l0b0 ];
+    };
+}
diff --git a/nixos/tests/your_spotify.nix b/nixos/tests/your_spotify.nix
new file mode 100644
index 0000000000000..a1fa0e459a8e1
--- /dev/null
+++ b/nixos/tests/your_spotify.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({pkgs, ...}: {
+  name = "your_spotify";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [patrickdag];
+  };
+
+  nodes.machine = {
+    services.your_spotify = {
+      enable = true;
+      spotifySecretFile = pkgs.writeText "spotifySecretFile" "deadbeef";
+      settings = {
+        CLIENT_ENDPOINT = "http://localhost";
+        API_ENDPOINT = "http://localhost:3000";
+        SPOTIFY_PUBLIC = "beefdead";
+      };
+      enableLocalDB = true;
+      nginxVirtualHost = "localhost";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("your_spotify.service")
+
+    machine.wait_for_open_port(3000)
+    machine.wait_for_open_port(80)
+
+    out = machine.succeed("curl --fail -X GET 'http://localhost:3000/'")
+    assert "Hello !" in out
+
+    out = machine.succeed("curl --fail -X GET 'http://localhost:80/'")
+    assert "<title>Your Spotify</title>" in out
+  '';
+})
diff --git a/nixos/tests/zfs.nix b/nixos/tests/zfs.nix
index 851fced2c5e1e..877749a8048be 100644
--- a/nixos/tests/zfs.nix
+++ b/nixos/tests/zfs.nix
@@ -45,13 +45,13 @@ let
         specialisation.samba.configuration = {
           services.samba = {
             enable = true;
-            extraConfig = ''
-              registry shares = yes
-              usershare path = ${usersharePath}
-              usershare allow guests = yes
-              usershare max shares = 100
-              usershare owner only = no
-            '';
+            settings.global = {
+              "registry shares" = true;
+              "usershare path" = "${usersharePath}";
+              "usershare allow guests" = true;
+              "usershare max shares" = "100";
+              "usershare owner only" = false;
+            };
           };
           systemd.services.samba-smbd.serviceConfig.ExecStartPre =
             "${pkgs.coreutils}/bin/mkdir -m +t -p ${usersharePath}";
@@ -213,8 +213,8 @@ in {
     enableSystemdStage1 = true;
   };
 
-  installerBoot = (import ./installer.nix { }).separateBootZfs;
-  installer = (import ./installer.nix { }).zfsroot;
+  installerBoot = (import ./installer.nix { inherit system; }).separateBootZfs;
+  installer = (import ./installer.nix { inherit system; }).zfsroot;
 
   expand-partitions = makeTest {
     name = "multi-disk-zfs";
diff --git a/nixos/tests/zigbee2mqtt.nix b/nixos/tests/zigbee2mqtt.nix
index 9d6d03a4b9bbe..3311bb11eb1a2 100644
--- a/nixos/tests/zigbee2mqtt.nix
+++ b/nixos/tests/zigbee2mqtt.nix
@@ -23,7 +23,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       machine.wait_for_unit("multi-user.target")
       machine.wait_until_fails("systemctl status zigbee2mqtt.service")
       machine.succeed(
-          "journalctl -eu zigbee2mqtt | grep 'Failed to connect to the adapter'"
+          "journalctl -eu zigbee2mqtt | grep 'Error: Inappropriate ioctl for device, cannot set'"
       )
 
       machine.log(machine.succeed("systemd-analyze security zigbee2mqtt.service"))