path: root/nixos
author     Jörg Thalheim <Mic92@users.noreply.github.com>    2024-01-07 11:40:30 +0100
committer  GitHub <noreply@github.com>                       2024-01-07 11:40:30 +0100
commit     db59d03371a1ca74896c812e1653d5bccc0bc0f4 (patch)
tree       59507d6d1cfbbebb6467774befc5656f5a41bc52 /nixos
parent     9598daf31aec96e4a987e2e03d68cf55ce575d0a (diff)
parent     12c489d36b19907c6def5f73e420cd019857e2eb (diff)
Merge branch 'master' into pr/bcachefs
Diffstat (limited to 'nixos')
-rw-r--r--nixos/doc/manual/administration/nixos-state.section.md28
-rw-r--r--nixos/doc/manual/administration/running.md1
-rw-r--r--nixos/doc/manual/administration/system-state.chapter.md17
-rw-r--r--nixos/doc/manual/administration/systemd-state.section.md52
-rw-r--r--nixos/doc/manual/administration/zfs-state.section.md16
-rw-r--r--nixos/doc/manual/development/option-types.section.md7
-rw-r--r--nixos/doc/manual/installation/changing-config.chapter.md8
-rw-r--r--nixos/doc/manual/release-notes/release-notes.md1
-rw-r--r--nixos/doc/manual/release-notes/rl-2111.section.md2
-rw-r--r--nixos/doc/manual/release-notes/rl-2311.section.md88
-rw-r--r--nixos/doc/manual/release-notes/rl-2405.section.md104
-rw-r--r--nixos/lib/make-disk-image.nix9
-rw-r--r--nixos/lib/test-driver/test_driver/machine.py32
-rwxr-xr-xnixos/maintainers/scripts/ec2/create-amis.sh54
-rw-r--r--nixos/modules/config/iproute2.nix5
-rw-r--r--nixos/modules/config/ldap.nix101
-rw-r--r--nixos/modules/config/ldso.nix58
-rw-r--r--nixos/modules/config/nix-channel.nix1
-rw-r--r--nixos/modules/config/nix.nix8
-rw-r--r--nixos/modules/config/no-x-libs.nix5
-rw-r--r--nixos/modules/config/pulseaudio.nix22
-rw-r--r--nixos/modules/config/stub-ld.nix56
-rw-r--r--nixos/modules/config/swap.nix3
-rw-r--r--nixos/modules/config/sysctl.nix30
-rw-r--r--nixos/modules/config/users-groups.nix2
-rw-r--r--nixos/modules/hardware/all-firmware.nix5
-rw-r--r--nixos/modules/hardware/keyboard/qmk.nix1
-rw-r--r--nixos/modules/hardware/video/amdgpu-pro.nix7
-rw-r--r--nixos/modules/hardware/video/nvidia.nix72
-rw-r--r--nixos/modules/i18n/input-method/fcitx5.nix13
-rw-r--r--nixos/modules/image/repart-image.nix80
-rw-r--r--nixos/modules/image/repart.nix62
-rw-r--r--nixos/modules/installer/tools/tools.nix3
-rw-r--r--nixos/modules/misc/documentation.nix8
-rw-r--r--nixos/modules/misc/ids.nix4
-rw-r--r--nixos/modules/misc/mandoc.nix161
-rw-r--r--nixos/modules/module-list.nix14
-rw-r--r--nixos/modules/profiles/installation-device.nix2
-rw-r--r--nixos/modules/profiles/minimal.nix2
-rw-r--r--nixos/modules/programs/atop.nix5
-rw-r--r--nixos/modules/programs/direnv.nix9
-rw-r--r--nixos/modules/programs/firejail.nix2
-rw-r--r--nixos/modules/programs/gamemode.nix2
-rw-r--r--nixos/modules/programs/gpaste.nix2
-rw-r--r--nixos/modules/programs/hyprland.nix11
-rw-r--r--nixos/modules/programs/mininet.nix35
-rw-r--r--nixos/modules/programs/mosh.nix24
-rw-r--r--nixos/modules/programs/nix-ld.nix2
-rw-r--r--nixos/modules/programs/ssh.nix20
-rw-r--r--nixos/modules/programs/starship.nix24
-rw-r--r--nixos/modules/programs/wayland/river.nix3
-rw-r--r--nixos/modules/programs/wayland/sway.nix43
-rw-r--r--nixos/modules/programs/winbox.nix24
-rw-r--r--nixos/modules/security/acme/default.md16
-rw-r--r--nixos/modules/security/apparmor.nix3
-rw-r--r--nixos/modules/security/auditd.nix4
-rw-r--r--nixos/modules/security/duosec.nix6
-rw-r--r--nixos/modules/security/ipa.nix46
-rw-r--r--nixos/modules/security/sudo-rs.nix2
-rw-r--r--nixos/modules/security/wrappers/default.nix4
-rw-r--r--nixos/modules/security/wrappers/wrapper.nix4
-rw-r--r--nixos/modules/services/admin/pgadmin.nix7
-rw-r--r--nixos/modules/services/audio/mopidy.nix2
-rw-r--r--nixos/modules/services/audio/mympd.nix129
-rw-r--r--nixos/modules/services/audio/wyoming/faster-whisper.nix1
-rw-r--r--nixos/modules/services/audio/wyoming/piper.nix1
-rw-r--r--nixos/modules/services/backup/borgbackup.nix56
-rw-r--r--nixos/modules/services/backup/btrbk.nix54
-rw-r--r--nixos/modules/services/backup/restic.nix3
-rw-r--r--nixos/modules/services/backup/snapraid.nix (renamed from nixos/modules/tasks/snapraid.nix)9
-rw-r--r--nixos/modules/services/cluster/kubernetes/flannel.nix9
-rw-r--r--nixos/modules/services/cluster/kubernetes/pki.nix2
-rw-r--r--nixos/modules/services/cluster/spark/default.nix16
-rw-r--r--nixos/modules/services/continuous-integration/buildbot/master.nix2
-rw-r--r--nixos/modules/services/continuous-integration/buildbot/worker.nix2
-rw-r--r--nixos/modules/services/continuous-integration/buildkite-agents.nix8
-rw-r--r--nixos/modules/services/continuous-integration/jenkins/default.nix1
-rw-r--r--nixos/modules/services/databases/aerospike.nix13
-rw-r--r--nixos/modules/services/databases/postgresql.md2
-rw-r--r--nixos/modules/services/databases/postgresql.nix13
-rw-r--r--nixos/modules/services/desktops/flatpak.nix1
-rw-r--r--nixos/modules/services/desktops/pipewire/pipewire.nix182
-rw-r--r--nixos/modules/services/development/livebook.md13
-rw-r--r--nixos/modules/services/development/livebook.nix15
-rw-r--r--nixos/modules/services/development/zammad.nix55
-rw-r--r--nixos/modules/services/display-managers/greetd.nix12
-rw-r--r--nixos/modules/services/games/teeworlds.nix2
-rw-r--r--nixos/modules/services/hardware/kanata.nix8
-rw-r--r--nixos/modules/services/hardware/keyd.nix6
-rw-r--r--nixos/modules/services/hardware/power-profiles-daemon.nix19
-rw-r--r--nixos/modules/services/hardware/sane.nix9
-rw-r--r--nixos/modules/services/hardware/thermald.nix7
-rw-r--r--nixos/modules/services/hardware/udev.nix6
-rw-r--r--nixos/modules/services/hardware/vdr.nix95
-rw-r--r--nixos/modules/services/home-automation/home-assistant.nix15
-rw-r--r--nixos/modules/services/logging/logcheck.nix14
-rw-r--r--nixos/modules/services/logging/vector.nix6
-rw-r--r--nixos/modules/services/mail/postfix.nix15
-rw-r--r--nixos/modules/services/mail/rspamd-trainer.nix76
-rw-r--r--nixos/modules/services/matrix/matrix-sliding-sync.nix9
-rw-r--r--nixos/modules/services/matrix/maubot.nix2
-rw-r--r--nixos/modules/services/matrix/synapse.md8
-rw-r--r--nixos/modules/services/matrix/synapse.nix14
-rw-r--r--nixos/modules/services/misc/gitea.nix15
-rw-r--r--nixos/modules/services/misc/guix/default.nix46
-rw-r--r--nixos/modules/services/misc/llama-cpp.nix111
-rw-r--r--nixos/modules/services/misc/moonraker.nix6
-rw-r--r--nixos/modules/services/misc/nitter.nix23
-rw-r--r--nixos/modules/services/misc/ollama.nix42
-rw-r--r--nixos/modules/services/misc/paperless.nix47
-rw-r--r--nixos/modules/services/misc/portunus.nix5
-rw-r--r--nixos/modules/services/misc/redmine.nix2
-rw-r--r--nixos/modules/services/misc/tandoor-recipes.nix2
-rw-r--r--nixos/modules/services/misc/tuxclocker.nix71
-rw-r--r--nixos/modules/services/monitoring/grafana.nix4
-rw-r--r--nixos/modules/services/monitoring/netdata.nix1
-rw-r--r--nixos/modules/services/monitoring/prometheus/default.nix27
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters.nix1
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/kea.nix8
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/nginx.nix4
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/ping.nix48
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix22
-rw-r--r--nixos/modules/services/monitoring/thanos.nix3
-rw-r--r--nixos/modules/services/monitoring/ups.nix422
-rw-r--r--nixos/modules/services/network-filesystems/drbd.nix4
-rw-r--r--nixos/modules/services/network-filesystems/eris-server.nix56
-rw-r--r--nixos/modules/services/network-filesystems/kubo.nix42
-rw-r--r--nixos/modules/services/networking/avahi-daemon.nix38
-rw-r--r--nixos/modules/services/networking/ddclient.nix4
-rw-r--r--nixos/modules/services/networking/dnsmasq.md68
-rw-r--r--nixos/modules/services/networking/dnsmasq.nix2
-rw-r--r--nixos/modules/services/networking/ejabberd.nix6
-rw-r--r--nixos/modules/services/networking/firewall-iptables.nix3
-rw-r--r--nixos/modules/services/networking/gns3-server.md31
-rw-r--r--nixos/modules/services/networking/gns3-server.nix263
-rw-r--r--nixos/modules/services/networking/harmonia.nix7
-rw-r--r--nixos/modules/services/networking/iwd.nix2
-rw-r--r--nixos/modules/services/networking/jigasi.nix237
-rw-r--r--nixos/modules/services/networking/kea.nix22
-rw-r--r--nixos/modules/services/networking/nebula.nix2
-rw-r--r--nixos/modules/services/networking/quicktun.nix146
-rw-r--r--nixos/modules/services/networking/ssh/sshd.nix6
-rw-r--r--nixos/modules/services/networking/syncthing.nix11
-rw-r--r--nixos/modules/services/networking/tailscale.nix4
-rw-r--r--nixos/modules/services/networking/teamspeak3.nix42
-rw-r--r--nixos/modules/services/networking/tinyproxy.nix2
-rw-r--r--nixos/modules/services/networking/vdirsyncer.nix8
-rw-r--r--nixos/modules/services/networking/wpa_supplicant.nix4
-rw-r--r--nixos/modules/services/networking/yggdrasil.nix28
-rw-r--r--nixos/modules/services/networking/zerotierone.nix27
-rw-r--r--nixos/modules/services/search/hound.nix77
-rw-r--r--nixos/modules/services/security/clamav.nix52
-rw-r--r--nixos/modules/services/security/munge.nix14
-rw-r--r--nixos/modules/services/security/shibboleth-sp.nix33
-rw-r--r--nixos/modules/services/security/tor.nix2
-rw-r--r--nixos/modules/services/system/cachix-watch-store.nix12
-rw-r--r--nixos/modules/services/system/dbus.nix5
-rw-r--r--nixos/modules/services/system/zram-generator.nix2
-rw-r--r--nixos/modules/services/torrent/transmission.nix37
-rw-r--r--nixos/modules/services/video/frigate.nix1
-rw-r--r--nixos/modules/services/web-apps/code-server.nix259
-rw-r--r--nixos/modules/services/web-apps/freshrss.nix1
-rw-r--r--nixos/modules/services/web-apps/invidious.nix313
-rw-r--r--nixos/modules/services/web-apps/jitsi-meet.nix38
-rw-r--r--nixos/modules/services/web-apps/keycloak.nix14
-rw-r--r--nixos/modules/services/web-apps/mastodon.nix21
-rw-r--r--nixos/modules/services/web-apps/mobilizon.nix2
-rw-r--r--nixos/modules/services/web-apps/nextcloud.md4
-rw-r--r--nixos/modules/services/web-apps/nextcloud.nix363
-rw-r--r--nixos/modules/services/web-apps/node-red.nix2
-rw-r--r--nixos/modules/services/web-apps/windmill.nix177
-rw-r--r--nixos/modules/services/web-apps/wordpress.nix2
-rw-r--r--nixos/modules/services/web-servers/caddy/default.nix9
-rw-r--r--nixos/modules/services/web-servers/nginx/default.nix49
-rw-r--r--nixos/modules/services/web-servers/nginx/location-options.nix2
-rw-r--r--nixos/modules/services/web-servers/nginx/tailscale-auth.nix158
-rw-r--r--nixos/modules/services/web-servers/nginx/vhost-options.nix25
-rw-r--r--nixos/modules/services/x11/desktop-managers/cinnamon.nix27
-rw-r--r--nixos/modules/services/x11/desktop-managers/plasma5.nix2
-rw-r--r--nixos/modules/services/x11/display-managers/sddm.nix4
-rw-r--r--nixos/modules/services/x11/hardware/libinput.nix6
-rw-r--r--nixos/modules/system/activation/bootspec.nix2
-rw-r--r--nixos/modules/system/boot/binfmt.nix2
-rw-r--r--nixos/modules/system/boot/grow-partition.nix2
-rw-r--r--nixos/modules/system/boot/initrd-ssh.nix6
-rw-r--r--nixos/modules/system/boot/loader/grub/grub.nix12
-rw-r--r--nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py7
-rw-r--r--nixos/modules/system/boot/luksroot.nix2
-rw-r--r--nixos/modules/system/boot/networkd.nix5
-rw-r--r--nixos/modules/system/boot/resolved.nix9
-rw-r--r--nixos/modules/system/boot/systemd/initrd-secrets.nix3
-rw-r--r--nixos/modules/system/boot/systemd/initrd.nix7
-rw-r--r--nixos/modules/system/boot/systemd/oomd.nix26
-rw-r--r--nixos/modules/tasks/filesystems.nix3
-rw-r--r--nixos/modules/tasks/filesystems/bcachefs.nix5
-rw-r--r--nixos/modules/tasks/filesystems/zfs.nix32
-rw-r--r--nixos/modules/tasks/network-interfaces.nix6
-rw-r--r--nixos/modules/tasks/trackpoint.nix13
-rw-r--r--nixos/modules/virtualisation/azure-agent.nix28
-rw-r--r--nixos/modules/virtualisation/incus.nix4
-rw-r--r--nixos/modules/virtualisation/lxc-container.nix4
-rw-r--r--nixos/modules/virtualisation/lxc.nix26
-rw-r--r--nixos/modules/virtualisation/lxcfs.nix12
-rw-r--r--nixos/modules/virtualisation/lxd-agent.nix18
-rw-r--r--nixos/modules/virtualisation/lxd-virtual-machine.nix4
-rw-r--r--nixos/modules/virtualisation/lxd.nix6
-rw-r--r--nixos/modules/virtualisation/qemu-vm.nix2
-rw-r--r--nixos/modules/virtualisation/vmware-host.nix61
-rw-r--r--nixos/modules/virtualisation/waydroid.nix2
-rw-r--r--nixos/release-combined.nix4
-rw-r--r--nixos/tests/all-tests.nix16
-rw-r--r--nixos/tests/anbox.nix2
-rw-r--r--nixos/tests/angie-api.nix148
-rw-r--r--nixos/tests/avahi.nix2
-rw-r--r--nixos/tests/bootspec.nix31
-rw-r--r--nixos/tests/btrbk-section-order.nix7
-rw-r--r--nixos/tests/btrbk.nix1
-rw-r--r--nixos/tests/buildbot.nix2
-rw-r--r--nixos/tests/caddy.nix15
-rw-r--r--nixos/tests/ceph-single-node.nix21
-rw-r--r--nixos/tests/cinnamon-wayland.nix6
-rw-r--r--nixos/tests/cinnamon.nix6
-rw-r--r--nixos/tests/code-server.nix22
-rw-r--r--nixos/tests/containers-custom-pkgs.nix2
-rw-r--r--nixos/tests/containers-imperative.nix1
-rw-r--r--nixos/tests/dhparams.nix2
-rw-r--r--nixos/tests/docker-tools.nix2
-rw-r--r--nixos/tests/drawterm.nix58
-rw-r--r--nixos/tests/frr.nix2
-rw-r--r--nixos/tests/ft2-clone.nix4
-rw-r--r--nixos/tests/gitlab.nix2
-rw-r--r--nixos/tests/gns3-server.nix55
-rw-r--r--nixos/tests/google-oslogin/default.nix3
-rw-r--r--nixos/tests/guix/basic.nix6
-rw-r--r--nixos/tests/guix/publish.nix2
-rw-r--r--nixos/tests/harmonia.nix3
-rw-r--r--nixos/tests/home-assistant.nix4
-rw-r--r--nixos/tests/incron.nix6
-rw-r--r--nixos/tests/incus/container.nix8
-rw-r--r--nixos/tests/incus/preseed.nix4
-rw-r--r--nixos/tests/incus/socket-activated.nix4
-rw-r--r--nixos/tests/incus/virtual-machine.nix7
-rw-r--r--nixos/tests/initrd-network-openvpn/default.nix15
-rw-r--r--nixos/tests/installed-tests/flatpak.nix1
-rw-r--r--nixos/tests/installer-systemd-stage-1.nix1
-rw-r--r--nixos/tests/installer.nix86
-rw-r--r--nixos/tests/invidious.nix120
-rw-r--r--nixos/tests/iscsi-root.nix4
-rw-r--r--nixos/tests/kernel-generic.nix1
-rw-r--r--nixos/tests/kubo/default.nix4
-rw-r--r--nixos/tests/kubo/kubo.nix7
-rw-r--r--nixos/tests/livebook-service.nix4
-rw-r--r--nixos/tests/lvm2/systemd-stage-1.nix4
-rw-r--r--nixos/tests/lvm2/thinpool.nix2
-rw-r--r--nixos/tests/lvm2/vdo.nix4
-rw-r--r--nixos/tests/lxd/container.nix4
-rw-r--r--nixos/tests/lxd/nftables.nix6
-rw-r--r--nixos/tests/lxd/preseed.nix2
-rw-r--r--nixos/tests/lxd/ui.nix4
-rw-r--r--nixos/tests/lxd/virtual-machine.nix4
-rw-r--r--nixos/tests/mate.nix27
-rw-r--r--nixos/tests/mobilizon.nix4
-rw-r--r--nixos/tests/mongodb.nix2
-rw-r--r--nixos/tests/munin.nix2
-rw-r--r--nixos/tests/musescore.nix7
-rw-r--r--nixos/tests/mympd.nix27
-rw-r--r--nixos/tests/mysql/mariadb-galera.nix4
-rw-r--r--nixos/tests/mysql/mysql-backup.nix3
-rw-r--r--nixos/tests/mysql/mysql-replication.nix4
-rw-r--r--nixos/tests/mysql/mysql.nix4
-rw-r--r--nixos/tests/nextcloud/basic.nix10
-rw-r--r--nixos/tests/nextcloud/default.nix2
-rw-r--r--nixos/tests/nextcloud/with-postgresql-and-redis.nix2
-rw-r--r--nixos/tests/nginx-http3.nix182
-rw-r--r--nixos/tests/nginx-redirectcode.nix25
-rw-r--r--nixos/tests/nginx-variants.nix10
-rw-r--r--nixos/tests/nitter.nix21
-rw-r--r--nixos/tests/nixos-rebuild-specialisations.nix2
-rw-r--r--nixos/tests/npmrc.nix22
-rw-r--r--nixos/tests/oci-containers.nix2
-rw-r--r--nixos/tests/opentabletdriver.nix4
-rw-r--r--nixos/tests/pgjwt.nix2
-rw-r--r--nixos/tests/postgis.nix6
-rw-r--r--nixos/tests/prometheus-exporters.nix51
-rw-r--r--nixos/tests/promscale.nix2
-rw-r--r--nixos/tests/quicktun.nix18
-rw-r--r--nixos/tests/rspamd-trainer.nix155
-rw-r--r--nixos/tests/slimserver.nix4
-rw-r--r--nixos/tests/sogo.nix2
-rw-r--r--nixos/tests/spark/default.nix66
-rw-r--r--nixos/tests/ssh-agent-auth.nix51
-rw-r--r--nixos/tests/stub-ld.nix73
-rw-r--r--nixos/tests/stunnel.nix4
-rw-r--r--nixos/tests/sway.nix2
-rw-r--r--nixos/tests/systemd-boot.nix2
-rw-r--r--nixos/tests/systemd-initrd-networkd.nix3
-rw-r--r--nixos/tests/telegraf.nix1
-rw-r--r--nixos/tests/timescaledb.nix2
-rw-r--r--nixos/tests/tomcat.nix2
-rw-r--r--nixos/tests/tsja.nix2
-rw-r--r--nixos/tests/unbound.nix16
-rw-r--r--nixos/tests/varnish.nix6
-rw-r--r--nixos/tests/zammad.nix9
-rw-r--r--nixos/tests/zfs.nix3
304 files changed, 6304 insertions, 1626 deletions
diff --git a/nixos/doc/manual/administration/nixos-state.section.md b/nixos/doc/manual/administration/nixos-state.section.md
new file mode 100644
index 0000000000000..9819d613198c3
--- /dev/null
+++ b/nixos/doc/manual/administration/nixos-state.section.md
@@ -0,0 +1,28 @@
+# NixOS {#sec-nixos-state}
+
+## `/nix` {#sec-state-nix}
+
+NixOS needs the entirety of `/nix` to be persistent, as it includes:
+- `/nix/store`, which contains all the system's executables, libraries, and supporting data;
+- `/nix/var/nix`, which contains:
+  - the Nix daemon's database;
+  - roots whose transitive closure is preserved when garbage-collecting the Nix store;
+  - system-wide and per-user profiles.
+
+## `/boot` {#sec-state-boot}
+
+`/boot` should also be persistent, as it contains:
+- the kernel and initrd which the bootloader loads,
+- the bootloader's configuration, including the kernel's command-line which
+  determines the store path to use as system environment.
+
+
+## Users and groups {#sec-state-users}
+
+- `/var/lib/nixos` should persist: it holds state needed to generate stable
+  uids and gids for declaratively-managed users and groups, etc.
+- `users.mutableUsers` should be false, *or* the following files under `/etc`
+  should all persist:
+  - {manpage}`passwd(5)` and {manpage}`group(5)`,
+  - {manpage}`shadow(5)` and {manpage}`gshadow(5)`,
+  - {manpage}`subuid(5)` and {manpage}`subgid(5)`.
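
To make the above concrete, here is a minimal sketch of an impermanent configuration that keeps `/nix` and `/boot` persistent and avoids mutable user state; the tmpfs root and the disk labels are assumptions for illustration, not part of the manual text:

```nix
{
  # tmpfs root: everything outside the mounts below is lost on reboot.
  fileSystems."/" = {
    device = "none";
    fsType = "tmpfs";
    options = [ "defaults" "mode=755" ];
  };

  # /nix must be persistent and available at boot.
  fileSystems."/nix" = {
    device = "/dev/disk/by-label/nix";
    fsType = "ext4";
    neededForBoot = true;
  };

  # /boot holds the kernel, initrd and bootloader configuration.
  fileSystems."/boot" = {
    device = "/dev/disk/by-label/ESP";
    fsType = "vfat";
  };

  # With mutable users disabled, /etc/passwd, /etc/shadow, etc. are
  # regenerated on activation and do not need to be persisted.
  users.mutableUsers = false;
}
```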
diff --git a/nixos/doc/manual/administration/running.md b/nixos/doc/manual/administration/running.md
index 48e8c7c6668b7..83412d9b7af58 100644
--- a/nixos/doc/manual/administration/running.md
+++ b/nixos/doc/manual/administration/running.md
@@ -8,6 +8,7 @@ rebooting.chapter.md
 user-sessions.chapter.md
 control-groups.chapter.md
 logging.chapter.md
+system-state.chapter.md
 cleaning-store.chapter.md
 containers.chapter.md
 troubleshooting.chapter.md
diff --git a/nixos/doc/manual/administration/system-state.chapter.md b/nixos/doc/manual/administration/system-state.chapter.md
new file mode 100644
index 0000000000000..6840cc3902578
--- /dev/null
+++ b/nixos/doc/manual/administration/system-state.chapter.md
@@ -0,0 +1,17 @@
+# Necessary system state {#ch-system-state}
+
+Normally — on systems with a persistent `rootfs` — system services can persist state to
+the filesystem without administrator intervention.
+
+However, it is possible and not uncommon to create [impermanent systems], whose
+`rootfs` is either a `tmpfs` or reset during boot. While NixOS itself supports
+this kind of configuration, special care needs to be taken.
+
+[impermanent systems]: https://nixos.wiki/wiki/Impermanence
+
+
+```{=include=} sections
+nixos-state.section.md
+systemd-state.section.md
+zfs-state.section.md
+```
diff --git a/nixos/doc/manual/administration/systemd-state.section.md b/nixos/doc/manual/administration/systemd-state.section.md
new file mode 100644
index 0000000000000..84f074871a655
--- /dev/null
+++ b/nixos/doc/manual/administration/systemd-state.section.md
@@ -0,0 +1,52 @@
+# systemd {#sec-systemd-state}
+
+## `machine-id(5)` {#sec-machine-id}
+
+`systemd` uses a per-machine identifier — {manpage}`machine-id(5)` — which must be
+unique and persistent; otherwise, the system journal may fail to list earlier
+boots, etc.
+
+`systemd` generates a random `machine-id(5)` during boot if it does not already exist,
+and persists it in `/etc/machine-id`.  As such, it suffices to make that file persistent.
+
+Alternatively, it is possible to generate a random `machine-id(5)` ahead of time; while
+the specification allows for *any* hex-encoded 128-bit value, systemd itself uses
+[UUIDv4], *i.e.* random UUIDs, so it is preferable to do the same, in
+case some software assumes `machine-id(5)` to be a UUIDv4. Such a value can be
+generated with `uuidgen -r | tr -d -` (`tr` removes the dashes).
+
+Such a `machine-id(5)` can be set by writing it to `/etc/machine-id` or through
+the kernel's command-line, though NixOS' systemd maintainers [discourage] the
+latter approach.
+
+[UUIDv4]: https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)
+[discourage]: https://github.com/NixOS/nixpkgs/pull/268995
+
+
+## `/var/lib/systemd` {#sec-var-systemd}
+
+Moreover, `systemd` expects its state directory — `/var/lib/systemd` — to persist, for:
+- {manpage}`systemd-random-seed(8)`, which loads a 256-bit “seed” into the kernel's RNG
+  at boot time, and saves a fresh one during shutdown;
+- timers ({manpage}`systemd.timer(5)`) with `Persistent=yes`, which are run after boot
+  if they would have triggered while the system was shut down;
+- {manpage}`systemd-coredump(8)`, which stores core dumps there by default
+  (see {manpage}`coredump.conf(5)`);
+- {manpage}`systemd-timesyncd(8)`;
+- {manpage}`systemd-backlight(8)` and {manpage}`systemd-rfkill(8)`, which persist
+  hardware-related state;
+- possibly other things; this list is not meant to be exhaustive.
+
+In any case, making `/var/lib/systemd` persistent is recommended.
+
+
+## `/var/log/journal/{machine-id}` {#sec-var-journal}
+
+Lastly, {manpage}`systemd-journald(8)` writes the system's journal in binary
+form to `/var/log/journal/{machine-id}`; if (locally) persisting the entire log
+is desired, it is recommended to make all of `/var/log/journal` persistent.
+
+If not, one can set `Storage=volatile` in {manpage}`journald.conf(5)`
+([`services.journald.storage = "volatile";`](#opt-services.journald.storage)),
+which disables journal persistence and causes it to be written to
+`/run/log/journal`.
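
If journal persistence is not wanted, the `volatile` storage mode mentioned above can be set directly from the NixOS configuration; a minimal sketch:

```nix
{
  # Write the journal only to /run/log/journal; nothing survives a reboot.
  services.journald.storage = "volatile";
}
```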
diff --git a/nixos/doc/manual/administration/zfs-state.section.md b/nixos/doc/manual/administration/zfs-state.section.md
new file mode 100644
index 0000000000000..11ad5badea7ed
--- /dev/null
+++ b/nixos/doc/manual/administration/zfs-state.section.md
@@ -0,0 +1,16 @@
+# ZFS {#sec-zfs-state}
+
+When using ZFS, `/etc/zfs/zpool.cache` should be persistent (or a symlink to a persistent
+location) as it is the default value for the `cachefile` [property](man:zpoolprops(7)).
+
+This cachefile is used on system startup to discover ZFS pools, so ZFS pools
+holding the `rootfs` and/or early-boot datasets such as `/nix` can be set to
+`cachefile=none`.
+
+In principle, if there are no other pools attached to the system, `zpool.cache`
+does not need to be persisted; it is however *strongly recommended* to persist
+it, in case additional pools are added later on, temporarily or permanently:
+
+While mishandling the cachefile does not lead to data loss by itself, it may
+cause zpools not to be imported during boot, and services may then write to a
+location where a dataset was expected to be mounted.
diff --git a/nixos/doc/manual/development/option-types.section.md b/nixos/doc/manual/development/option-types.section.md
index 2ad3d6c4f9495..f9c7ac80018e4 100644
--- a/nixos/doc/manual/development/option-types.section.md
+++ b/nixos/doc/manual/development/option-types.section.md
@@ -13,6 +13,13 @@ merging is handled.
 `types.bool`
 
 :   A boolean, its values can be `true` or `false`.
+    All definitions must have the same value once priorities are taken into account; an error is thrown in case of a conflict.
+
+`types.boolByOr`
+
+:   A boolean, its values can be `true` or `false`.
+    The result is `true` if _any_ of multiple definitions is `true`.
+    In other words, definitions are merged with the logical _OR_ operator.
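
    A small sketch of the merge behaviour described here; the option name `example.enableFoo` is made up for illustration:

    ```nix
    { lib, ... }:
    {
      options.example.enableFoo = lib.mkOption {
        type = lib.types.boolByOr;
        default = false;
      };

      # If one module sets this to true and another to false, the merged
      # value is true: definitions are combined with logical OR, so no
      # conflict error is raised (unlike types.bool).
      config.example.enableFoo = true;
    }
    ```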
 
 `types.path`
 
diff --git a/nixos/doc/manual/installation/changing-config.chapter.md b/nixos/doc/manual/installation/changing-config.chapter.md
index 12abf90b718fd..9e56b15a880f6 100644
--- a/nixos/doc/manual/installation/changing-config.chapter.md
+++ b/nixos/doc/manual/installation/changing-config.chapter.md
@@ -55,6 +55,14 @@ which causes the new configuration (and previous ones created using
 This can be useful to separate test configurations from "stable"
 configurations.
 
+A repl, or read-eval-print loop, is also available. You can inspect your configuration and use the Nix language with
+
+```ShellSession
+# nixos-rebuild repl
+```
+
+Your configuration is loaded into the `config` variable. Use tab for autocompletion and the `:r` command to reload the configuration files. See `:?` or [`nix repl` in the Nix manual](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-repl.html) to learn more.
+
 Finally, you can do
 
 ```ShellSession
diff --git a/nixos/doc/manual/release-notes/release-notes.md b/nixos/doc/manual/release-notes/release-notes.md
index 3f926fb21a5ca..0514a1b0044af 100644
--- a/nixos/doc/manual/release-notes/release-notes.md
+++ b/nixos/doc/manual/release-notes/release-notes.md
@@ -3,6 +3,7 @@
 This section lists the release notes for each stable version of NixOS and current unstable revision.
 
 ```{=include=} sections
+rl-2405.section.md
 rl-2311.section.md
 rl-2305.section.md
 rl-2211.section.md
diff --git a/nixos/doc/manual/release-notes/rl-2111.section.md b/nixos/doc/manual/release-notes/rl-2111.section.md
index 400eb1062d9a7..8edf4fd35e4fb 100644
--- a/nixos/doc/manual/release-notes/rl-2111.section.md
+++ b/nixos/doc/manual/release-notes/rl-2111.section.md
@@ -100,7 +100,7 @@ In addition to numerous new and upgraded packages, this release has the followin
 - [opensnitch](https://github.com/evilsocket/opensnitch), an application firewall. Available as [services.opensnitch](#opt-services.opensnitch.enable).
 
 - [snapraid](https://www.snapraid.it/), a backup program for disk arrays.
-  Available as [snapraid](#opt-snapraid.enable).
+  Available as [snapraid](#opt-services.snapraid.enable).
 
 - [Hockeypuck](https://github.com/hockeypuck/hockeypuck), a OpenPGP Key Server. Available as [services.hockeypuck](#opt-services.hockeypuck.enable).
 
diff --git a/nixos/doc/manual/release-notes/rl-2311.section.md b/nixos/doc/manual/release-notes/rl-2311.section.md
index e693067561a4d..1aef1828908f8 100644
--- a/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -20,7 +20,7 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
   - [Breaking Changes](#sec-release-23.11-nixos-breaking-changes)
   - [New Services](#sec-release-23.11-nixos-new-services)
   - [Other Notable Changes](#sec-release-23.11-nixos-notable-changes)
-- [Nixpkgs Library Changes](#sec-release-23.11-nixpkgs-lib)
+- [Nixpkgs Library](#sec-release-23.11-nixpkgs-lib)
   - [Breaking Changes](#sec-release-23.11-lib-breaking)
   - [Additions and Improvements](#sec-release-23.11-lib-additions-improvements)
   - [Deprecations](#sec-release-23.11-lib-deprecations)
@@ -1313,18 +1313,26 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
 - When using [split parity files](https://www.snapraid.it/manual#7.1) in `snapraid`,
   the snapraid-sync systemd service will no longer fail to run.
 
+- `wpa_supplicant`'s configuration file cannot be read by non-root users, and
+  secrets (such as Pre-Shared Keys) can safely be passed via
+  `networking.wireless.environmentFile`.
+
+  Previously, when `userControlled.enable` was set (it is off by default), the configuration
+  file could be read by users in both `wheel` and `userControlled.group` (which defaults to `wheel`).
+
+
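
Relating to the `wpa_supplicant` note above, a sketch of passing a Pre-Shared Key through the environment file; the file path, network name, and variable name are placeholders:

```nix
{
  networking.wireless.enable = true;
  # The file contains lines of the form: PSK_HOME=your-passphrase
  networking.wireless.environmentFile = "/run/secrets/wireless.env";
  # @PSK_HOME@ is substituted from the environment file at runtime.
  networking.wireless.networks."home-network".psk = "@PSK_HOME@";
}
```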
 ## Nixpkgs Library {#sec-release-23.11-nixpkgs-lib}
 
 ### Breaking Changes {#sec-release-23.11-lib-breaking}
 
-- [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.foldl-prime)
+- [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl-prime)
   now always evaluates the initial accumulator argument first. If you depend on
   the lazier behavior, consider using
-  [`lib.lists.foldl`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.foldl)
+  [`lib.lists.foldl`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl)
   or
   [`builtins.foldl'`](https://nixos.org/manual/nix/stable/language/builtins.html#builtins-foldl')
   instead.
-- [`lib.attrsets.foldlAttrs`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.attrsets.foldlAttrs)
+- [`lib.attrsets.foldlAttrs`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.attrsets.foldlAttrs)
   now always evaluates the initial accumulator argument first.
 - Now that the internal NixOS transition to Markdown documentation is complete,
   `lib.options.literalDocBook` has been removed after deprecation in 22.11.
@@ -1332,7 +1340,7 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
 
 ### Additions and Improvements {#sec-release-23.11-lib-additions-improvements}
 
-- [`lib.fileset`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-fileset):
+- [`lib.fileset`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-fileset):
   A new sub-library to select local files to use for sources, designed to be
   easy and safe to use.
 
@@ -1341,7 +1349,7 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
   post](https://www.tweag.io/blog/2023-11-28-file-sets/) or [the
   tutorial](https://nix.dev/tutorials/file-sets).
 
-- [`lib.gvariant`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-gvariant):
+- [`lib.gvariant`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-gvariant):
   A partial and basic implementation of GVariant formatted strings. See
   [GVariant Format
   Strings](https://docs.gtk.org/glib/gvariant-format-strings.html) for details.
@@ -1351,58 +1359,58 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
   change in backwards incompatible ways without prior notice.
   :::
 
-- [`lib.asserts`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-asserts):
+- [`lib.asserts`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-asserts):
   New function:
-  [`assertEachOneOf`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.asserts.assertEachOneOf).
-- [`lib.attrsets`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-attrsets):
+  [`assertEachOneOf`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.asserts.assertEachOneOf).
+- [`lib.attrsets`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-attrsets):
   New function:
-  [`attrsToList`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.attrsets.attrsToList).
-- [`lib.customisation`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-customisation):
+  [`attrsToList`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.attrsets.attrsToList).
+- [`lib.customisation`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-customisation):
   New function:
-  [`makeScopeWithSplicing'`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.customisation.makeScopeWithSplicing-prime).
-- [`lib.fixedPoints`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-fixedPoints):
+  [`makeScopeWithSplicing'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.customisation.makeScopeWithSplicing-prime).
+- [`lib.fixedPoints`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-fixedPoints):
   Documentation improvements for
-  [`lib.fixedPoints.fix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.fixedPoints.fix).
+  [`lib.fixedPoints.fix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.fixedPoints.fix).
 - `lib.generators`: New functions:
-  [`mkDconfKeyValue`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.generators.mkDconfKeyValue),
-  [`toDconfINI`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.generators.toDconfINI).
+  [`mkDconfKeyValue`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.generators.mkDconfKeyValue),
+  [`toDconfINI`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.generators.toDconfINI).
 
   `lib.generators.toKeyValue` now supports the `indent` attribute in its first
   argument.
-- [`lib.lists`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-lists):
+- [`lib.lists`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-lists):
   New functions:
-  [`findFirstIndex`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.findFirstIndex),
-  [`hasPrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.hasPrefix),
-  [`removePrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.removePrefix),
-  [`commonPrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.commonPrefix),
-  [`allUnique`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.allUnique).
+  [`findFirstIndex`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.findFirstIndex),
+  [`hasPrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.hasPrefix),
+  [`removePrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.removePrefix),
+  [`commonPrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.commonPrefix),
+  [`allUnique`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.allUnique).
 
   Documentation improvements for
-  [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.foldl-prime).
-- [`lib.meta`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-meta):
+  [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl-prime).
+- [`lib.meta`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-meta):
   Documentation of functions now gets rendered
-- [`lib.path`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-path):
+- [`lib.path`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-path):
   New functions:
-  [`hasPrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.path.hasPrefix),
-  [`removePrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.path.removePrefix),
-  [`splitRoot`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.path.splitRoot),
-  [`subpath.components`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.path.subpath.components).
-- [`lib.strings`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-strings):
+  [`hasPrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.path.hasPrefix),
+  [`removePrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.path.removePrefix),
+  [`splitRoot`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.path.splitRoot),
+  [`subpath.components`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.path.subpath.components).
+- [`lib.strings`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-strings):
   New functions:
-  [`replicate`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.strings.replicate),
-  [`cmakeOptionType`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.strings.cmakeOptionType),
-  [`cmakeBool`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.strings.cmakeBool),
-  [`cmakeFeature`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.strings.cmakeFeature).
-- [`lib.trivial`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-trivial):
+  [`replicate`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.strings.replicate),
+  [`cmakeOptionType`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.strings.cmakeOptionType),
+  [`cmakeBool`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.strings.cmakeBool),
+  [`cmakeFeature`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.strings.cmakeFeature).
+- [`lib.trivial`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-trivial):
   New function:
-  [`mirrorFunctionArgs`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.trivial.mirrorFunctionArgs).
+  [`mirrorFunctionArgs`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.trivial.mirrorFunctionArgs).
 - `lib.systems`: New function:
-  [`equals`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.systems.equals).
-- [`lib.options`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-options):
+  [`equals`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.systems.equals).
+- [`lib.options`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-options):
   Improved documentation for
-  [`mkPackageOption`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.options.mkPackageOption).
+  [`mkPackageOption`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.options.mkPackageOption).
 
-  [`mkPackageOption`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.options.mkPackageOption).
+  [`mkPackageOption`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.options.mkPackageOption).
   now also supports the `pkgsText` attribute.
 
 Module system:
diff --git a/nixos/doc/manual/release-notes/rl-2405.section.md b/nixos/doc/manual/release-notes/rl-2405.section.md
index 24ee46b649d14..5a748607f2a58 100644
--- a/nixos/doc/manual/release-notes/rl-2405.section.md
+++ b/nixos/doc/manual/release-notes/rl-2405.section.md
@@ -12,6 +12,12 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - `linuxPackages_testing_bcachefs` is now fully deprecated by `linuxPackages_testing`, and is therefore no longer available.
 
+- NixOS now installs a stub ELF loader that prints an informative error message when users attempt to run binaries not made for NixOS.
+   - This can be disabled through the `environment.stub-ld.enable` option.
+   - If you use `programs.nix-ld.enable`, no changes are needed. The stub will be disabled automatically.
+
+- Julia environments can now be built with arbitrary packages from the ecosystem using the `.withPackages` function. For example: `julia.withPackages ["Plots"]`.
+
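
A sketch of the loader-related switches mentioned in the list above; the values shown are one possible choice, not a recommendation:

```nix
{
  # Opt out of the new stub ELF loader entirely...
  environment.stub-ld.enable = false;

  # ...or leave it enabled and use nix-ld instead; enabling nix-ld
  # disables the stub automatically.
  # programs.nix-ld.enable = true;
}
```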
 ## New Services {#sec-release-24.05-new-services}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@@ -20,18 +26,75 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - [maubot](https://github.com/maubot/maubot), a plugin-based Matrix bot framework. Available as [services.maubot](#opt-services.maubot.enable).
 
+- [GNS3](https://www.gns3.com/), a network software emulator. Available as [services.gns3-server](#opt-services.gns3-server.enable).
+
+- [rspamd-trainer](https://gitlab.com/onlime/rspamd-trainer), a helper script which reads mails from a specific mail inbox and feeds them into rspamd for spam/ham training.
+
+- [ollama](https://ollama.ai), a server for running large language models locally.
+
 - [Anki Sync Server](https://docs.ankiweb.net/sync-server.html), the official sync server built into recent versions of Anki. Available as [services.anki-sync-server](#opt-services.anki-sync-server.enable).
The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been marked deprecated and will be dropped after 24.05 due to lack of maintenance of the anki-sync-server software.
 
+- [ping_exporter](https://github.com/czerwonk/ping_exporter), a Prometheus exporter for ICMP echo requests. Available as [services.prometheus.exporters.ping](#opt-services.prometheus.exporters.ping.enable).
+
 - [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable).
 
+- [TuxClocker](https://github.com/Lurkki14/tuxclocker), a hardware control and monitoring program. Available as [programs.tuxclocker](#opt-programs.tuxclocker.enable).
+
 ## Backward Incompatibilities {#sec-release-24.05-incompatibilities}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
+- The `power.ups` module now generates `upsd.conf`, `upsd.users` and `upsmon.conf` automatically from a set of new configuration options. This breaks compatibility with existing `power.ups` setups where these files were created manually. Back up these files before upgrading NixOS.
+
+- `k9s` was updated to v0.30. There have been various breaking changes in the config file format;
+  check out the changelog of [v0.29](https://github.com/derailed/k9s/releases/tag/v0.29.0) and
+  [v0.30](https://github.com/derailed/k9s/releases/tag/v0.30.0) for details. It is recommended
+  to back up your current configuration and let k9s recreate the new base configuration.
+
+- `idris2` was updated to v0.7.0. This version introduces breaking changes. Check out the [changelog](https://github.com/idris-lang/Idris2/blob/v0.7.0/CHANGELOG.md#v070) for details.
+
+- `nitter` requires a `guest_accounts.jsonl` to be provided as a path or loaded into the default location at `/var/lib/nitter/guest_accounts.jsonl`. See [Guest Account Branch Deployment](https://github.com/zedeus/nitter/wiki/Guest-Account-Branch-Deployment) for details.
+
+- Invidious has changed its default database username from `kemal` to `invidious`. Setups involving an externally provisioned database (i.e. `services.invidious.database.createLocally == false`) should adjust their configuration accordingly. The old `kemal` user will not be removed automatically even when the database is provisioned automatically. ([#265857](https://github.com/NixOS/nixpkgs/pull/265857))
+
+- `paperless`' `services.paperless.extraConfig` setting has been removed and replaced by the freeform option `services.paperless.settings`.
+
 - `mkosi` was updated to v19. Parts of the user interface have changed. Consult the
   [release notes](https://github.com/systemd/mkosi/releases/tag/v19) for a list of changes.
 
+- The `kanata` package has been updated to v1.5.0, which includes [breaking changes](https://github.com/jtroo/kanata/releases/tag/v1.5.0).
+
+- The latest available version of Nextcloud is v28 (available as `pkgs.nextcloud28`). The installation logic is as follows:
+  - If [`services.nextcloud.package`](#opt-services.nextcloud.package) is specified explicitly, this package will be installed (**recommended**)
+  - If [`system.stateVersion`](#opt-system.stateVersion) is >=24.05, `pkgs.nextcloud28` will be installed by default.
+  - If [`system.stateVersion`](#opt-system.stateVersion) is >=23.11, `pkgs.nextcloud27` will be installed by default.
+  - Please note that upgrading directly from v26 (or older) to v28 is not possible; Nextcloud prohibits skipping major versions while upgrading. Please upgrade to `nextcloud27` (or an earlier intermediate version) first, by declaring [`services.nextcloud.package = pkgs.nextcloud27;`](options.html#opt-services.nextcloud.package).
+
+- `services.resolved.fallbackDns` can now be used to disable the upstream fallback servers entirely by setting it to an empty list. To keep the previous behaviour (the upstream defaults), set it to `null`, the new default, instead.
+
+- `services.avahi.nssmdns` got split into `services.avahi.nssmdns4` and `services.avahi.nssmdns6` which enable the mDNS NSS switch for IPv4 and IPv6 respectively.
+  Since most mDNS responders only register IPv4 addresses, most users want to keep the IPv6 support disabled to avoid long timeouts.
+
+- `networking.iproute2.enable` no longer sets `environment.etc."iproute2/rt_tables".text`.
+
+  Setting `environment.etc."iproute2/{CONFIG_FILE_NAME}".text` will override the whole configuration file instead of appending it to the upstream configuration file.
+
+  `CONFIG_FILE_NAME` includes `bpf_pinning`, `ematch_map`, `group`, `nl_protos`, `rt_dsfield`, `rt_protos`, `rt_realms`, `rt_scopes`, and `rt_tables`.
+
+- The executable file names for `firefox-devedition`, `firefox-beta` and `firefox-esr` now match their package names, which is consistent with the `firefox-*-bin` packages. The desktop entries are also updated so that you can have multiple editions of Firefox in your app launcher.
+
+- The behavior of the `systemd.oomd` module has changed as follows:
+
+  - `ManagedOOMMemoryPressureLimit` was raised from 50% to 80%. This should make systemd-oomd kill things less often and fix issues like [this one](https://pagure.io/fedora-workstation/issue/358).
+    Reference: [commit](https://src.fedoraproject.org/rpms/systemd/c/806c95e1c70af18f81d499b24cd7acfa4c36ffd6?branch=806c95e1c70af18f81d499b24cd7acfa4c36ffd6)
+
+  - The swap policy was removed. This helps prevent killing processes when the user's swap is small.
+
+  - The memory pressure policy was expanded to `system.slice`, `user-.slice`, and all user-owned slices. Reference: [commit](https://src.fedoraproject.org/rpms/systemd/c/7665e1796f915dedbf8e014f0a78f4f576d609bb)
+
+  - `systemd.oomd.enableUserServices` has been renamed to `systemd.oomd.enableUserSlices`.
+
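
For the name-service and resolver changes listed above, a configuration sketch (the chosen values are illustrative):

```nix
{
  # services.avahi.nssmdns was split into nssmdns4/nssmdns6; IPv6 is usually
  # left off to avoid long timeouts with responders that only register IPv4.
  services.avahi.enable = true;
  services.avahi.nssmdns4 = true;
  services.avahi.nssmdns6 = false;

  # An empty list now disables systemd-resolved's upstream fallback servers;
  # null (the new default) keeps the upstream defaults.
  services.resolved.fallbackDns = [ ];
}
```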
 ## Other Notable Changes {#sec-release-24.05-notable-changes}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@@ -40,20 +103,61 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - Cinnamon has been updated to 6.0. Please beware that the [Wayland session](https://blog.linuxmint.com/?p=4591) is still experimental in this release.
 
+- `services.postgresql.extraPlugins` now also accepts a function that returns a list of packages, in addition to a plain list.
+  For example, a configuration line like ``services.postgresql.extraPlugins = with pkgs.postgresql_11.pkgs; [ postgis ];`` should be changed to ``services.postgresql.extraPlugins = ps: with ps; [ postgis ];``.
+
 - Programs written in [Nim](https://nim-lang.org/) are built with libraries selected by lockfiles.
   The `nimPackages` and `nim2Packages` sets have been removed.
   See https://nixos.org/manual/nixpkgs/unstable#nim for more information.
 
+- [Portunus](https://github.com/majewsky/portunus) has been updated to major version 2.
+  This version of Portunus supports strong password hashes, but the legacy hash SHA-256 is also still supported to ensure a smooth migration of existing user accounts.
+  After upgrading, follow the instructions on the [upstream release notes](https://github.com/majewsky/portunus/releases/tag/v2.0.0) to upgrade all user accounts to strong password hashes.
+  Support for weak password hashes will be removed in NixOS 24.11.
+
+- `libass` now uses the native CoreText backend on Darwin, which may fix subtitle rendering issues with `mpv`, `ffmpeg`, etc.
+
+- [Lilypond](https://lilypond.org/index.html) and [Denemo](https://www.denemo.org) are now compiled with Guile 3.0.
+
+- The following options of the Nextcloud module were moved into [`services.nextcloud.extraOptions`](#opt-services.nextcloud.extraOptions) and renamed to match the name from Nextcloud's `config.php`:
+  - `logLevel` -> [`loglevel`](#opt-services.nextcloud.extraOptions.loglevel),
+  - `logType` -> [`log_type`](#opt-services.nextcloud.extraOptions.log_type),
+  - `defaultPhoneRegion` -> [`default_phone_region`](#opt-services.nextcloud.extraOptions.default_phone_region),
+  - `overwriteProtocol` -> [`overwriteprotocol`](#opt-services.nextcloud.extraOptions.overwriteprotocol),
+  - `skeletonDirectory` -> [`skeletondirectory`](#opt-services.nextcloud.extraOptions.skeletondirectory),
+  - `globalProfiles` -> [`profile.enabled`](#opt-services.nextcloud.extraOptions._profile.enabled_),
+  - `extraTrustedDomains` -> [`trusted_domains`](#opt-services.nextcloud.extraOptions.trusted_domains) and
+  - `trustedProxies` -> [`trusted_proxies`](#opt-services.nextcloud.extraOptions.trusted_proxies).
+
+- The option `services.nextcloud.config.dbport` of the Nextcloud module was removed to match upstream.
+  The port can be specified in [`services.nextcloud.config.dbhost`](#opt-services.nextcloud.config.dbhost).
+
 - The Yama LSM is now enabled by default in the kernel, which prevents ptracing
   non-child processes. This means you will not be able to attach gdb to an
   existing process, but will need to start that process from gdb (so it is a
   child). Or you can set `boot.kernel.sysctl."kernel.yama.ptrace_scope"` to 0.
 
+- [Nginx virtual hosts](#opt-services.nginx.virtualHosts) using `forceSSL` or
+  `globalRedirect` can now have redirect codes other than 301 through
+  `redirectCode`.
+
+- The source of the `mockgen` package has changed to the [go.uber.org/mock](https://github.com/uber-go/mock) fork because [the original repository is no longer maintained](https://github.com/golang/mock#gomock).
+
+- [](#opt-boot.kernel.sysctl._net.core.wmem_max_) changed from a string to an integer due to the addition of custom merge behaviour (the highest defined value is taken, to avoid conflicts between two services trying to set that value), just as [](#opt-boot.kernel.sysctl._net.core.rmem_max_) has been since 22.11.
+
+- `services.zfs.zed.enableMail` now uses the global `sendmail` wrapper defined by an email module
+  (such as msmtp or Postfix). It no longer requires using a special ZFS build with email support.
+
 - Gitea 1.21 upgrade has several breaking changes, including:
   - Custom themes and other assets that were previously stored in `custom/public/*` now belong in `custom/public/assets/*`
   - New instances of Gitea using MySQL now ignore the `[database].CHARSET` config option and always use the `utf8mb4` charset, existing instances should migrate via the `gitea doctor convert` CLI command.
 
- The `hardware.pulseaudio` module now sets the permissions of the pulse user's home directory to 755 when running in "systemWide" mode. It fixes [issue 114399](https://github.com/NixOS/nixpkgs/issues/114399).
 
+- The `btrbk` module now automatically selects and provides the required compression
+  program depending on the configured `stream_compress` option. Since this
+  replaces the need for the `extraPackages` option, this option will be
+  deprecated in future releases.
+
- QtMultimedia has changed its default backend to `QT_MEDIA_BACKEND=ffmpeg` (previously `gstreamer` on Linux or `darwin` on macOS).
   The previous native backends remain available but are now minimally maintained. Refer to [upstream documentation](https://doc.qt.io/qt-6/qtmultimedia-index.html#ffmpeg-as-the-default-backend) for further details about each platform.
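
The new `redirectCode` virtual-host setting noted above could be used roughly as follows (the host name is a placeholder):

```nix
{
  services.nginx.enable = true;
  services.nginx.virtualHosts."example.org" = {
    forceSSL = true;
    enableACME = true;
    # Redirect with 308 (permanent, method-preserving) instead of the
    # previously hard-coded 301.
    redirectCode = 308;
  };
}
```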
diff --git a/nixos/lib/make-disk-image.nix b/nixos/lib/make-disk-image.nix
index e5d82f4de7c9d..1a33abd01ea18 100644
--- a/nixos/lib/make-disk-image.nix
+++ b/nixos/lib/make-disk-image.nix
@@ -522,11 +522,16 @@ let format' = format; in let
     chmod 0644 $efiVars
   '';
 
+  createHydraBuildProducts = ''
+    mkdir -p $out/nix-support
+    echo "file ${format}-image $out/${filename}" >> $out/nix-support/hydra-build-products
+  '';
+
   buildImage = pkgs.vmTools.runInLinuxVM (
     pkgs.runCommand name {
       preVM = prepareImage + lib.optionalString touchEFIVars createEFIVars;
       buildInputs = with pkgs; [ util-linux e2fsprogs dosfstools ];
-      postVM = moveOrConvertImage + postVM;
+      postVM = moveOrConvertImage + createHydraBuildProducts + postVM;
       QEMU_OPTS =
         concatStringsSep " " (lib.optional useEFIBoot "-drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}"
         ++ lib.optionals touchEFIVars [
@@ -616,5 +621,5 @@ let format' = format; in let
 in
   if onlyNixStore then
     pkgs.runCommand name {}
-      (prepareImage + moveOrConvertImage + postVM)
+      (prepareImage + moveOrConvertImage + createHydraBuildProducts + postVM)
   else buildImage
diff --git a/nixos/lib/test-driver/test_driver/machine.py b/nixos/lib/test-driver/test_driver/machine.py
index f430321bb6076..da60b669fa27e 100644
--- a/nixos/lib/test-driver/test_driver/machine.py
+++ b/nixos/lib/test-driver/test_driver/machine.py
@@ -447,8 +447,7 @@ class Machine:
         """
 
         def check_active(_: Any) -> bool:
-            info = self.get_unit_info(unit, user)
-            state = info["ActiveState"]
+            state = self.get_unit_property(unit, "ActiveState", user)
             if state == "failed":
                 raise Exception(f'unit "{unit}" reached state "{state}"')
 
@@ -491,6 +490,35 @@ class Machine:
             if line_pattern.match(line)
         )
 
+    def get_unit_property(
+        self,
+        unit: str,
+        property: str,
+        user: Optional[str] = None,
+    ) -> str:
+        status, lines = self.systemctl(
+            f'--no-pager show "{unit}" --property="{property}"',
+            user,
+        )
+        if status != 0:
+            raise Exception(
+                f'retrieving systemctl property "{property}" for unit "{unit}"'
+                + ("" if user is None else f' under user "{user}"')
+                + f" failed with exit code {status}"
+            )
+
+        invalid_output_message = (
+            f'systemctl show --property "{property}" "{unit}"'
+            f" produced invalid output: {lines}"
+        )
+
+        line_pattern = re.compile(r"^([^=]+)=(.*)$")
+        match = line_pattern.match(lines)
+        assert match is not None, invalid_output_message
+
+        assert match[1] == property, invalid_output_message
+        return match[2]
+
     def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
         """
         Runs `systemctl` commands with optional support for
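
A sketch of how the new `get_unit_property` helper might be called from a NixOS VM test; the test name and the asserted unit are made up for illustration:

```nix
import ./make-test-python.nix ({ pkgs, ... }: {
  name = "unit-property-example";

  nodes.machine = { ... }: { };

  testScript = ''
    machine.wait_for_unit("multi-user.target")
    # Returns the raw value of a single systemctl property, e.g. "active".
    state = machine.get_unit_property("systemd-journald.service", "ActiveState")
    assert state == "active", f"unexpected ActiveState: {state}"
  '';
})
```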
diff --git a/nixos/maintainers/scripts/ec2/create-amis.sh b/nixos/maintainers/scripts/ec2/create-amis.sh
index 0c1656efaf1ca..d182c5c2a4794 100755
--- a/nixos/maintainers/scripts/ec2/create-amis.sh
+++ b/nixos/maintainers/scripts/ec2/create-amis.sh
@@ -27,31 +27,37 @@ var ${bucket:=nixos-amis}
 var ${service_role_name:=vmimport}
 
 # Output of the command:
-# > aws ec2 describe-regions --all-regions --query "Regions[].{Name:RegionName}" --output text | sort
+# $ nix-shell -I nixpkgs=. -p awscli --run 'aws ec2 describe-regions --region us-east-1 --all-regions --query "Regions[].{Name:RegionName}" --output text | sort | sed -e s/^/\ \ /'
 var ${regions:=
-         af-south-1
-         ap-east-1
-         ap-northeast-1
-         ap-northeast-2
-         ap-northeast-3
-         ap-south-1
-         ap-southeast-1
-         ap-southeast-2
-         ap-southeast-3
-         ca-central-1
-         eu-central-1
-         eu-north-1
-         eu-south-1
-         eu-west-1
-         eu-west-2
-         eu-west-3
-         me-south-1
-         sa-east-1
-         us-east-1
-         us-east-2
-         us-west-1
-         us-west-2
-     }
+  af-south-1
+  ap-east-1
+  ap-northeast-1
+  ap-northeast-2
+  ap-northeast-3
+  ap-south-1
+  ap-south-2
+  ap-southeast-1
+  ap-southeast-2
+  ap-southeast-3
+  ap-southeast-4
+  ca-central-1
+  eu-central-1
+  eu-central-2
+  eu-north-1
+  eu-south-1
+  eu-south-2
+  eu-west-1
+  eu-west-2
+  eu-west-3
+  il-central-1
+  me-central-1
+  me-south-1
+  sa-east-1
+  us-east-1
+  us-east-2
+  us-west-1
+  us-west-2
+}
 
 regions=($regions)
 
diff --git a/nixos/modules/config/iproute2.nix b/nixos/modules/config/iproute2.nix
index 78bd07d680e20..0cde57b759be3 100644
--- a/nixos/modules/config/iproute2.nix
+++ b/nixos/modules/config/iproute2.nix
@@ -18,10 +18,9 @@ in
   };
 
   config = mkIf cfg.enable {
-    environment.etc."iproute2/rt_tables" = {
+    environment.etc."iproute2/rt_tables.d/nixos.conf" = {
       mode = "0644";
-      text = (fileContents "${pkgs.iproute2}/lib/iproute2/rt_tables")
-        + (optionalString (cfg.rttablesExtraConfig != "") "\n\n${cfg.rttablesExtraConfig}");
+      text = cfg.rttablesExtraConfig;
     };
   };
 }
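
With this change, extra routing tables are written to `/etc/iproute2/rt_tables.d/nixos.conf` instead of being appended to the upstream `rt_tables`; a usage sketch (table number and name are placeholders):

```nix
{
  networking.iproute2.enable = true;
  # Written verbatim to /etc/iproute2/rt_tables.d/nixos.conf.
  networking.iproute2.rttablesExtraConfig = ''
    200 custom
  '';
}
```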
diff --git a/nixos/modules/config/ldap.nix b/nixos/modules/config/ldap.nix
index d2f01fb87d32d..e374e4a7a27e9 100644
--- a/nixos/modules/config/ldap.nix
+++ b/nixos/modules/config/ldap.nix
@@ -226,18 +226,6 @@ in
       "ldap.conf" = ldapConfig;
     };
 
-    system.activationScripts = mkIf (!cfg.daemon.enable) {
-      ldap = stringAfter [ "etc" "groups" "users" ] ''
-        if test -f "${cfg.bind.passwordFile}" ; then
-          umask 0077
-          conf="$(mktemp)"
-          printf 'bindpw %s\n' "$(cat ${cfg.bind.passwordFile})" |
-          cat ${ldapConfig.source} - >"$conf"
-          mv -fT "$conf" /etc/ldap.conf
-        fi
-      '';
-    };
-
     system.nssModules = mkIf cfg.nsswitch (singleton (
       if cfg.daemon.enable then nss_pam_ldapd else nss_ldap
     ));
@@ -258,42 +246,63 @@ in
       };
     };
 
-    systemd.services = mkIf cfg.daemon.enable {
-      nslcd = {
-        wantedBy = [ "multi-user.target" ];
-
-        preStart = ''
-          umask 0077
-          conf="$(mktemp)"
-          {
-            cat ${nslcdConfig}
-            test -z '${cfg.bind.distinguishedName}' -o ! -f '${cfg.bind.passwordFile}' ||
-            printf 'bindpw %s\n' "$(cat '${cfg.bind.passwordFile}')"
-            test -z '${cfg.daemon.rootpwmoddn}' -o ! -f '${cfg.daemon.rootpwmodpwFile}' ||
-            printf 'rootpwmodpw %s\n' "$(cat '${cfg.daemon.rootpwmodpwFile}')"
-          } >"$conf"
-          mv -fT "$conf" /run/nslcd/nslcd.conf
-        '';
-
-        restartTriggers = [
-          nslcdConfig
-          cfg.bind.passwordFile
-          cfg.daemon.rootpwmodpwFile
-        ];
-
-        serviceConfig = {
-          ExecStart = "${nslcdWrapped}/bin/nslcd";
-          Type = "forking";
-          Restart = "always";
-          User = "nslcd";
-          Group = "nslcd";
-          RuntimeDirectory = [ "nslcd" ];
-          PIDFile = "/run/nslcd/nslcd.pid";
-          AmbientCapabilities = "CAP_SYS_RESOURCE";
+    systemd.services = mkMerge [
+      (mkIf (!cfg.daemon.enable) {
+        ldap-password = {
+          wantedBy = [ "sysinit.target" ];
+          before = [ "sysinit.target" "shutdown.target" ];
+          conflicts = [ "shutdown.target" ];
+          unitConfig.DefaultDependencies = false;
+          serviceConfig.Type = "oneshot";
+          serviceConfig.RemainAfterExit = true;
+          script = ''
+            if test -f "${cfg.bind.passwordFile}" ; then
+              umask 0077
+              conf="$(mktemp)"
+              printf 'bindpw %s\n' "$(cat ${cfg.bind.passwordFile})" |
+              cat ${ldapConfig.source} - >"$conf"
+              mv -fT "$conf" /etc/ldap.conf
+            fi
+          '';
         };
-      };
+      })
+
+      (mkIf cfg.daemon.enable {
+        nslcd = {
+          wantedBy = [ "multi-user.target" ];
+
+          preStart = ''
+            umask 0077
+            conf="$(mktemp)"
+            {
+              cat ${nslcdConfig}
+              test -z '${cfg.bind.distinguishedName}' -o ! -f '${cfg.bind.passwordFile}' ||
+              printf 'bindpw %s\n' "$(cat '${cfg.bind.passwordFile}')"
+              test -z '${cfg.daemon.rootpwmoddn}' -o ! -f '${cfg.daemon.rootpwmodpwFile}' ||
+              printf 'rootpwmodpw %s\n' "$(cat '${cfg.daemon.rootpwmodpwFile}')"
+            } >"$conf"
+            mv -fT "$conf" /run/nslcd/nslcd.conf
+          '';
 
-    };
+          restartTriggers = [
+            nslcdConfig
+            cfg.bind.passwordFile
+            cfg.daemon.rootpwmodpwFile
+          ];
+
+          serviceConfig = {
+            ExecStart = "${nslcdWrapped}/bin/nslcd";
+            Type = "forking";
+            Restart = "always";
+            User = "nslcd";
+            Group = "nslcd";
+            RuntimeDirectory = [ "nslcd" ];
+            PIDFile = "/run/nslcd/nslcd.pid";
+            AmbientCapabilities = "CAP_SYS_RESOURCE";
+          };
+        };
+      })
+    ];
 
   };
 
diff --git a/nixos/modules/config/ldso.nix b/nixos/modules/config/ldso.nix
new file mode 100644
index 0000000000000..72ae3958d8869
--- /dev/null
+++ b/nixos/modules/config/ldso.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) last splitString mkOption types mdDoc optionals;
+
+  libDir = pkgs.stdenv.hostPlatform.libDir;
+  ldsoBasename = builtins.unsafeDiscardStringContext (last (splitString "/" pkgs.stdenv.cc.bintools.dynamicLinker));
+
+  pkgs32 = pkgs.pkgsi686Linux;
+  libDir32 = pkgs32.stdenv.hostPlatform.libDir;
+  ldsoBasename32 = builtins.unsafeDiscardStringContext (last (splitString "/" pkgs32.stdenv.cc.bintools.dynamicLinker));
+in {
+  options = {
+    environment.ldso = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = mdDoc ''
+        The executable to link into the normal FHS location of the ELF loader.
+      '';
+    };
+
+    environment.ldso32 = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = mdDoc ''
+        The executable to link into the normal FHS location of the 32-bit ELF loader.
+
+        This currently only works on x86_64 architectures.
+      '';
+    };
+  };
+
+  config = {
+    assertions = [
+      { assertion = isNull config.environment.ldso32 || pkgs.stdenv.isx86_64;
+        message = "Option environment.ldso32 currently only works on x86_64.";
+      }
+    ];
+
+    systemd.tmpfiles.rules = (
+      if isNull config.environment.ldso then [
+        "r /${libDir}/${ldsoBasename} - - - - -"
+      ] else [
+        "d /${libDir} 0755 root root - -"
+        "L+ /${libDir}/${ldsoBasename} - - - - ${config.environment.ldso}"
+      ]
+    ) ++ optionals pkgs.stdenv.isx86_64 (
+      if isNull config.environment.ldso32 then [
+        "r /${libDir32}/${ldsoBasename32} - - - - -"
+      ] else [
+        "d /${libDir32} 0755 root root - -"
+        "L+ /${libDir32}/${ldsoBasename32} - - - - ${config.environment.ldso32}"
+      ]
+    );
+  };
+
+  meta.maintainers = with lib.maintainers; [ tejing ];
+}
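
A hedged usage sketch of the new options (illustrative only); programs/nix-ld.nix later in this patch uses exactly this mechanism by pointing environment.ldso at its loader shim:

  { pkgs, ... }: {
    # Link a chosen loader into the FHS path (e.g. /lib64/ld-linux-x86-64.so.2
    # on x86_64). "pkgs.my-loader" and its output path are hypothetical.
    environment.ldso = "${pkgs.my-loader}/bin/ld.so";

    # Leave the 32-bit loader location empty (only meaningful on x86_64).
    environment.ldso32 = null;
  }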
diff --git a/nixos/modules/config/nix-channel.nix b/nixos/modules/config/nix-channel.nix
index a7ca7a5c74a40..dd97cb730ae41 100644
--- a/nixos/modules/config/nix-channel.nix
+++ b/nixos/modules/config/nix-channel.nix
@@ -12,7 +12,6 @@ let
     mkDefault
     mkIf
     mkOption
-    stringAfter
     types
     ;
 
diff --git a/nixos/modules/config/nix.nix b/nixos/modules/config/nix.nix
index cee4f54db0cb5..2769d8b25ef6f 100644
--- a/nixos/modules/config/nix.nix
+++ b/nixos/modules/config/nix.nix
@@ -109,13 +109,17 @@ let
         if pkgs.stdenv.hostPlatform != pkgs.stdenv.buildPlatform then ''
           echo "Ignoring validation for cross-compilation"
         ''
-        else ''
+        else
+        let
+          showCommand = if isNixAtLeast "2.20pre" then "config show" else "show-config";
+        in
+        ''
           echo "Validating generated nix.conf"
           ln -s $out ./nix.conf
           set -e
           set +o pipefail
           NIX_CONF_DIR=$PWD \
-            ${cfg.package}/bin/nix show-config ${optionalString (isNixAtLeast "2.3pre") "--no-net"} \
+            ${cfg.package}/bin/nix ${showCommand} ${optionalString (isNixAtLeast "2.3pre") "--no-net"} \
               ${optionalString (isNixAtLeast "2.4pre") "--option experimental-features nix-command"} \
             |& sed -e 's/^warning:/error:/' \
             | (! grep '${if cfg.checkAllErrors then "^error:" else "^error: unknown setting"}')
diff --git a/nixos/modules/config/no-x-libs.nix b/nixos/modules/config/no-x-libs.nix
index b2eb46f273b14..0f5888f5ea3b0 100644
--- a/nixos/modules/config/no-x-libs.nix
+++ b/nixos/modules/config/no-x-libs.nix
@@ -34,6 +34,7 @@ with lib;
       ffmpeg_5 = super.ffmpeg_5.override { ffmpegVariant = "headless"; };
       # dep of graphviz, libXpm is optional for Xpm support
       gd = super.gd.override { withXorg = false; };
+      ghostscript = super.ghostscript.override { cupsSupport = false; x11Support = false; };
       gobject-introspection = super.gobject-introspection.override { x11Support = false; };
       gpsd = super.gpsd.override { guiSupport = false; };
       graphviz = super.graphviz-nox;
@@ -44,6 +45,7 @@ with lib;
       };
       imagemagick = super.imagemagick.override { libX11Support = false; libXtSupport = false; };
       imagemagickBig = super.imagemagickBig.override { libX11Support = false; libXtSupport = false; };
+      intel-vaapi-driver = super.intel-vaapi-driver.override { enableGui = false; };
       libdevil = super.libdevil-nox;
       libextractor = super.libextractor.override { gtkSupport = false; };
       libva = super.libva-minimal;
@@ -51,6 +53,7 @@ with lib;
       mc = super.mc.override { x11Support = false; };
       mpv-unwrapped = super.mpv-unwrapped.override { sdl2Support = false; x11Support = false; waylandSupport = false; };
       msmtp = super.msmtp.override { withKeyring = false; };
+      mupdf = super.mupdf.override { enableGL = false; enableX11 = false; };
       neofetch = super.neofetch.override { x11Support = false; };
       networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
       networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
@@ -71,7 +74,7 @@ with lib;
       qemu = super.qemu.override { gtkSupport = false; spiceSupport = false; sdlSupport = false; };
       qrencode = super.qrencode.overrideAttrs (_: { doCheck = false; });
       qt5 = super.qt5.overrideScope (const (super': {
-        qtbase = super'.qtbase.override { withGtk3 = false; };
+        qtbase = super'.qtbase.override { withGtk3 = false; withQttranslation = false; };
       }));
       stoken = super.stoken.override { withGTK3 = false; };
       # translateManpages -> perlPackages.po4a -> texlive-combined-basic -> texlive-core-big -> libX11
diff --git a/nixos/modules/config/pulseaudio.nix b/nixos/modules/config/pulseaudio.nix
index b10edeb75604d..662959bf0071b 100644
--- a/nixos/modules/config/pulseaudio.nix
+++ b/nixos/modules/config/pulseaudio.nix
@@ -8,8 +8,6 @@ let
   cfg = config.hardware.pulseaudio;
   alsaCfg = config.sound;
 
-  systemWide = cfg.enable && cfg.systemWide;
-  nonSystemWide = cfg.enable && !cfg.systemWide;
   hasZeroconf = let z = cfg.zeroconf; in z.publish.enable || z.discovery.enable;
 
   overriddenPackage = cfg.package.override
@@ -217,16 +215,10 @@ in {
   };
 
 
-  config = mkMerge [
+  config = lib.mkIf cfg.enable (mkMerge [
     {
-      environment.etc = {
-        "pulse/client.conf".source = clientConf;
-      };
-
-      hardware.pulseaudio.configFile = mkDefault "${getBin overriddenPackage}/etc/pulse/default.pa";
-    }
+      environment.etc."pulse/client.conf".source = clientConf;
 
-    (mkIf cfg.enable {
       environment.systemPackages = [ overriddenPackage ];
 
       sound.enable = true;
@@ -242,6 +234,8 @@ in {
         "libao.conf".source = writeText "libao.conf" "default_driver=pulse";
       };
 
+      hardware.pulseaudio.configFile = mkDefault "${getBin overriddenPackage}/etc/pulse/default.pa";
+
       # Disable flat volumes to enable relative ones
       hardware.pulseaudio.daemon.config.flat-volumes = mkDefault "no";
 
@@ -255,7 +249,7 @@ in {
 
       # PulseAudio is packaged with udev rules to handle various audio device quirks
       services.udev.packages = [ overriddenPackage ];
-    })
+    }
 
     (mkIf (cfg.extraModules != []) {
       hardware.pulseaudio.daemon.config.dl-search-path = let
@@ -277,7 +271,7 @@ in {
       services.avahi.publish.userServices = true;
     })
 
-    (mkIf nonSystemWide {
+    (mkIf (!cfg.systemWide) {
       environment.etc = {
         "pulse/default.pa".source = myConfigFile;
       };
@@ -297,7 +291,7 @@ in {
       };
     })
 
-    (mkIf systemWide {
+    (mkIf cfg.systemWide {
       users.users.pulse = {
         # For some reason, PulseAudio wants UID == GID.
         uid = assert uid == gid; uid;
@@ -328,6 +322,6 @@ in {
 
       environment.variables.PULSE_COOKIE = "${stateDir}/.config/pulse/cookie";
     })
-  ];
+  ]);
 
 }
diff --git a/nixos/modules/config/stub-ld.nix b/nixos/modules/config/stub-ld.nix
new file mode 100644
index 0000000000000..14c07466d0611
--- /dev/null
+++ b/nixos/modules/config/stub-ld.nix
@@ -0,0 +1,56 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) optionalString mkOption types mdDoc mkIf mkDefault;
+
+  cfg = config.environment.stub-ld;
+
+  message = ''
+    NixOS cannot run dynamically linked executables intended for generic
+    linux environments out of the box. For more information, see:
+    https://nix.dev/permalink/stub-ld
+  '';
+
+  stub-ld-for = pkgsArg: messageArg: pkgsArg.pkgsStatic.runCommandCC "stub-ld" {
+    nativeBuildInputs = [ pkgsArg.unixtools.xxd ];
+    inherit messageArg;
+  } ''
+    printf "%s" "$messageArg" | xxd -i -n message >main.c
+    cat <<EOF >>main.c
+    #include <stdio.h>
+    int main(int argc, char * argv[]) {
+      fprintf(stderr, "Could not start dynamically linked executable: %s\n", argv[0]);
+      fwrite(message, sizeof(unsigned char), message_len, stderr);
+      return 127; // matches behavior of bash and zsh without a loader. fish uses 139
+    }
+    EOF
+    $CC -Os main.c -o $out
+  '';
+
+  pkgs32 = pkgs.pkgsi686Linux;
+
+  stub-ld = stub-ld-for pkgs message;
+  stub-ld32 = stub-ld-for pkgs32 message;
+in {
+  options = {
+    environment.stub-ld = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        example = false;
+        description = mdDoc ''
+          Install a stub ELF loader to print an informative error message
+          in the event that a user attempts to run an ELF binary not
+          compiled for NixOS.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.ldso = mkDefault stub-ld;
+    environment.ldso32 = mkIf pkgs.stdenv.isx86_64 (mkDefault stub-ld32);
+  };
+
+  meta.maintainers = with lib.maintainers; [ tejing ];
+}
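
The stub is installed via environment.ldso above and enabled by default; profiles/minimal.nix later in this patch opts out. An illustrative sketch:

  {
    # Default: foreign FHS binaries fail with the message above and
    # exit code 127 instead of a bare "No such file or directory".
    environment.stub-ld.enable = true;

    # Minimal systems can opt out (see profiles/minimal.nix below):
    # environment.stub-ld.enable = false;
  }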
diff --git a/nixos/modules/config/swap.nix b/nixos/modules/config/swap.nix
index 8989a64082643..21046d6f1697b 100644
--- a/nixos/modules/config/swap.nix
+++ b/nixos/modules/config/swap.nix
@@ -258,7 +258,8 @@ in
             # avoid this race condition.
             after = [ "systemd-modules-load.service" ];
             wantedBy = [ "${realDevice'}.swap" ];
-            before = [ "${realDevice'}.swap" ];
+            before = [ "${realDevice'}.swap" "shutdown.target"];
+            conflicts = [ "shutdown.target" ];
             path = [ pkgs.util-linux pkgs.e2fsprogs ]
               ++ optional sw.randomEncryption.enable pkgs.cryptsetup;
 
diff --git a/nixos/modules/config/sysctl.nix b/nixos/modules/config/sysctl.nix
index 452c050b6dda9..bedba984a3c23 100644
--- a/nixos/modules/config/sysctl.nix
+++ b/nixos/modules/config/sysctl.nix
@@ -21,18 +21,28 @@ in
   options = {
 
     boot.kernel.sysctl = mkOption {
-      type = types.submodule {
+      type = let
+        highestValueType = types.ints.unsigned // {
+          merge = loc: defs:
+            foldl
+              (a: b: if b.value == null then null else lib.max a b.value)
+              0
+              (filterOverrides defs);
+        };
+      in types.submodule {
         freeformType = types.attrsOf sysctlOption;
-        options."net.core.rmem_max" = mkOption {
-          type = types.nullOr types.ints.unsigned // {
-            merge = loc: defs:
-              foldl
-                (a: b: if b.value == null then null else lib.max a b.value)
-                0
-                (filterOverrides defs);
+        options = {
+          "net.core.rmem_max" = mkOption {
+            type = types.nullOr highestValueType;
+            default = null;
+            description = lib.mdDoc "The maximum receive socket buffer size in bytes. In case of conflicting values, the highest will be used.";
+          };
+
+          "net.core.wmem_max" = mkOption {
+            type = types.nullOr highestValueType;
+            default = null;
+            description = lib.mdDoc "The maximum send socket buffer size in bytes. In case of conflicting values, the highest will be used.";
           };
-          default = null;
-          description = lib.mdDoc "The maximum socket receive buffer size. In case of conflicting values, the highest will be used.";
         };
       };
       default = {};
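
With the shared highestValueType, conflicting definitions of these two keys now merge to the largest value instead of raising an option conflict. A sketch with two hypothetical modules (not part of this patch):

  # module-a.nix
  { boot.kernel.sysctl."net.core.wmem_max" = 4194304; }

  # module-b.nix -- after merging, the effective value is the highest
  # one given, here 8388608.
  { boot.kernel.sysctl."net.core.wmem_max" = 8388608; }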
diff --git a/nixos/modules/config/users-groups.nix b/nixos/modules/config/users-groups.nix
index 39aac9fb821bd..2aed620eb154c 100644
--- a/nixos/modules/config/users-groups.nix
+++ b/nixos/modules/config/users-groups.nix
@@ -475,7 +475,7 @@ let
   sdInitrdUidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) config.boot.initrd.systemd.users) "uid";
   sdInitrdGidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) config.boot.initrd.systemd.groups) "gid";
   groupNames = lib.mapAttrsToList (n: g: g.name) cfg.groups;
-  usersWithoutExistingGroup = lib.filterAttrs (n: u: !lib.elem u.group groupNames) cfg.users;
+  usersWithoutExistingGroup = lib.filterAttrs (n: u: u.group != "" && !lib.elem u.group groupNames) cfg.users;
 
   spec = pkgs.writeText "users-groups.json" (builtins.toJSON {
     inherit (cfg) mutableUsers;
diff --git a/nixos/modules/hardware/all-firmware.nix b/nixos/modules/hardware/all-firmware.nix
index 6f58e848b38ae..a97c8c418c865 100644
--- a/nixos/modules/hardware/all-firmware.nix
+++ b/nixos/modules/hardware/all-firmware.nix
@@ -48,10 +48,7 @@ in {
         alsa-firmware
         sof-firmware
         libreelec-dvb-firmware
-      ] ++ optional pkgs.stdenv.hostPlatform.isAarch raspberrypiWirelessFirmware
-        ++ optionals (versionOlder config.boot.kernelPackages.kernel.version "4.13") [
-        rtl8723bs-firmware
-      ];
+      ] ++ optional pkgs.stdenv.hostPlatform.isAarch raspberrypiWirelessFirmware;
     })
     (mkIf cfg.enableAllFirmware {
       assertions = [{
diff --git a/nixos/modules/hardware/keyboard/qmk.nix b/nixos/modules/hardware/keyboard/qmk.nix
index df3bcaeccd2ec..d95d36dedb44e 100644
--- a/nixos/modules/hardware/keyboard/qmk.nix
+++ b/nixos/modules/hardware/keyboard/qmk.nix
@@ -12,5 +12,6 @@ in
 
   config = mkIf cfg.enable {
     services.udev.packages = [ pkgs.qmk-udev-rules ];
+    users.groups.plugdev = {};
   };
 }
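
With the plugdev group now created by the module, a typical flashing setup only needs the following sketch; putting your user in plugdev is an assumption about how the bundled udev rules grant device access:

  {
    hardware.keyboard.qmk.enable = true;
    users.users.alice.extraGroups = [ "plugdev" ];  # "alice" is a placeholder user
  }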
diff --git a/nixos/modules/hardware/video/amdgpu-pro.nix b/nixos/modules/hardware/video/amdgpu-pro.nix
index 605aa6ef8b88a..2a86280eec8cb 100644
--- a/nixos/modules/hardware/video/amdgpu-pro.nix
+++ b/nixos/modules/hardware/video/amdgpu-pro.nix
@@ -39,9 +39,10 @@ in
 
     hardware.firmware = [ package.fw ];
 
-    system.activationScripts.setup-amdgpu-pro = ''
-      ln -sfn ${package}/opt/amdgpu{,-pro} /run
-    '';
+    systemd.tmpfiles.settings.amdgpu-pro = {
+      "/run/amdgpu"."L+".argument = "${package}/opt/amdgpu";
+      "/run/amdgpu-pro"."L+".argument = "${package}/opt/amdgpu-pro";
+    };
 
     system.requiredKernelConfig = with config.lib.kernelConfig; [
       (isYes "DEVICE_PRIVATE")
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index c76883b656d40..3b983f768f91a 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -47,7 +47,8 @@ in {
           TRUNK_LINK_FAILURE_MODE=0;
           NVSWITCH_FAILURE_MODE=0;
           ABORT_CUDA_JOBS_ON_FM_EXIT=1;
-          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+          TOPOLOGY_FILE_PATH="${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
+          DATABASE_PATH="${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
         };
         defaultText = lib.literalExpression ''
         {
@@ -69,7 +70,8 @@ in {
           TRUNK_LINK_FAILURE_MODE=0;
           NVSWITCH_FAILURE_MODE=0;
           ABORT_CUDA_JOBS_ON_FM_EXIT=1;
-          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+          TOPOLOGY_FILE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
+          DATABASE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
         }
         '';
         description = lib.mdDoc ''
@@ -584,24 +586,50 @@ in {
         boot.extraModulePackages = [
           nvidia_x11.bin
         ];
-        systemd.services.nvidia-fabricmanager = {
-          enable = true;
-          description = "Start NVIDIA NVLink Management";
-          wantedBy = [ "multi-user.target" ];
-          unitConfig.After = [ "network-online.target" ];
-          unitConfig.Requires = [ "network-online.target" ];
-          serviceConfig = {
-            Type = "forking";
-            TimeoutStartSec = 240;
-            ExecStart = let
-              nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
-              in
-                nvidia_x11.fabricmanager + "/bin/nv-fabricmanager -c " + nv-fab-conf;
-            LimitCORE="infinity";
-          };
-        };
-        environment.systemPackages =
-          lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager;
-      })
-    ]);
+
+        systemd = {
+          tmpfiles.rules =
+            lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+            "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
+
+          services = lib.mkMerge [
+            ({
+              nvidia-fabricmanager = {
+                enable = true;
+                description = "Start NVIDIA NVLink Management";
+                wantedBy = [ "multi-user.target" ];
+                unitConfig.After = [ "network-online.target" ];
+                unitConfig.Requires = [ "network-online.target" ];
+                serviceConfig = {
+                  Type = "forking";
+                  TimeoutStartSec = 240;
+                  ExecStart = let
+                    nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
+                    in
+                      "${lib.getExe nvidia_x11.fabricmanager} -c ${nv-fab-conf}";
+                  LimitCORE="infinity";
+                };
+              };
+            })
+            (lib.mkIf cfg.nvidiaPersistenced {
+              "nvidia-persistenced" = {
+                description = "NVIDIA Persistence Daemon";
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "forking";
+                  Restart = "always";
+                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
+                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+                };
+              };
+            })
+          ];
+      };
+
+      environment.systemPackages =
+        lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager
+        ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced;
+    })
+  ]);
 }
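
A hedged configuration sketch exercising both units declared above (option paths assume the usual hardware.nvidia prefix of this module):

  {
    hardware.nvidia.nvidiaPersistenced = true;   # starts nvidia-persistenced
    hardware.nvidia.datacenter.enable = true;    # starts nvidia-fabricmanager
  }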
diff --git a/nixos/modules/i18n/input-method/fcitx5.nix b/nixos/modules/i18n/input-method/fcitx5.nix
index 3d52c08888eae..530727f3f2928 100644
--- a/nixos/modules/i18n/input-method/fcitx5.nix
+++ b/nixos/modules/i18n/input-method/fcitx5.nix
@@ -19,6 +19,14 @@ in
           Enabled Fcitx5 addons.
         '';
       };
+      waylandFrontend = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Use the Wayland input method frontend.
+          See [Using Fcitx 5 on Wayland](https://fcitx-im.org/wiki/Using_Fcitx_5_on_Wayland).
+        '';
+      };
       quickPhrase = mkOption {
         type = with types; attrsOf str;
         default = { };
@@ -118,10 +126,11 @@ in
       ];
 
     environment.variables = {
-      GTK_IM_MODULE = "fcitx";
-      QT_IM_MODULE = "fcitx";
       XMODIFIERS = "@im=fcitx";
       QT_PLUGIN_PATH = [ "${fcitx5Package}/${pkgs.qt6.qtbase.qtPluginPrefix}" ];
+    } // lib.optionalAttrs (!cfg.waylandFrontend) {
+      GTK_IM_MODULE = "fcitx";
+      QT_IM_MODULE = "fcitx";
     } // lib.optionalAttrs cfg.ignoreUserConfig {
       SKIP_FCITX_USER_PATH = "1";
     };
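
A minimal sketch of the new toggle (assuming the usual i18n.inputMethod.fcitx5 option path); with it set, GTK_IM_MODULE and QT_IM_MODULE are left unset so clients use the Wayland input-method protocol instead:

  {
    i18n.inputMethod.enabled = "fcitx5";
    i18n.inputMethod.fcitx5.waylandFrontend = true;
  }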
diff --git a/nixos/modules/image/repart-image.nix b/nixos/modules/image/repart-image.nix
new file mode 100644
index 0000000000000..b4a1dfe51ff3a
--- /dev/null
+++ b/nixos/modules/image/repart-image.nix
@@ -0,0 +1,80 @@
+# This is an expression meant to be called from `./repart.nix`, it is NOT a
+# NixOS module that can be imported.
+
+{ lib
+, runCommand
+, python3
+, black
+, ruff
+, mypy
+, systemd
+, fakeroot
+, util-linux
+, dosfstools
+, mtools
+, e2fsprogs
+, squashfsTools
+, erofs-utils
+, btrfs-progs
+, xfsprogs
+
+  # arguments
+, name
+, fileSystems
+, partitions
+, split
+, seed
+, definitionsDirectory
+}:
+
+let
+  amendRepartDefinitions = runCommand "amend-repart-definitions.py"
+    {
+      # TODO: ruff does not splice properly in nativeBuildInputs
+      depsBuildBuild = [ ruff ];
+      nativeBuildInputs = [ python3 black mypy ];
+    } ''
+    install ${./amend-repart-definitions.py} $out
+    patchShebangs --build $out
+
+    black --check --diff $out
+    ruff --line-length 88 $out
+    mypy --strict $out
+  '';
+
+  fileSystemToolMapping = {
+    "vfat" = [ dosfstools mtools ];
+    "ext4" = [ e2fsprogs.bin ];
+    "squashfs" = [ squashfsTools ];
+    "erofs" = [ erofs-utils ];
+    "btrfs" = [ btrfs-progs ];
+    "xfs" = [ xfsprogs ];
+  };
+
+  fileSystemTools = builtins.concatMap (f: fileSystemToolMapping."${f}") fileSystems;
+in
+
+runCommand name
+{
+  nativeBuildInputs = [
+    systemd
+    fakeroot
+    util-linux
+  ] ++ fileSystemTools;
+} ''
+  amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
+
+  mkdir -p $out
+  cd $out
+
+  unshare --map-root-user fakeroot systemd-repart \
+    --dry-run=no \
+    --empty=create \
+    --size=auto \
+    --seed="${seed}" \
+    --definitions="$amendedRepartDefinitions" \
+    --split="${lib.boolToString split}" \
+    --json=pretty \
+    image.raw \
+    | tee repart-output.json
+''
diff --git a/nixos/modules/image/repart.nix b/nixos/modules/image/repart.nix
index 41e6110885b85..da4f45d9a6392 100644
--- a/nixos/modules/image/repart.nix
+++ b/nixos/modules/image/repart.nix
@@ -90,8 +90,10 @@ in
     };
 
     package = lib.mkPackageOption pkgs "systemd-repart" {
-      default = "systemd";
-      example = "pkgs.systemdMinimal.override { withCryptsetup = true; }";
+      # We use buildPackages so that repart images are built with the build
+      # platform's systemd, allowing for cross-compiled systems to work.
+      default = [ "buildPackages" "systemd" ];
+      example = "pkgs.buildPackages.systemdMinimal.override { withCryptsetup = true; }";
     };
 
     partitions = lib.mkOption {
@@ -131,22 +133,10 @@ in
 
     system.build.image =
       let
-        fileSystemToolMapping = with pkgs; {
-          "vfat" = [ dosfstools mtools ];
-          "ext4" = [ e2fsprogs.bin ];
-          "squashfs" = [ squashfsTools ];
-          "erofs" = [ erofs-utils ];
-          "btrfs" = [ btrfs-progs ];
-          "xfs" = [ xfsprogs ];
-        };
-
         fileSystems = lib.filter
           (f: f != null)
           (lib.mapAttrsToList (_n: v: v.repartConfig.Format or null) cfg.partitions);
 
-        fileSystemTools = builtins.concatMap (f: fileSystemToolMapping."${f}") fileSystems;
-
-
         makeClosure = paths: pkgs.closureInfo { rootPaths = paths; };
 
         # Add the closure of the provided Nix store paths to cfg.partitions so
@@ -157,23 +147,8 @@ in
             { closure = "${makeClosure partitionConfig.storePaths}/store-paths"; }
         );
 
-
         finalPartitions = lib.mapAttrs addClosure cfg.partitions;
 
-
-        amendRepartDefinitions = pkgs.runCommand "amend-repart-definitions.py"
-          {
-            nativeBuildInputs = with pkgs; [ black ruff mypy ];
-            buildInputs = [ pkgs.python3 ];
-          } ''
-          install ${./amend-repart-definitions.py} $out
-          patchShebangs --host $out
-
-          black --check --diff $out
-          ruff --line-length 88 $out
-          mypy --strict $out
-        '';
-
         format = pkgs.formats.ini { };
 
         definitionsDirectory = utils.systemdUtils.lib.definitions
@@ -183,30 +158,11 @@ in
 
         partitions = pkgs.writeText "partitions.json" (builtins.toJSON finalPartitions);
       in
-      pkgs.runCommand cfg.name
-        {
-          nativeBuildInputs = [
-            cfg.package
-            pkgs.fakeroot
-            pkgs.util-linux
-          ] ++ fileSystemTools;
-        } ''
-        amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
-
-        mkdir -p $out
-        cd $out
-
-        unshare --map-root-user fakeroot systemd-repart \
-          --dry-run=no \
-          --empty=create \
-          --size=auto \
-          --seed="${cfg.seed}" \
-          --definitions="$amendedRepartDefinitions" \
-          --split="${lib.boolToString cfg.split}" \
-          --json=pretty \
-          image.raw \
-          | tee repart-output.json
-      '';
+      pkgs.callPackage ./repart-image.nix {
+        systemd = cfg.package;
+        inherit (cfg) name split seed;
+        inherit fileSystems definitionsDirectory partitions;
+      };
 
     meta.maintainers = with lib.maintainers; [ nikstur ];
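
Because the builder now defaults to the build platform's systemd, overriding the package follows the option's own example text; a hedged sketch:

  { pkgs, ... }: {
    image.repart.name = "appliance";  # placeholder name
    image.repart.package = pkgs.buildPackages.systemdMinimal.override {
      withCryptsetup = true;
    };
  }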
 
diff --git a/nixos/modules/installer/tools/tools.nix b/nixos/modules/installer/tools/tools.nix
index 9ccc76a82c95a..a7d11370d445e 100644
--- a/nixos/modules/installer/tools/tools.nix
+++ b/nixos/modules/installer/tools/tools.nix
@@ -231,7 +231,8 @@ in
         # even if you've upgraded your system to a new NixOS release.
         #
         # This value does NOT affect the Nixpkgs version your packages and OS are pulled from,
-        # so changing it will NOT upgrade your system.
+        # so changing it will NOT upgrade your system - see https://nixos.org/manual/nixos/stable/#sec-upgrading for how
+        # to actually do that.
         #
         # This value being lower than the current NixOS release does NOT mean your system is
         # out of date, out of support, or vulnerable.
diff --git a/nixos/modules/misc/documentation.nix b/nixos/modules/misc/documentation.nix
index 46462c5abd435..f3e698468e642 100644
--- a/nixos/modules/misc/documentation.nix
+++ b/nixos/modules/misc/documentation.nix
@@ -77,7 +77,11 @@ let
           libPath = filter (pkgs.path + "/lib");
           pkgsLibPath = filter (pkgs.path + "/pkgs/pkgs-lib");
           nixosPath = filter (pkgs.path + "/nixos");
-          modules = map (p: ''"${removePrefix "${modulesPath}/" (toString p)}"'') docModules.lazy;
+          modules =
+            "[ "
+            + concatMapStringsSep " " (p: ''"${removePrefix "${modulesPath}/" (toString p)}"'') docModules.lazy
+            + " ]";
+          passAsFile = [ "modules" ];
         } ''
           export NIX_STORE_DIR=$TMPDIR/store
           export NIX_STATE_DIR=$TMPDIR/state
@@ -87,7 +91,7 @@ let
             --argstr libPath "$libPath" \
             --argstr pkgsLibPath "$pkgsLibPath" \
             --argstr nixosPath "$nixosPath" \
-            --arg modules "[ $modules ]" \
+            --arg modules "import $modulesPath" \
             --argstr stateVersion "${options.system.stateVersion.default}" \
             --argstr release "${config.system.nixos.release}" \
             $nixosPath/lib/eval-cacheable-options.nix > $out \
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index 18928a6bf21bb..5af7284ac71af 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -288,7 +288,7 @@ in
       telegraf = 256;
       gitlab-runner = 257;
       postgrey = 258;
-      hound = 259;
+      # hound = 259; # unused, removed 2023-11-21
       leaps = 260;
       ipfs  = 261;
       # stanchion = 262; # unused, removed 2020-10-14
@@ -599,7 +599,7 @@ in
       #telegraf = 256; # unused
       gitlab-runner = 257;
       postgrey = 258;
-      hound = 259;
+      # hound = 259; # unused, removed 2023-11-21
       leaps = 260;
       ipfs = 261;
       # stanchion = 262; # unused, removed 2020-10-14
diff --git a/nixos/modules/misc/mandoc.nix b/nixos/modules/misc/mandoc.nix
index 9bcef5b1a09bd..73646a60aabb2 100644
--- a/nixos/modules/misc/mandoc.nix
+++ b/nixos/modules/misc/mandoc.nix
@@ -5,25 +5,39 @@ let
 
   cfg = config.documentation.man.mandoc;
 
-in {
+  toMandocOutput = output: (
+    lib.mapAttrsToList
+      (
+        name: value:
+          if lib.isString value || lib.isPath value then "output ${name} ${value}"
+          else if lib.isInt value then "output ${name} ${builtins.toString value}"
+          else if lib.isBool value then lib.optionalString value "output ${name}"
+          else if value == null then ""
+          else throw "Unrecognized value type ${builtins.typeOf value} of key ${name} in mandoc output settings"
+      )
+      output
+  );
+in
+{
   meta.maintainers = [ lib.maintainers.sternenseemann ];
 
   options = {
     documentation.man.mandoc = {
-      enable = lib.mkEnableOption (lib.mdDoc "mandoc as the default man page viewer");
+      enable = lib.mkEnableOption "mandoc as the default man page viewer";
 
       manPath = lib.mkOption {
         type = with lib.types; listOf str;
         default = [ "share/man" ];
         example = lib.literalExpression "[ \"share/man\" \"share/man/fr\" ]";
-        description = lib.mdDoc ''
-          Change the manpath, i. e. the directories where
-          {manpage}`man(1)`
+        description = ''
+          Change the paths included in the MANPATH environment variable,
+          i. e. the directories where {manpage}`man(1)`
           looks for section-specific directories of man pages.
           You only need to change this setting if you want extra man pages
           (e. g. in non-English languages). All values must be strings that
           are a valid path from the target prefix (without including it).
-          The first value given takes priority.
+          The first value given takes priority. Note that this will not
+          add manpath directives to {manpage}`man.conf(5)`.
         '';
       };
 
@@ -31,11 +45,122 @@ in {
         type = lib.types.package;
         default = pkgs.mandoc;
         defaultText = lib.literalExpression "pkgs.mandoc";
-        description = lib.mdDoc ''
+        description = ''
           The `mandoc` derivation to use. Useful to override
           configuration options used for the package.
         '';
       };
+
+      settings = lib.mkOption {
+        description = "Configuration for {manpage}`man.conf(5)`";
+        default = { };
+        type = lib.types.submodule {
+          options = {
+            manpath = lib.mkOption {
+              type = with lib.types; listOf str;
+              default = [ ];
+              example = lib.literalExpression "[ \"/run/current-system/sw/share/man\" ]";
+              description = ''
+                Override the default search path for {manpage}`man(1)`,
+                {manpage}`apropos(1)`, and {manpage}`makewhatis(8)`. Multiple
+                paths may be given; their order determines the manual page
+                search order.
+                Prefer {option}`documentation.man.mandoc.manPath` instead; if
+                you do need to set the manpath this way, set
+                {option}`documentation.man.mandoc.manPath` to an empty list
+                (`[]`).
+              '';
+            };
+            output.fragment = lib.mkEnableOption ''
+              Omit the <!DOCTYPE> declaration and the <html>, <head>, and <body>
+              elements and only emit the subtree below the <body> element in HTML
+              output of {manpage}`mandoc(1)`. The style argument will be ignored.
+              This is useful when embedding manual content within existing documents.
+            '';
+            output.includes = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              example = lib.literalExpression "../src/%I.html";
+              description = ''
+                A relative path used as a template for the output path of
+                linked header files (usually via the In macro) in HTML output.
+                Instances of `%I` are replaced with the include filename. The
+                default is not to present a hyperlink.
+              '';
+            };
+            output.indent = lib.mkOption {
+              type = with lib.types; nullOr int;
+              default = null;
+              description = ''
+                Number of blank characters at the left margin for normal text,
+                default of `5` for {manpage}`mdoc(7)` and `7` for
+                {manpage}`man(7)`. Increasing this is not recommended; it may
+                result in degraded formatting, for example overfull lines or ugly
+                line breaks. When output is to a pager on a terminal that is less
+                than 66 columns wide, the default is reduced to three columns.
+              '';
+            };
+            output.man = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              example = lib.literalExpression "../html%S/%N.%S.html";
+              description = ''
+                A template for linked manuals (usually via the Xr macro) in HTML
+                output. Instances of `%N` and `%S` are replaced with the linked
+                manual's name and section, respectively. If no section is included,
+                section 1 is assumed. The default is not to present a hyperlink.
+                If two formats are given and a file `%N.%S` exists in the current
+                directory, the first format is used; otherwise, the second format is used.
+              '';
+            };
+            output.paper = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              description = ''
+                This option is for generating PostScript and PDF output. The paper
+                size name may be one of `a3`, `a4`, `a5`, `legal`, or `letter`.
+                You may also manually specify dimensions as `NNxNN`, width by
+                height in millimetres. If an unknown value is encountered, letter
+                is used. Output pages default to letter sized and are rendered in
+                the Times font family, 11-point. Margins are calculated as 1/9 the
+                page length and width. Line-height is 1.4m.
+              '';
+            };
+            output.style = lib.mkOption {
+              type = with lib.types; nullOr path;
+              default = null;
+              description = ''
+                Path to the file used for an external style-sheet. This must be a
+                valid absolute or relative URI.
+              '';
+            };
+            output.toc = lib.mkEnableOption ''
+              In HTML output of {manpage}`mandoc(1)`, if an input file contains
+              at least two non-standard sections, print a table of contents near
+              the beginning of the output.
+            '';
+            output.width = lib.mkOption {
+              type = with lib.types; nullOr int;
+              default = null;
+              description = ''
+                The ASCII and UTF-8 output width, default is `78`. When output is a
+                pager on a terminal that is less than 79 columns wide, the
+                default is reduced to one less than the terminal width. In any case,
+                lines that are output in literal mode are never wrapped and may
+                exceed the output width.
+              '';
+            };
+          };
+        };
+      };
+
+      extraConfig = lib.mkOption {
+        type = lib.types.lines;
+        default = "";
+        description = ''
+          Extra configuration to write to {manpage}`man.conf(5)`.
+        '';
+      };
     };
   };
 
@@ -43,21 +168,29 @@ in {
     environment = {
       systemPackages = [ cfg.package ];
 
-      # tell mandoc about man pages
-      etc."man.conf".text = lib.concatMapStrings (path: ''
-        manpath /run/current-system/sw/${path}
-      '') cfg.manPath;
+      etc."man.conf".text = lib.concatStringsSep "\n" (
+        (map (path: "manpath ${path}") cfg.settings.manpath)
+        ++ (toMandocOutput cfg.settings.output)
+        ++ [ cfg.extraConfig ]
+      );
 
       # create mandoc.db for whatis(1), apropos(1) and man(1) -k
       # TODO(@sternenseemman): fix symlinked directories not getting indexed,
       # see: https://inbox.vuxu.org/mandoc-tech/20210906171231.GF83680@athene.usta.de/T/#e85f773c1781e3fef85562b2794f9cad7b2909a3c
       extraSetup = lib.mkIf config.documentation.man.generateCaches ''
-        ${makewhatis} -T utf8 ${
+        for man_path in ${
           lib.concatMapStringsSep " " (path:
             "$out/" + lib.escapeShellArg path
-          ) cfg.manPath
-        }
+            ) cfg.manPath} ${lib.concatMapStringsSep " " (path:
+            lib.escapeShellArg path) cfg.settings.manpath
+          }
+        do
+          [[ -d "$man_path" ]] && ${makewhatis} -T utf8 $man_path
+        done
       '';
+
+      # tell mandoc the paths containing man pages
+      profileRelativeSessionVariables."MANPATH" = map (path: if builtins.substring 0 1 path != "/" then "/${path}" else path) cfg.manPath;
     };
   };
 }
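
An illustrative use of the new settings and extraConfig options (values are examples only, not defaults):

  {
    documentation.man.mandoc = {
      enable = true;
      settings.output.width = 72;                       # wrap ASCII/UTF-8 output at 72 columns
      settings.output.style = "/etc/mandoc/style.css";  # hypothetical stylesheet path
      extraConfig = ''
        # appended verbatim to man.conf(5)
      '';
    };
  }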
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index fee7c35ed8f45..65047bdd110a7 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -12,6 +12,7 @@
   ./config/iproute2.nix
   ./config/krb5/default.nix
   ./config/ldap.nix
+  ./config/ldso.nix
   ./config/locale.nix
   ./config/malloc.nix
   ./config/mysql.nix
@@ -28,6 +29,7 @@
   ./config/resolvconf.nix
   ./config/shells-environment.nix
   ./config/stevenblack.nix
+  ./config/stub-ld.nix
   ./config/swap.nix
   ./config/sysctl.nix
   ./config/system-environment.nix
@@ -276,6 +278,7 @@
   ./programs/wayland/waybar.nix
   ./programs/wayland/wayfire.nix
   ./programs/weylus.nix
+  ./programs/winbox.nix
   ./programs/wireshark.nix
   ./programs/xastir.nix
   ./programs/wshowkeys.nix
@@ -341,6 +344,7 @@
   ./services/audio/mopidy.nix
   ./services/audio/mpd.nix
   ./services/audio/mpdscribble.nix
+  ./services/audio/mympd.nix
   ./services/audio/navidrome.nix
   ./services/audio/networkaudiod.nix
   ./services/audio/roon-bridge.nix
@@ -364,6 +368,7 @@
   ./services/backup/mysql-backup.nix
   ./services/backup/postgresql-backup.nix
   ./services/backup/postgresql-wal-receiver.nix
+  ./services/backup/snapraid.nix
   ./services/backup/restic-rest-server.nix
   ./services/backup/restic.nix
   ./services/backup/rsnapshot.nix
@@ -615,6 +620,7 @@
   ./services/mail/public-inbox.nix
   ./services/mail/roundcube.nix
   ./services/mail/rspamd.nix
+  ./services/mail/rspamd-trainer.nix
   ./services/mail/rss2email.nix
   ./services/mail/schleuder.nix
   ./services/mail/spamassassin.nix
@@ -717,6 +723,7 @@
   ./services/misc/nzbget.nix
   ./services/misc/nzbhydra2.nix
   ./services/misc/octoprint.nix
+  ./services/misc/ollama.nix
   ./services/misc/ombi.nix
   ./services/misc/osrm.nix
   ./services/misc/owncast.nix
@@ -766,6 +773,7 @@
   ./services/misc/tautulli.nix
   ./services/misc/tiddlywiki.nix
   ./services/misc/tp-auto-kbbl.nix
+  ./services/misc/tuxclocker.nix
   ./services/misc/tzupdate.nix
   ./services/misc/uhub.nix
   ./services/misc/weechat.nix
@@ -941,6 +949,7 @@
   ./services/networking/ghostunnel.nix
   ./services/networking/git-daemon.nix
   ./services/networking/globalprotect-vpn.nix
+  ./services/networking/gns3-server.nix
   ./services/networking/gnunet.nix
   ./services/networking/go-autoconfig.nix
   ./services/networking/go-neb.nix
@@ -971,6 +980,7 @@
   ./services/networking/iwd.nix
   ./services/networking/jibri/default.nix
   ./services/networking/jicofo.nix
+  ./services/networking/jigasi.nix
   ./services/networking/jitsi-videobridge.nix
   ./services/networking/jool.nix
   ./services/networking/kea.nix
@@ -1253,6 +1263,7 @@
   ./services/web-apps/changedetection-io.nix
   ./services/web-apps/chatgpt-retrieval-plugin.nix
   ./services/web-apps/cloudlog.nix
+  ./services/web-apps/code-server.nix
   ./services/web-apps/convos.nix
   ./services/web-apps/dex.nix
   ./services/web-apps/discourse.nix
@@ -1334,6 +1345,7 @@
   ./services/web-apps/vikunja.nix
   ./services/web-apps/whitebophir.nix
   ./services/web-apps/wiki-js.nix
+  ./services/web-apps/windmill.nix
   ./services/web-apps/wordpress.nix
   ./services/web-apps/writefreely.nix
   ./services/web-apps/youtrack.nix
@@ -1359,6 +1371,7 @@
   ./services/web-servers/molly-brown.nix
   ./services/web-servers/nginx/default.nix
   ./services/web-servers/nginx/gitweb.nix
+  ./services/web-servers/nginx/tailscale-auth.nix
   ./services/web-servers/phpfpm/default.nix
   ./services/web-servers/pomerium.nix
   ./services/web-servers/rustus.nix
@@ -1504,7 +1517,6 @@
   ./tasks/network-interfaces.nix
   ./tasks/powertop.nix
   ./tasks/scsi-link-power-management.nix
-  ./tasks/snapraid.nix
   ./tasks/stratis.nix
   ./tasks/swraid.nix
   ./tasks/trackpoint.nix
diff --git a/nixos/modules/profiles/installation-device.nix b/nixos/modules/profiles/installation-device.nix
index 52750cd472dad..58f07b050b5c4 100644
--- a/nixos/modules/profiles/installation-device.nix
+++ b/nixos/modules/profiles/installation-device.nix
@@ -105,6 +105,8 @@ with lib;
       ];
 
     boot.swraid.enable = true;
+    # remove warning about unset mail
+    boot.swraid.mdadmConf = "PROGRAM ${pkgs.coreutils}/bin/true";
 
     # Show all debug messages from the kernel but don't log refused packets
     # because we have the firewall enabled. This makes installs from the
diff --git a/nixos/modules/profiles/minimal.nix b/nixos/modules/profiles/minimal.nix
index 75f355b4a002b..b76740f7cc587 100644
--- a/nixos/modules/profiles/minimal.nix
+++ b/nixos/modules/profiles/minimal.nix
@@ -21,6 +21,8 @@ with lib;
   # Perl is a default package.
   environment.defaultPackages = mkDefault [ ];
 
+  environment.stub-ld.enable = false;
+
   # The lessopen package pulls in Perl.
   programs.less.lessopen = mkDefault null;
 
diff --git a/nixos/modules/programs/atop.nix b/nixos/modules/programs/atop.nix
index 7d9491d1fc1f3..003cfdbfc8fad 100644
--- a/nixos/modules/programs/atop.nix
+++ b/nixos/modules/programs/atop.nix
@@ -137,6 +137,7 @@ in
                 atop.preStart = ''
                   set -e -u
                   shopt -s nullglob
+                  rm -f "$LOGPATH"/atop_*.new
                   for logfile in "$LOGPATH"/atop_*
                   do
                     ${atop}/bin/atopconvert "$logfile" "$logfile".new
@@ -144,9 +145,9 @@ in
                     # false positives for atop-rotate.service
                     if ! ${pkgs.diffutils}/bin/cmp -s "$logfile" "$logfile".new
                     then
-                      ${pkgs.coreutils}/bin/mv -v -f "$logfile".new "$logfile"
+                      mv -v -f "$logfile".new "$logfile"
                     else
-                      ${pkgs.coreutils}/bin/rm -f "$logfile".new
+                      rm -f "$logfile".new
                     fi
                   done
                 '';
diff --git a/nixos/modules/programs/direnv.nix b/nixos/modules/programs/direnv.nix
index 1aa62ea54d2c5..fdc646eb4b164 100644
--- a/nixos/modules/programs/direnv.nix
+++ b/nixos/modules/programs/direnv.nix
@@ -49,7 +49,14 @@ in {
           default = true;
         };
 
-      package = lib.mkPackageOption pkgs "nix-direnv" {};
+      package = lib.mkOption {
+        default = pkgs.nix-direnv.override { nix = config.nix.package; };
+        defaultText = "pkgs.nix-direnv";
+        type = lib.types.package;
+        description = lib.mdDoc ''
+          The nix-direnv package to use
+        '';
+      };
     };
   };
 
diff --git a/nixos/modules/programs/firejail.nix b/nixos/modules/programs/firejail.nix
index 6f79c13d94b44..046c31ce64f6b 100644
--- a/nixos/modules/programs/firejail.nix
+++ b/nixos/modules/programs/firejail.nix
@@ -53,7 +53,7 @@ in {
           desktop = mkOption {
             type = types.nullOr types.path;
             default = null;
-            description = lib.mkDoc ".desktop file to modify. Only necessary if it uses the absolute path to the executable.";
+            description = lib.mdDoc ".desktop file to modify. Only necessary if it uses the absolute path to the executable.";
             example = literalExpression ''"''${pkgs.firefox}/share/applications/firefox.desktop"'';
           };
           profile = mkOption {
diff --git a/nixos/modules/programs/gamemode.nix b/nixos/modules/programs/gamemode.nix
index c43e2c2296f5a..344f392852e2a 100644
--- a/nixos/modules/programs/gamemode.nix
+++ b/nixos/modules/programs/gamemode.nix
@@ -18,7 +18,7 @@ in
 
       settings = mkOption {
         type = settingsFormat.type;
-        default = {};
+        default = { };
         description = lib.mdDoc ''
           System-wide configuration for GameMode (/etc/gamemode.ini).
           See gamemoded(8) man page for available settings.
diff --git a/nixos/modules/programs/gpaste.nix b/nixos/modules/programs/gpaste.nix
index 074b4d59a365a..37172c9583a37 100644
--- a/nixos/modules/programs/gpaste.nix
+++ b/nixos/modules/programs/gpaste.nix
@@ -32,5 +32,7 @@ with lib;
     systemd.packages = [ pkgs.gnome.gpaste ];
     # gnome-control-center crashes in Keyboard Shortcuts pane without the GSettings schemas.
     services.xserver.desktopManager.gnome.sessionPath = [ pkgs.gnome.gpaste ];
+    # gpaste-reloaded applet doesn't work without the typelib
+    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gnome.gpaste ];
   };
 }
diff --git a/nixos/modules/programs/hyprland.nix b/nixos/modules/programs/hyprland.nix
index 166c6cbc5c183..9061ce5da83a8 100644
--- a/nixos/modules/programs/hyprland.nix
+++ b/nixos/modules/programs/hyprland.nix
@@ -30,7 +30,6 @@ in
       readOnly = true;
       default = cfg.package.override {
         enableXWayland = cfg.xwayland.enable;
-        enableNvidiaPatches = cfg.enableNvidiaPatches;
       };
       defaultText = literalExpression
         "`programs.hyprland.package` with applied configuration";
@@ -42,8 +41,6 @@ in
     portalPackage = mkPackageOption pkgs "xdg-desktop-portal-hyprland" { };
 
     xwayland.enable = mkEnableOption (mdDoc "XWayland") // { default = true; };
-
-    enableNvidiaPatches = mkEnableOption (mdDoc "patching wlroots for better Nvidia support");
   };
 
   config = mkIf cfg.enable {
@@ -73,9 +70,13 @@ in
       [ "programs" "hyprland" "xwayland" "hidpi" ]
       "XWayland patches are deprecated. Refer to https://wiki.hyprland.org/Configuring/XWayland"
     )
-    (mkRenamedOptionModule
-      [ "programs" "hyprland" "nvidiaPatches" ]
+    (mkRemovedOptionModule
       [ "programs" "hyprland" "enableNvidiaPatches" ]
+      "Nvidia patches are no longer needed"
+    )
+    (mkRemovedOptionModule
+      [ "programs" "hyprland" "nvidiaPatches" ]
+      "Nvidia patches are no longer needed"
     )
   ];
 }
diff --git a/nixos/modules/programs/mininet.nix b/nixos/modules/programs/mininet.nix
index 01ffd811e70e2..3568736854d8e 100644
--- a/nixos/modules/programs/mininet.nix
+++ b/nixos/modules/programs/mininet.nix
@@ -6,39 +6,6 @@ with lib;
 
 let
   cfg = config.programs.mininet;
-
-  telnet = pkgs.runCommand "inetutils-telnet"
-    { }
-    ''
-      mkdir -p $out/bin
-      ln -s ${pkgs.inetutils}/bin/telnet $out/bin
-    '';
-
-  generatedPath = with pkgs; makeSearchPath "bin" [
-    iperf
-    ethtool
-    iproute2
-    socat
-    # mn errors out without a telnet binary
-    # pkgs.inetutils brings an undesired ifconfig into PATH see #43105
-    nettools
-    telnet
-  ];
-
-  pyEnv = pkgs.python3.withPackages (ps: [ ps.mininet-python ]);
-
-  mnexecWrapped = pkgs.runCommand "mnexec-wrapper"
-    { nativeBuildInputs = [ pkgs.makeWrapper pkgs.python3Packages.wrapPython ]; }
-    ''
-      makeWrapper ${pkgs.mininet}/bin/mnexec \
-        $out/bin/mnexec \
-        --prefix PATH : "${generatedPath}"
-
-      makeWrapper ${pyEnv}/bin/mn \
-        $out/bin/mn \
-        --prefix PYTHONPATH : "${pyEnv}/${pyEnv.sitePackages}" \
-        --prefix PATH : "${generatedPath}"
-    '';
 in
 {
   options.programs.mininet.enable = mkEnableOption (lib.mdDoc "Mininet");
@@ -47,6 +14,6 @@ in
 
     virtualisation.vswitch.enable = true;
 
-    environment.systemPackages = [ mnexecWrapped ];
+    environment.systemPackages = [ pkgs.mininet ];
   };
 }
diff --git a/nixos/modules/programs/mosh.nix b/nixos/modules/programs/mosh.nix
index 9e56e1731d7cc..593246ab6dcd1 100644
--- a/nixos/modules/programs/mosh.nix
+++ b/nixos/modules/programs/mosh.nix
@@ -1,7 +1,5 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
 
   cfg  = config.programs.mosh;
@@ -9,28 +7,26 @@ let
 in
 {
   options.programs.mosh = {
-    enable = mkOption {
-      description = lib.mdDoc ''
-        Whether to enable mosh. Note, this will open ports in your firewall!
-      '';
-      default = false;
-      type = lib.types.bool;
+    enable = lib.mkEnableOption "mosh";
+    openFirewall = lib.mkEnableOption "" // {
+      description = "Whether to automatically open the necessary ports in the firewall.";
+      default = true;
     };
-    withUtempter = mkOption {
+    withUtempter = lib.mkEnableOption "" // {
       description = lib.mdDoc ''
         Whether to enable libutempter for mosh.
+
         This is required so that mosh can write to /var/run/utmp (which can be queried with `who` to display currently connected user sessions).
         Note, this will add a setgid wrapper for the group utmp!
       '';
       default = true;
-      type = lib.types.bool;
     };
   };
 
-  config = mkIf cfg.enable {
-    environment.systemPackages = with pkgs; [ mosh ];
-    networking.firewall.allowedUDPPortRanges = [ { from = 60000; to = 61000; } ];
-    security.wrappers = mkIf cfg.withUtempter {
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.mosh ];
+    networking.firewall.allowedUDPPortRanges = lib.optional cfg.openFirewall { from = 60000; to = 61000; };
+    security.wrappers = lib.mkIf cfg.withUtempter {
       utempter = {
         source = "${pkgs.libutempter}/lib/utempter/utempter";
         owner = "root";
diff --git a/nixos/modules/programs/nix-ld.nix b/nixos/modules/programs/nix-ld.nix
index e3a9bb16410c9..6f36ce33640cd 100644
--- a/nixos/modules/programs/nix-ld.nix
+++ b/nixos/modules/programs/nix-ld.nix
@@ -47,7 +47,7 @@ in
   };
 
   config = lib.mkIf config.programs.nix-ld.enable {
-    systemd.tmpfiles.packages = [ cfg.package ];
+    environment.ldso = "${cfg.package}/libexec/nix-ld";
 
     environment.systemPackages = [ nix-ld-libraries ];
 
diff --git a/nixos/modules/programs/ssh.nix b/nixos/modules/programs/ssh.nix
index 18eb3f938f3d8..c39a3c8d509be 100644
--- a/nixos/modules/programs/ssh.nix
+++ b/nixos/modules/programs/ssh.nix
@@ -8,14 +8,12 @@ let
 
   cfg  = config.programs.ssh;
 
-  askPassword = cfg.askPassword;
-
   askPasswordWrapper = pkgs.writeScript "ssh-askpass-wrapper"
     ''
       #! ${pkgs.runtimeShell} -e
       export DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^DISPLAY=\(.*\)/\1/; t; d')"
       export WAYLAND_DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^WAYLAND_DISPLAY=\(.*\)/\1/; t; d')"
-      exec ${askPassword} "$@"
+      exec ${cfg.askPassword} "$@"
     '';
 
   knownHosts = attrValues cfg.knownHosts;
@@ -52,10 +50,11 @@ in
       };
 
       forwardX11 = mkOption {
-        type = types.bool;
+        type = with lib.types; nullOr bool;
         default = false;
         description = lib.mdDoc ''
           Whether to request X11 forwarding on outgoing connections by default.
+          If set to null, the option is not set at all.
           This is useful for running graphical programs on the remote machine and having them display to your local X11 server.
           Historically, this value has depended on the value used by the local sshd daemon, but there really isn't a relation between the two.
           Note: there are some security risks to forwarding an X11 connection.
@@ -274,10 +273,10 @@ in
   config = {
 
     programs.ssh.setXAuthLocation =
-      mkDefault (config.services.xserver.enable || config.programs.ssh.forwardX11 || config.services.openssh.settings.X11Forwarding);
+      mkDefault (config.services.xserver.enable || config.programs.ssh.forwardX11 == true || config.services.openssh.settings.X11Forwarding);
 
     assertions =
-      [ { assertion = cfg.forwardX11 -> cfg.setXAuthLocation;
+      [ { assertion = cfg.forwardX11 == true -> cfg.setXAuthLocation;
           message = "cannot enable X11 forwarding without setting XAuth location";
         }
       ] ++ flip mapAttrsToList cfg.knownHosts (name: data: {
@@ -298,11 +297,8 @@ in
         AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
         GlobalKnownHostsFile ${concatStringsSep " " knownHostsFiles}
 
-        ${optionalString cfg.setXAuthLocation ''
-          XAuthLocation ${pkgs.xorg.xauth}/bin/xauth
-        ''}
-
-        ForwardX11 ${if cfg.forwardX11 then "yes" else "no"}
+        ${optionalString cfg.setXAuthLocation "XAuthLocation ${pkgs.xorg.xauth}/bin/xauth"}
+        ${lib.optionalString (cfg.forwardX11 != null) "ForwardX11 ${if cfg.forwardX11 then "yes" else "no"}"}
 
         ${optionalString (cfg.pubkeyAcceptedKeyTypes != []) "PubkeyAcceptedKeyTypes ${concatStringsSep "," cfg.pubkeyAcceptedKeyTypes}"}
         ${optionalString (cfg.hostKeyAlgorithms != []) "HostKeyAlgorithms ${concatStringsSep "," cfg.hostKeyAlgorithms}"}
@@ -344,7 +340,7 @@ in
         fi
       '';
 
-    environment.variables.SSH_ASKPASS = optionalString cfg.enableAskPassword askPassword;
+    environment.variables.SSH_ASKPASS = optionalString cfg.enableAskPassword cfg.askPassword;
 
   };
 }
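Since `forwardX11` is now `nullOr bool`, a configuration can leave the `ForwardX11` directive out of the generated `ssh_config` entirely and defer to OpenSSH's own defaults; a minimal sketch:

```nix
{
  # Omit ForwardX11 from /etc/ssh/ssh_config and let ssh decide.
  programs.ssh.forwardX11 = null;

  # Setting it to true keeps the previous behaviour, including pulling in
  # xauth via setXAuthLocation:
  # programs.ssh.forwardX11 = true;
}
```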
diff --git a/nixos/modules/programs/starship.nix b/nixos/modules/programs/starship.nix
index 9dca39da5edc0..bec3900496fd9 100644
--- a/nixos/modules/programs/starship.nix
+++ b/nixos/modules/programs/starship.nix
@@ -44,21 +44,39 @@ in
   config = mkIf cfg.enable {
     programs.bash.${initOption} = ''
       if [[ $TERM != "dumb" ]]; then
-        export STARSHIP_CONFIG=${settingsFile}
+        # don't set STARSHIP_CONFIG automatically if there's a user-specified
+        # config file.  starship appears to use a hardcoded config location
+        # rather than one inside an XDG folder:
+        # https://github.com/starship/starship/blob/686bda1706e5b409129e6694639477a0f8a3f01b/src/configure.rs#L651
+        if [[ ! -f "$HOME/.config/starship.toml" ]]; then
+          export STARSHIP_CONFIG=${settingsFile}
+        fi
         eval "$(${pkgs.starship}/bin/starship init bash)"
       fi
     '';
 
     programs.fish.${initOption} = ''
       if test "$TERM" != "dumb"
-        set -x STARSHIP_CONFIG ${settingsFile}
+        # don't set STARSHIP_CONFIG automatically if there's a user-specified
+        # config file.  starship appears to use a hardcoded config location
+        # rather than one inside an XDG folder:
+        # https://github.com/starship/starship/blob/686bda1706e5b409129e6694639477a0f8a3f01b/src/configure.rs#L651
+        if not test -f "$HOME/.config/starship.toml";
+          set -x STARSHIP_CONFIG ${settingsFile}
+        end
         eval (${pkgs.starship}/bin/starship init fish)
       end
     '';
 
     programs.zsh.${initOption} = ''
       if [[ $TERM != "dumb" ]]; then
-        export STARSHIP_CONFIG=${settingsFile}
+        # don't set STARSHIP_CONFIG automatically if there's a user-specified
+        # config file.  starship appears to use a hardcoded config location
+        # rather than one inside an XDG folder:
+        # https://github.com/starship/starship/blob/686bda1706e5b409129e6694639477a0f8a3f01b/src/configure.rs#L651
+        if [[ ! -f "$HOME/.config/starship.toml" ]]; then
+          export STARSHIP_CONFIG=${settingsFile}
+        fi
         eval "$(${pkgs.starship}/bin/starship init zsh)"
       fi
     '';
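With this change a per-user `~/.config/starship.toml` always wins, and the module-generated file is only a fallback. A sketch of the system-wide side (the `settings` option is the one backing `settingsFile` above; the values are arbitrary examples):

```nix
{
  programs.starship = {
    enable = true;
    # Only used when the user has no ~/.config/starship.toml of their own.
    settings = {
      add_newline = false;
      format = "$directory$git_branch$character";
    };
  };
}
```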
diff --git a/nixos/modules/programs/wayland/river.nix b/nixos/modules/programs/wayland/river.nix
index ec59bd50a0150..995129b9710ae 100644
--- a/nixos/modules/programs/wayland/river.nix
+++ b/nixos/modules/programs/wayland/river.nix
@@ -48,6 +48,9 @@ in {
 
         # To make a river session available if a display manager like SDDM is enabled:
         services.xserver.displayManager.sessionPackages = optionals (cfg.package != null) [ cfg.package ];
+
+        # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050913
+        xdg.portal.config.river.default = mkDefault [ "wlr" "gtk" ];
       }
       (import ./wayland-session.nix { inherit lib pkgs; })
     ]);
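Because the portal backend list is only a `mkDefault`, it can still be overridden from system configuration; for example (a sketch):

```nix
{
  programs.river.enable = true;
  # Replaces the module's default of [ "wlr" "gtk" ] set above.
  xdg.portal.config.river.default = [ "wlr" ];
}
```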
diff --git a/nixos/modules/programs/wayland/sway.nix b/nixos/modules/programs/wayland/sway.nix
index f96c833856dbb..57ee629b28810 100644
--- a/nixos/modules/programs/wayland/sway.nix
+++ b/nixos/modules/programs/wayland/sway.nix
@@ -26,13 +26,28 @@ let
     };
   };
 
-  defaultSwayPackage = pkgs.sway.override {
-    extraSessionCommands = cfg.extraSessionCommands;
-    extraOptions = cfg.extraOptions;
-    withBaseWrapper = cfg.wrapperFeatures.base;
-    withGtkWrapper = cfg.wrapperFeatures.gtk;
-    isNixOS = true;
-  };
+  genFinalPackage = pkg:
+    let
+      expectedArgs = lib.naturalSort [
+        "extraSessionCommands"
+        "extraOptions"
+        "withBaseWrapper"
+        "withGtkWrapper"
+        "isNixOS"
+      ];
+      existedArgs = with lib;
+        naturalSort
+        (intersectLists expectedArgs (attrNames (functionArgs pkg.override)));
+    in if existedArgs != expectedArgs then
+      pkg
+    else
+      pkg.override {
+        extraSessionCommands = cfg.extraSessionCommands;
+        extraOptions = cfg.extraOptions;
+        withBaseWrapper = cfg.wrapperFeatures.base;
+        withGtkWrapper = cfg.wrapperFeatures.gtk;
+        isNixOS = true;
+      };
 in {
   options.programs.sway = {
     enable = mkEnableOption (lib.mdDoc ''
@@ -44,14 +59,16 @@ in {
 
     package = mkOption {
       type = with types; nullOr package;
-      default = defaultSwayPackage;
+      default = pkgs.sway;
+      apply = p: if p == null then null else genFinalPackage p;
       defaultText = literalExpression "pkgs.sway";
       description = lib.mdDoc ''
-        Sway package to use. Will override the options
-        'wrapperFeatures', 'extraSessionCommands', and 'extraOptions'.
-        Set to `null` to not add any Sway package to your
-        path. This should be done if you want to use the Home Manager Sway
-        module to install Sway.
+        Sway package to use. If the package does not contain the override arguments
+        `extraSessionCommands`, `extraOptions`, `withBaseWrapper`, `withGtkWrapper`,
+        `isNixOS`, then the module options {option}`wrapperFeatures`,
+        {option}`extraSessionCommands` and {option}`extraOptions` will have no effect.
+        Set to `null` to not add any Sway package to your path. This should be done if
+        you want to use the Home Manager Sway module to install Sway.
       '';
     };
 
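Because the wrapper arguments are now applied through `apply` only when the package's `.override` accepts them, alternative Sway builds can be dropped in directly; a sketch (assuming a fork such as `pkgs.swayfx` is available):

```nix
{ pkgs, ... }:
{
  programs.sway = {
    enable = true;
    # If this package's .override lacks the wrapper arguments, it is used
    # as-is and wrapperFeatures/extraSessionCommands/extraOptions are ignored.
    package = pkgs.swayfx;
    wrapperFeatures.gtk = true;
  };
}
```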
diff --git a/nixos/modules/programs/winbox.nix b/nixos/modules/programs/winbox.nix
new file mode 100644
index 0000000000000..1337f57839b02
--- /dev/null
+++ b/nixos/modules/programs/winbox.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg  = config.programs.winbox;
+in
+{
+  options.programs.winbox = {
+    enable = lib.mkEnableOption ("MikroTik Winbox");
+    package = lib.mkPackageOption pkgs "winbox" { };
+
+    openFirewall = lib.mkOption {
+      description = ''
+        Whether to open ports for the MikroTik Neighbor Discovery protocol. Required for Winbox neighbor discovery.
+      '';
+      default = false;
+      type = lib.types.bool;
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    networking.firewall.allowedUDPPorts = lib.optionals cfg.openFirewall [ 5678 ];
+  };
+}
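Typical usage of the new module (a minimal sketch using only the options defined above):

```nix
{
  programs.winbox = {
    enable = true;
    # Opens UDP port 5678 for MikroTik Neighbor Discovery.
    openFirewall = true;
  };
}
```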
diff --git a/nixos/modules/security/acme/default.md b/nixos/modules/security/acme/default.md
index 31548ad181a73..51ee0428d84e5 100644
--- a/nixos/modules/security/acme/default.md
+++ b/nixos/modules/security/acme/default.md
@@ -45,7 +45,7 @@ placeholder certificates in place of the real ACME certs. The placeholder
 certs are overwritten when the ACME certs arrive. For
 `foo.example.com` the config would look like this:
 
-```
+```nix
 security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
 services.nginx = {
@@ -88,7 +88,7 @@ This example uses a vhost called `certs.example.com`, with
 the intent that you will generate certs for all your vhosts and redirect
 everyone to HTTPS.
 
-```
+```nix
 security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
 
@@ -136,7 +136,7 @@ services.httpd = {
 
 Now you need to configure ACME to generate a certificate.
 
-```
+```nix
 security.acme.certs."foo.example.com" = {
   webroot = "/var/lib/acme/.challenges";
   email = "foo@example.com";
@@ -167,7 +167,7 @@ see the [lego docs](https://go-acme.github.io/lego/dns/)
 for provider/server specific configuration values. For the sake of these
 docs, we will provide a fully self-hosted example using bind.
 
-```
+```nix
 services.bind = {
   enable = true;
   extraConfig = ''
@@ -199,7 +199,7 @@ The {file}`dnskeys.conf` and {file}`certs.secret`
 must be kept secure and thus you should not keep their contents in your
 Nix config. Instead, generate them one time with a systemd service:
 
-```
+```nix
 systemd.services.dns-rfc2136-conf = {
   requiredBy = ["acme-example.com.service" "bind.service"];
   before = ["acme-example.com.service" "bind.service"];
@@ -250,7 +250,7 @@ first, however instead of setting the options for one certificate
 you will set them as defaults
 (e.g. [](#opt-security.acme.defaults.dnsProvider)).
 
-```
+```nix
 # Configure ACME appropriately
 security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
@@ -287,7 +287,7 @@ There is no way to change the user the ACME module uses (it will always be
 Below is an example configuration for OpenSMTPD, but this pattern
 can be applied to any service.
 
-```
+```nix
 # Configure ACME however you like (DNS or HTTP validation), adding
 # the following configuration for the relevant certificate.
 # Note: You cannot use `systemctl reload` here as that would mean
@@ -340,7 +340,7 @@ to be regenerated. In this scenario lego will produce the error `JWS verificatio
 The solution is to simply delete the associated accounts file and
 re-run the affected service(s).
 
-```
+```shell
 # Find the accounts folder for the certificate
 systemctl cat acme-example.com.service | grep -Po 'accounts/[^:]*'
 export accountdir="$(!!)"
diff --git a/nixos/modules/security/apparmor.nix b/nixos/modules/security/apparmor.nix
index 24b48338ed772..ea1af6c6e2f29 100644
--- a/nixos/modules/security/apparmor.nix
+++ b/nixos/modules/security/apparmor.nix
@@ -164,7 +164,8 @@ in
         "local-fs.target"
         "systemd-journald-audit.socket"
       ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       wantedBy = [ "multi-user.target" ];
       unitConfig = {
         Description="Load AppArmor policies";
diff --git a/nixos/modules/security/auditd.nix b/nixos/modules/security/auditd.nix
index db4b2701ee2e9..253ee1d4dd0e5 100644
--- a/nixos/modules/security/auditd.nix
+++ b/nixos/modules/security/auditd.nix
@@ -13,6 +13,8 @@ with lib;
     systemd.services.auditd = {
       description = "Linux Audit daemon";
       wantedBy = [ "basic.target" ];
+      before = [ "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
 
       unitConfig = {
         ConditionVirtualization = "!container";
@@ -23,7 +25,7 @@ with lib;
       path = [ pkgs.audit ];
 
       serviceConfig = {
-        ExecStartPre="${pkgs.coreutils}/bin/mkdir -p /var/log/audit";
+        ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /var/log/audit";
         ExecStart = "${pkgs.audit}/bin/auditd -l -n -s nochange";
       };
     };
diff --git a/nixos/modules/security/duosec.nix b/nixos/modules/security/duosec.nix
index 2a855a77e3a39..ef76bfeb6d66a 100644
--- a/nixos/modules/security/duosec.nix
+++ b/nixos/modules/security/duosec.nix
@@ -195,7 +195,8 @@ in
 
     systemd.services.login-duo = lib.mkIf cfg.ssh.enable {
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       script = ''
         if test -f "${cfg.secretKeyFile}"; then
@@ -216,7 +217,8 @@ in
 
     systemd.services.pam-duo = lib.mkIf cfg.ssh.enable {
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       script = ''
         if test -f "${cfg.secretKeyFile}"; then
diff --git a/nixos/modules/security/ipa.nix b/nixos/modules/security/ipa.nix
index 69a670cd5e4a3..49226ec38199c 100644
--- a/nixos/modules/security/ipa.nix
+++ b/nixos/modules/security/ipa.nix
@@ -181,25 +181,33 @@ in {
       '';
     };
 
-    system.activationScripts.ipa = stringAfter ["etc"] ''
-      # libcurl requires a hard copy of the certificate
-      if ! ${pkgs.diffutils}/bin/diff ${cfg.certificate} /etc/ipa/ca.crt > /dev/null 2>&1; then
-        rm -f /etc/ipa/ca.crt
-        cp ${cfg.certificate} /etc/ipa/ca.crt
-      fi
-
-      if [ ! -f /etc/krb5.keytab ]; then
-        cat <<EOF
-
-          In order to complete FreeIPA integration, please join the domain by completing the following steps:
-          1. Authenticate as an IPA user authorized to join new hosts, e.g. kinit admin@${cfg.realm}
-          2. Join the domain and obtain the keytab file: ipa-join
-          3. Install the keytab file: sudo install -m 600 krb5.keytab /etc/
-          4. Restart sssd systemd service: sudo systemctl restart sssd
-
-      EOF
-      fi
-    '';
+    systemd.services."ipa-activation" = {
+      wantedBy = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      script = ''
+        # libcurl requires a hard copy of the certificate
+        if ! ${pkgs.diffutils}/bin/diff ${cfg.certificate} /etc/ipa/ca.crt > /dev/null 2>&1; then
+          rm -f /etc/ipa/ca.crt
+          cp ${cfg.certificate} /etc/ipa/ca.crt
+        fi
+
+        if [ ! -f /etc/krb5.keytab ]; then
+          cat <<EOF
+
+            In order to complete FreeIPA integration, please join the domain by completing the following steps:
+            1. Authenticate as an IPA user authorized to join new hosts, e.g. kinit admin@${cfg.realm}
+            2. Join the domain and obtain the keytab file: ipa-join
+            3. Install the keytab file: sudo install -m 600 krb5.keytab /etc/
+            4. Restart sssd systemd service: sudo systemctl restart sssd
+
+        EOF
+        fi
+      '';
+    };
 
     services.sssd.config = ''
       [domain/${cfg.domain}]
diff --git a/nixos/modules/security/sudo-rs.nix b/nixos/modules/security/sudo-rs.nix
index f991675827efb..b4376562c34d7 100644
--- a/nixos/modules/security/sudo-rs.nix
+++ b/nixos/modules/security/sudo-rs.nix
@@ -6,8 +6,6 @@ let
 
   cfg = config.security.sudo-rs;
 
-  inherit (config.security.pam) enableSSHAgentAuth;
-
   toUserString = user: if (isInt user) then "#${toString user}" else "${user}";
   toGroupString = group: if (isInt group) then "%#${toString group}" else "%${group}";
 
diff --git a/nixos/modules/security/wrappers/default.nix b/nixos/modules/security/wrappers/default.nix
index 250f9775be14d..a298686b34e97 100644
--- a/nixos/modules/security/wrappers/default.nix
+++ b/nixos/modules/security/wrappers/default.nix
@@ -278,7 +278,9 @@ in
     systemd.services.suid-sgid-wrappers = {
       description = "Create SUID/SGID Wrappers";
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
+      after = [ "systemd-sysusers.service" ];
       unitConfig.DefaultDependencies = false;
       unitConfig.RequiresMountsFor = [ "/nix/store" "/run/wrappers" ];
       serviceConfig.Type = "oneshot";
diff --git a/nixos/modules/security/wrappers/wrapper.nix b/nixos/modules/security/wrappers/wrapper.nix
index 27d46c630af54..ca4b27bff1801 100644
--- a/nixos/modules/security/wrappers/wrapper.nix
+++ b/nixos/modules/security/wrappers/wrapper.nix
@@ -1,8 +1,8 @@
 { stdenv, unsecvars, linuxHeaders, sourceProg, debug ? false }:
 # For testing:
-# $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { parentWrapperDir = "/run/wrappers"; debug = true; }'
+# $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { sourceProg = "${pkgs.hello}/bin/hello"; debug = true; }'
 stdenv.mkDerivation {
-  name = "security-wrapper";
+  name = "security-wrapper-${baseNameOf sourceProg}";
   buildInputs = [ linuxHeaders ];
   dontUnpack = true;
   CFLAGS = [
diff --git a/nixos/modules/services/admin/pgadmin.nix b/nixos/modules/services/admin/pgadmin.nix
index 390c80d1a2d42..5eaa911e37f18 100644
--- a/nixos/modules/services/admin/pgadmin.nix
+++ b/nixos/modules/services/admin/pgadmin.nix
@@ -3,7 +3,6 @@
 with lib;
 
 let
-  pkg = pkgs.pgadmin4;
   cfg = config.services.pgadmin;
 
   _base = with types; [ int bool str ];
@@ -36,6 +35,8 @@ in
       default = 5050;
     };
 
+    package = mkPackageOptionMD pkgs "pgadmin4" { };
+
     initialEmail = mkOption {
       description = lib.mdDoc "Initial email for the pgAdmin account";
       type = types.str;
@@ -150,7 +151,7 @@ in
           echo "$PW"
           # Retype password:
           echo "$PW"
-        ) | ${pkg}/bin/pgadmin4-setup
+        ) | ${cfg.package}/bin/pgadmin4-setup
       '';
 
       restartTriggers = [
@@ -162,7 +163,7 @@ in
         DynamicUser = true;
         LogsDirectory = "pgadmin";
         StateDirectory = "pgadmin";
-        ExecStart = "${pkg}/bin/pgadmin4";
+        ExecStart = "${cfg.package}/bin/pgadmin4";
       };
     };
 
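The new `package` option makes the pgAdmin derivation swappable; a sketch (`initialEmail` and `initialPasswordFile` are assumed from the existing module, and the secret path is a placeholder):

```nix
{ pkgs, ... }:
{
  services.pgadmin = {
    enable = true;
    initialEmail = "admin@example.com";
    initialPasswordFile = "/run/secrets/pgadmin-password";
    # Defaults to pkgs.pgadmin4; any compatible (e.g. overridden) build works.
    package = pkgs.pgadmin4;
  };
}
```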
diff --git a/nixos/modules/services/audio/mopidy.nix b/nixos/modules/services/audio/mopidy.nix
index 40e8679f53d74..9d8e67b0ea478 100644
--- a/nixos/modules/services/audio/mopidy.nix
+++ b/nixos/modules/services/audio/mopidy.nix
@@ -76,7 +76,7 @@ in {
 
     systemd.services.mopidy = {
       wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" "sound.target" ];
+      after = [ "network-online.target" "sound.target" ];
       description = "mopidy music player daemon";
       serviceConfig = {
         ExecStart = "${mopidyEnv}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)}";
diff --git a/nixos/modules/services/audio/mympd.nix b/nixos/modules/services/audio/mympd.nix
new file mode 100644
index 0000000000000..f1c7197085d7d
--- /dev/null
+++ b/nixos/modules/services/audio/mympd.nix
@@ -0,0 +1,129 @@
+{ pkgs, config, lib, ... }:
+
+let
+  cfg = config.services.mympd;
+in {
+  options = {
+
+    services.mympd = {
+
+      enable = lib.mkEnableOption (lib.mdDoc "MyMPD server");
+
+      package = lib.mkPackageOption pkgs "mympd" {};
+
+      openFirewall = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open the configured HTTP (and, if enabled, SSL) port in the firewall.
+        '';
+      };
+
+      extraGroups = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [ ];
+        example = [ "music" ];
+        description = lib.mdDoc ''
+          Additional groups for the systemd service.
+        '';
+      };
+
+      settings = lib.mkOption {
+        type = lib.types.submodule {
+          freeformType = with lib.types; attrsOf (nullOr (oneOf [ str bool int ]));
+          options = {
+            http_port = lib.mkOption {
+              type = lib.types.port;
+              description = lib.mdDoc ''
+                The HTTP port where myMPD's web interface will be available.
+
+                The HTTPS/SSL port can be configured via the `ssl_port` key of {option}`settings`.
+              '';
+              example = 8080;
+            };
+
+            ssl = lib.mkOption {
+              type = lib.types.bool;
+              description = lib.mdDoc ''
+                Whether to enable listening on the SSL port.
+
+                Refer to <https://jcorporation.github.io/myMPD/configuration/configuration-files#ssl-options>
+                for more information.
+              '';
+              default = false;
+            };
+          };
+        };
+        description = lib.mdDoc ''
+          Manages the configuration files declaratively. For all the configuration
+          options, see <https://jcorporation.github.io/myMPD/configuration/configuration-files>.
+
+          Each key represents the "File" column from the upstream configuration table, and the
+          value is the content of that file.
+        '';
+      };
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.mympd = {
+      # upstream service config: https://github.com/jcorporation/myMPD/blob/master/contrib/initscripts/mympd.service.in
+      after = [ "mpd.service" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = with lib; ''
+        config_dir="/var/lib/mympd/config"
+        mkdir -p "$config_dir"
+
+        ${pipe cfg.settings [
+          (mapAttrsToList (name: value: ''
+            echo -n "${if isBool value then boolToString value else toString value}" > "$config_dir/${name}"
+            ''))
+          (concatStringsSep "\n")
+        ]}
+      '';
+      unitConfig = {
+        Description = "myMPD server daemon";
+        Documentation = "man:mympd(1)";
+      };
+      serviceConfig = {
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        CapabilityBoundingSet = "CAP_NET_BIND_SERVICE";
+        DynamicUser = true;
+        ExecStart = lib.getExe cfg.package;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictRealtime = true;
+        StateDirectory = "mympd";
+        CacheDirectory = "mympd";
+        RestrictAddressFamilies = "AF_INET AF_INET6 AF_NETLINK AF_UNIX";
+        RestrictNamespaces = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "@system-service";
+        SupplementaryGroups = cfg.extraGroups;
+      };
+    };
+
+    networking.firewall = lib.mkMerge [
+      (lib.mkIf cfg.openFirewall {
+        allowedTCPPorts = [ cfg.settings.http_port ];
+      })
+      (lib.mkIf (cfg.openFirewall && cfg.settings.ssl && cfg.settings.ssl_port != null) {
+        allowedTCPPorts = [ cfg.settings.ssl_port ];
+      })
+    ];
+
+  };
+
+  meta.maintainers = [ lib.maintainers.eliandoran ];
+
+}
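A minimal configuration of the new module might look like the following sketch (only options declared above are used; the group name is a placeholder):

```nix
{
  services.mympd = {
    enable = true;
    settings = {
      # Written to /var/lib/mympd/config/http_port by the preStart script.
      http_port = 8080;
      ssl = false;
    };
    # Opens settings.http_port in the firewall.
    openFirewall = true;
    # Extra groups for the DynamicUser, e.g. to read a shared music library.
    extraGroups = [ "music" ];
  };
}
```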
diff --git a/nixos/modules/services/audio/wyoming/faster-whisper.nix b/nixos/modules/services/audio/wyoming/faster-whisper.nix
index eda409f1f8004..dd7f62744cd02 100644
--- a/nixos/modules/services/audio/wyoming/faster-whisper.nix
+++ b/nixos/modules/services/audio/wyoming/faster-whisper.nix
@@ -121,6 +121,7 @@ in
   in mkIf (cfg.servers != {}) {
     systemd.services = mapAttrs' (server: options:
       nameValuePair "wyoming-faster-whisper-${server}" {
+        inherit (options) enable;
         description = "Wyoming faster-whisper server instance ${server}";
         after = [
           "network-online.target"
diff --git a/nixos/modules/services/audio/wyoming/piper.nix b/nixos/modules/services/audio/wyoming/piper.nix
index 698828aa6cbaf..2828fdf078921 100644
--- a/nixos/modules/services/audio/wyoming/piper.nix
+++ b/nixos/modules/services/audio/wyoming/piper.nix
@@ -116,6 +116,7 @@ in
   in mkIf (cfg.servers != {}) {
     systemd.services = mapAttrs' (server: options:
       nameValuePair "wyoming-piper-${server}" {
+        inherit (options) enable;
         description = "Wyoming Piper server instance ${server}";
         after = [
           "network-online.target"
diff --git a/nixos/modules/services/backup/borgbackup.nix b/nixos/modules/services/backup/borgbackup.nix
index 039a5f227ac41..6f4455d3be605 100644
--- a/nixos/modules/services/backup/borgbackup.nix
+++ b/nixos/modules/services/backup/borgbackup.nix
@@ -143,20 +143,15 @@ let
   };
 
   # Paths listed in ReadWritePaths must exist before service is started
-  mkActivationScript = name: cfg:
+  mkTmpfiles = name: cfg:
     let
-      install = "install -o ${cfg.user} -g ${cfg.group}";
-    in
-      nameValuePair "borgbackup-job-${name}" (stringAfter [ "users" ] (''
-        # Ensure that the home directory already exists
-        # We can't assert createHome == true because that's not the case for root
-        cd "${config.users.users.${cfg.user}.home}"
-        # Create each directory separately to prevent root owned parent dirs
-        ${install} -d .config .config/borg
-        ${install} -d .cache .cache/borg
-      '' + optionalString (isLocalPath cfg.repo && !cfg.removableDevice) ''
-        ${install} -d ${escapeShellArg cfg.repo}
-      ''));
+      settings = { inherit (cfg) user group; };
+    in lib.nameValuePair "borgbackup-job-${name}" ({
+      "${config.users.users."${cfg.user}".home}/.config/borg".d = settings;
+      "${config.users.users."${cfg.user}".home}/.cache/borg".d = settings;
+    } // optionalAttrs (isLocalPath cfg.repo && !cfg.removableDevice) {
+      "${cfg.repo}".d = settings;
+    });
 
   mkPassAssertion = name: cfg: {
     assertion = with cfg.encryption;
@@ -602,53 +597,56 @@ in {
           };
 
           extraArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for all {command}`borg` calls the
               service has. Handle with care.
             '';
-            default = "";
-            example = "--remote-path=/path/to/borg";
+            default = [ ];
+            example = [ "--remote-path=/path/to/borg" ];
           };
 
           extraInitArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg init`.
               Can also be set at runtime using `$extraInitArgs`.
             '';
-            default = "";
-            example = "--append-only";
+            default = [ ];
+            example = [ "--append-only" ];
           };
 
           extraCreateArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg create`.
               Can also be set at runtime using `$extraCreateArgs`.
             '';
-            default = "";
-            example = "--stats --checkpoint-interval 600";
+            default = [ ];
+            example = [
+              "--stats"
+              "--checkpoint-interval 600"
+            ];
           };
 
           extraPruneArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg prune`.
               Can also be set at runtime using `$extraPruneArgs`.
             '';
-            default = "";
-            example = "--save-space";
+            default = [ ];
+            example = [ "--save-space" ];
           };
 
           extraCompactArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg compact`.
               Can also be set at runtime using `$extraCompactArgs`.
             '';
-            default = "";
-            example = "--cleanup-commits";
+            default = [ ];
+            example = [ "--cleanup-commits" ];
           };
         };
       }
@@ -757,7 +755,7 @@ in {
         ++ mapAttrsToList mkSourceAssertions jobs
         ++ mapAttrsToList mkRemovableDeviceAssertions jobs;
 
-      system.activationScripts = mapAttrs' mkActivationScript jobs;
+      systemd.tmpfiles.settings = mapAttrs' mkTmpfiles jobs;
 
       systemd.services =
         # A job named "foo" is mapped to systemd.services.borgbackup-job-foo
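With the `coercedTo` types above, the `extra*Args` options now take lists whose elements are shell-escaped individually (plain strings are still accepted); a sketch of the list form (job values are placeholders):

```nix
{
  services.borgbackup.jobs.home = {
    paths = [ "/home" ];
    repo = "/var/backup/borg-home";
    encryption.mode = "none";
    startAt = "daily";
    # Each list element becomes one escaped argument to `borg create`.
    extraCreateArgs = [ "--stats" "--checkpoint-interval" "600" ];
    extraArgs = [ "--remote-path=/path/to/borg" ];
  };
}
```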
diff --git a/nixos/modules/services/backup/btrbk.nix b/nixos/modules/services/backup/btrbk.nix
index 1e90ef54d33f9..364b77b6a21c1 100644
--- a/nixos/modules/services/backup/btrbk.nix
+++ b/nixos/modules/services/backup/btrbk.nix
@@ -6,14 +6,17 @@ let
     concatMapStringsSep
     concatStringsSep
     filterAttrs
+    flatten
+    getAttr
     isAttrs
     literalExpression
     mapAttrs'
     mapAttrsToList
     mkIf
     mkOption
+    optional
     optionalString
-    sort
+    sortOn
     types
     ;
 
@@ -37,7 +40,7 @@ let
   genConfig = set:
     let
       pairs = mapAttrsToList (name: value: { inherit name value; }) set;
-      sortedPairs = sort (a: b: prioOf a < prioOf b) pairs;
+      sortedPairs = sortOn prioOf pairs;
     in
       concatMap genPair sortedPairs;
   genSection = sec: secName: value:
@@ -84,6 +87,18 @@ let
     '';
   };
 
+  streamCompressMap = {
+    gzip = pkgs.gzip;
+    pigz = pkgs.pigz;
+    bzip2 = pkgs.bzip2;
+    pbzip2 = pkgs.pbzip2;
+    bzip3 = pkgs.bzip3;
+    xz = pkgs.xz;
+    lzo = pkgs.lzo;
+    lz4 = pkgs.lz4;
+    zstd = pkgs.zstd;
+  };
+
   cfg = config.services.btrbk;
   sshEnabled = cfg.sshAccess != [ ];
   serviceEnabled = cfg.instances != { };
@@ -94,7 +109,14 @@ in
   options = {
     services.btrbk = {
       extraPackages = mkOption {
-        description = lib.mdDoc "Extra packages for btrbk, like compression utilities for `stream_compress`";
+        description = lib.mdDoc ''
+          Extra packages for btrbk, like compression utilities for `stream_compress`.
+
+          **Note**: This option will be deprecated in future releases.
+          The compression programs required for `stream_compress` are now provided
+          to btrbk automatically, based on the compression method configured in the
+          `services.btrbk.instances.<name>.settings` option.
+        '';
         type = types.listOf types.package;
         default = [ ];
         example = literalExpression "[ pkgs.xz ]";
@@ -124,7 +146,19 @@ in
                   '';
                 };
                 settings = mkOption {
-                  type = let t = types.attrsOf (types.either types.str (t // { description = "instances of this type recursively"; })); in t;
+                  type = types.submodule {
+                    freeformType = let t = types.attrsOf (types.either types.str (t // { description = "instances of this type recursively"; })); in t;
+                    options = {
+                      stream_compress = mkOption {
+                        description = lib.mdDoc ''
+                          Compress the btrfs send stream before transferring it from/to remote locations using a
+                          compression command.
+                        '';
+                        type = types.enum ["gzip" "pigz" "bzip2" "pbzip2" "bzip3" "xz" "lzo" "lz4" "zstd" "no"];
+                        default = "no";
+                      };
+                    };
+                  };
                   default = { };
                   example = {
                     snapshot_preserve_min = "2d";
@@ -169,6 +203,11 @@ in
 
   };
   config = mkIf (sshEnabled || serviceEnabled) {
+
+    warnings = optional (cfg.extraPackages != []) ''
+      The extraPackages option will be deprecated in future releases. Programs required for compression are now selected automatically based on the services.btrbk.instances.<name>.settings.stream_compress option.
+    '';
+
     environment.systemPackages = [ pkgs.btrbk ] ++ cfg.extraPackages;
 
     security.sudo.extraRules = mkIf (sudo_doas == "sudo") [ sudoRule ];
@@ -232,12 +271,15 @@ in
       cfg.instances;
     systemd.services = mapAttrs'
       (
-        name: _: {
+        name: instance: {
           name = "btrbk-${name}";
           value = {
             description = "Takes BTRFS snapshots and maintains retention policies.";
             unitConfig.Documentation = "man:btrbk(1)";
-            path = [ "/run/wrappers" ] ++ cfg.extraPackages;
+            path = [ "/run/wrappers" ]
+              ++ cfg.extraPackages
+              ++ optional (instance.settings.stream_compress != "no")
+                (getAttr instance.settings.stream_compress streamCompressMap);
             serviceConfig = {
               User = "btrbk";
               Group = "btrbk";
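With `stream_compress` declared in `settings`, the matching compression tool is added to the unit's `path` automatically; a sketch (`onCalendar` is assumed from the existing module, the volume/target layout follows btrbk.conf conventions, and the host is a placeholder):

```nix
{
  services.btrbk.instances.remote = {
    onCalendar = "daily";
    settings = {
      # zstd ends up on the btrbk-remote service PATH via streamCompressMap.
      stream_compress = "zstd";
      volume."/mnt/pool" = {
        subvolume = "data";
        target = "ssh://backup.example.com/backups";
      };
    };
  };
}
```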
diff --git a/nixos/modules/services/backup/restic.nix b/nixos/modules/services/backup/restic.nix
index e3eb504e0adfc..b222dd952d159 100644
--- a/nixos/modules/services/backup/restic.nix
+++ b/nixos/modules/services/backup/restic.nix
@@ -384,10 +384,11 @@ in
       ${lib.optionalString (backup.environmentFile != null) "source ${backup.environmentFile}"}
       # set same environment variables as the systemd service
       ${lib.pipe config.systemd.services."restic-backups-${name}".environment [
-        (lib.filterAttrs (_: v: v != null))
+        (lib.filterAttrs (n: v: v != null && n != "PATH"))
         (lib.mapAttrsToList (n: v: "${n}=${v}"))
         (lib.concatStringsSep "\n")
       ]}
+      PATH=${config.systemd.services."restic-backups-${name}".environment.PATH}:$PATH
 
       exec ${resticCmd} $@
     '') (lib.filterAttrs (_: v: v.createWrapper) config.services.restic.backups);
diff --git a/nixos/modules/tasks/snapraid.nix b/nixos/modules/services/backup/snapraid.nix
index 9570c6b76123b..c9b2550e80e81 100644
--- a/nixos/modules/tasks/snapraid.nix
+++ b/nixos/modules/services/backup/snapraid.nix
@@ -2,10 +2,15 @@
 
 with lib;
 
-let cfg = config.snapraid;
+let cfg = config.services.snapraid;
 in
 {
-  options.snapraid = with types; {
+  imports = [
+    # Should have never been on the top-level.
+    (mkRenamedOptionModule [ "snapraid" ] [ "services" "snapraid" ])
+  ];
+
+  options.services.snapraid = with types; {
     enable = mkEnableOption (lib.mdDoc "SnapRAID");
     dataDisks = mkOption {
       default = { };
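Existing configurations that used the old top-level namespace keep evaluating through the rename alias, while new ones should use the `services.` prefix; a sketch:

```nix
{
  # Previously: snapraid.enable = true;
  services.snapraid.enable = true;
}
```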
diff --git a/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixos/modules/services/cluster/kubernetes/flannel.nix
index 11c5adc6a8859..dca8996df0831 100644
--- a/nixos/modules/services/cluster/kubernetes/flannel.nix
+++ b/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -13,6 +13,13 @@ in
   ###### interface
   options.services.kubernetes.flannel = {
     enable = mkEnableOption (lib.mdDoc "flannel networking");
+
+    openFirewallPorts = mkOption {
+      description = lib.mdDoc ''
+        Whether to open the Flannel UDP ports in the firewall on all interfaces.'';
+      type = types.bool;
+      default = true;
+    };
   };
 
   ###### implementation
@@ -38,7 +45,7 @@ in
     };
 
     networking = {
-      firewall.allowedUDPPorts = [
+      firewall.allowedUDPPorts = mkIf cfg.openFirewallPorts [
         8285  # flannel udp
         8472  # flannel vxlan
       ];
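Turning the new option off hands firewall management back to the operator, who can then restrict the flannel ports to a particular interface; a sketch (the interface name is a placeholder):

```nix
{
  services.kubernetes.flannel.openFirewallPorts = false;
  # Only accept flannel UDP/VXLAN traffic on the cluster-facing interface.
  networking.firewall.interfaces."ens4".allowedUDPPorts = [ 8285 8472 ];
}
```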
diff --git a/nixos/modules/services/cluster/kubernetes/pki.nix b/nixos/modules/services/cluster/kubernetes/pki.nix
index 38682701ea151..35151ebd6bd7b 100644
--- a/nixos/modules/services/cluster/kubernetes/pki.nix
+++ b/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -212,7 +212,7 @@ in
 
     services.certmgr = {
       enable = true;
-      package = pkgs.certmgr-selfsigned;
+      package = pkgs.certmgr;
       svcManager = "command";
       specs =
         let
diff --git a/nixos/modules/services/cluster/spark/default.nix b/nixos/modules/services/cluster/spark/default.nix
index 2e3914a734bea..b3e1ac399ae9f 100644
--- a/nixos/modules/services/cluster/spark/default.nix
+++ b/nixos/modules/services/cluster/spark/default.nix
@@ -69,8 +69,8 @@ with lib;
       confDir = mkOption {
         type = types.path;
         description = lib.mdDoc "Spark configuration directory. Spark will use the configuration files (spark-defaults.conf, spark-env.sh, log4j.properties, etc) from this directory.";
-        default = "${cfg.package}/lib/${cfg.package.untarDir}/conf";
-        defaultText = literalExpression ''"''${package}/lib/''${package.untarDir}/conf"'';
+        default = "${cfg.package}/conf";
+        defaultText = literalExpression ''"''${package}/conf"'';
       };
       logDir = mkOption {
         type = types.path;
@@ -111,9 +111,9 @@ with lib;
             Type = "forking";
             User = "spark";
             Group = "spark";
-            WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
-            ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-master.sh";
-            ExecStop  = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-master.sh";
+            WorkingDirectory = "${cfg.package}/";
+            ExecStart = "${cfg.package}/sbin/start-master.sh";
+            ExecStop  = "${cfg.package}/sbin/stop-master.sh";
             TimeoutSec = 300;
             StartLimitBurst=10;
             Restart = "always";
@@ -134,9 +134,9 @@ with lib;
           serviceConfig = {
             Type = "forking";
             User = "spark";
-            WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
-            ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-worker.sh spark://${cfg.worker.master}";
-            ExecStop  = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-worker.sh";
+            WorkingDirectory = "${cfg.package}/";
+            ExecStart = "${cfg.package}/sbin/start-worker.sh spark://${cfg.worker.master}";
+            ExecStop  = "${cfg.package}/sbin/stop-worker.sh";
             TimeoutSec = 300;
             StartLimitBurst=10;
             Restart = "always";
diff --git a/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixos/modules/services/continuous-integration/buildbot/master.nix
index 56abeda3a5cdd..446d19b8fd5a0 100644
--- a/nixos/modules/services/continuous-integration/buildbot/master.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -305,5 +305,5 @@ in {
     '')
   ];
 
-  meta.maintainers = with lib.maintainers; [ mic92 lopsided98 ];
+  meta.maintainers = lib.teams.buildbot.members;
 }
diff --git a/nixos/modules/services/continuous-integration/buildbot/worker.nix b/nixos/modules/services/continuous-integration/buildbot/worker.nix
index 2a836c24dda36..9c7b2bdd06e02 100644
--- a/nixos/modules/services/continuous-integration/buildbot/worker.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/worker.nix
@@ -188,6 +188,6 @@ in {
     };
   };
 
-  meta.maintainers = with lib.maintainers; [ ];
+  meta.maintainers = lib.teams.buildbot.members;
 
 }
diff --git a/nixos/modules/services/continuous-integration/buildkite-agents.nix b/nixos/modules/services/continuous-integration/buildkite-agents.nix
index a35ca4168074f..2e488f83d4c3b 100644
--- a/nixos/modules/services/continuous-integration/buildkite-agents.nix
+++ b/nixos/modules/services/continuous-integration/buildkite-agents.nix
@@ -35,6 +35,12 @@ let
         type = lib.types.str;
       };
 
+      extraGroups = lib.mkOption {
+        default = [ "keys" ];
+        description = lib.mdDoc "Groups the user for this buildkite agent should belong to";
+        type = lib.types.listOf lib.types.str;
+      };
+
       runtimePackages = lib.mkOption {
         default = [ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ];
         defaultText = lib.literalExpression "[ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ]";
@@ -150,7 +156,7 @@ in
       home = cfg.dataDir;
       createHome = true;
       description = "Buildkite agent user";
-      extraGroups = [ "keys" ];
+      extraGroups = cfg.extraGroups;
       isSystemUser = true;
       group = "buildkite-agent-${name}";
     };
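The per-agent `extraGroups` option replaces the hard-coded `[ "keys" ]`; a sketch granting an agent Docker access as well (`tokenPath` is assumed from the existing module and the path is a placeholder):

```nix
{
  services.buildkite-agents.builder = {
    tokenPath = "/run/secrets/buildkite-token";
    # The default is [ "keys" ]; extend it rather than replace it.
    extraGroups = [ "keys" "docker" ];
  };
}
```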
diff --git a/nixos/modules/services/continuous-integration/jenkins/default.nix b/nixos/modules/services/continuous-integration/jenkins/default.nix
index e96743784e047..d69cf4587aaba 100644
--- a/nixos/modules/services/continuous-integration/jenkins/default.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/default.nix
@@ -236,6 +236,7 @@ in {
 
       serviceConfig = {
         User = cfg.user;
+        StateDirectory = mkIf (hasPrefix "/var/lib/jenkins" cfg.home) "jenkins";
       };
     };
   };
diff --git a/nixos/modules/services/databases/aerospike.nix b/nixos/modules/services/databases/aerospike.nix
index 373c8f4bffb0d..4923c0f00ddb4 100644
--- a/nixos/modules/services/databases/aerospike.nix
+++ b/nixos/modules/services/databases/aerospike.nix
@@ -108,6 +108,11 @@ in
     };
     users.groups.aerospike.gid = config.ids.gids.aerospike;
 
+    boot.kernel.sysctl = {
+      "net.core.rmem_max" = mkDefault 15728640;
+      "net.core.wmem_max" = mkDefault 5242880;
+    };
+
     systemd.services.aerospike = rec {
       description = "Aerospike server";
 
@@ -131,14 +136,6 @@ in
           echo "kernel.shmmax too low, setting to 1GB"
           ${pkgs.procps}/bin/sysctl -w kernel.shmmax=1073741824
         fi
-        if [ $(echo "$(cat /proc/sys/net/core/rmem_max) < 15728640" | ${pkgs.bc}/bin/bc) == "1" ]; then
-          echo "increasing socket buffer limit (/proc/sys/net/core/rmem_max): $(cat /proc/sys/net/core/rmem_max) -> 15728640"
-          echo 15728640 > /proc/sys/net/core/rmem_max
-        fi
-        if [ $(echo "$(cat /proc/sys/net/core/wmem_max) <  5242880" | ${pkgs.bc}/bin/bc) == "1"  ]; then
-          echo "increasing socket buffer limit (/proc/sys/net/core/wmem_max): $(cat /proc/sys/net/core/wmem_max) -> 5242880"
-          echo  5242880 > /proc/sys/net/core/wmem_max
-        fi
         install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}"
         install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/smd"
         install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/udf"
diff --git a/nixos/modules/services/databases/postgresql.md b/nixos/modules/services/databases/postgresql.md
index e5e0b7efec29a..7d141f12b5dea 100644
--- a/nixos/modules/services/databases/postgresql.md
+++ b/nixos/modules/services/databases/postgresql.md
@@ -258,7 +258,7 @@ postgresql_15.pkgs.pg_partman        postgresql_15.pkgs.pgroonga
 To add plugins via NixOS configuration, set `services.postgresql.extraPlugins`:
 ```
 services.postgresql.package = pkgs.postgresql_12;
-services.postgresql.extraPlugins = with pkgs.postgresql_12.pkgs; [
+services.postgresql.extraPlugins = ps: with ps; [
   pg_repack
   postgis
 ];
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 690f2d85a4c9a..ed5915735730b 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -18,7 +18,7 @@ let
     in
     if cfg.extraPlugins == []
       then base
-      else base.withPackages (_: cfg.extraPlugins);
+      else base.withPackages cfg.extraPlugins;
 
   toStr = value:
     if true == value then "yes"
@@ -391,12 +391,11 @@ in
       };
 
       extraPlugins = mkOption {
-        type = types.listOf types.path;
-        default = [];
-        example = literalExpression "with pkgs.postgresql_15.pkgs; [ postgis pg_repack ]";
+        type = with types; coercedTo (listOf path) (path: _ignorePg: path) (functionTo (listOf path));
+        default = _: [];
+        example = literalExpression "ps: with ps; [ postgis pg_repack ]";
         description = lib.mdDoc ''
-          List of PostgreSQL plugins. PostgreSQL version for each plugin should
-          match version for `services.postgresql.package` value.
+          List of PostgreSQL plugins.
         '';
       };
 
@@ -405,7 +404,7 @@ in
         default = {};
         description = lib.mdDoc ''
           PostgreSQL configuration. Refer to
-          <https://www.postgresql.org/docs/15/config-setting.html#CONFIG-SETTING-CONFIGURATION-FILE>
+          <https://www.postgresql.org/docs/current/config-setting.html#CONFIG-SETTING-CONFIGURATION-FILE>
           for an overview of `postgresql.conf`.
 
           ::: {.note}
diff --git a/nixos/modules/services/desktops/flatpak.nix b/nixos/modules/services/desktops/flatpak.nix
index d99faf381e019..4c26e6874023a 100644
--- a/nixos/modules/services/desktops/flatpak.nix
+++ b/nixos/modules/services/desktops/flatpak.nix
@@ -35,6 +35,7 @@ in {
     services.dbus.packages = [ pkgs.flatpak ];
 
     systemd.packages = [ pkgs.flatpak ];
+    systemd.tmpfiles.packages = [ pkgs.flatpak ];
 
     environment.profiles = [
       "$HOME/.local/share/flatpak/exports"
diff --git a/nixos/modules/services/desktops/pipewire/pipewire.nix b/nixos/modules/services/desktops/pipewire/pipewire.nix
index 04ac415c177cb..da409030b3a35 100644
--- a/nixos/modules/services/desktops/pipewire/pipewire.nix
+++ b/nixos/modules/services/desktops/pipewire/pipewire.nix
@@ -4,6 +4,8 @@
 with lib;
 
 let
+  json = pkgs.formats.json {};
+  mapToFiles = location: config: concatMapAttrs (name: value: { "pipewire/${location}.conf.d/${name}.conf".source = json.generate "${name}" value;}) config;
   cfg = config.services.pipewire;
   enable32BitAlsaPlugins = cfg.alsa.support32Bit
                            && pkgs.stdenv.isx86_64
@@ -72,15 +74,140 @@ in {
           https://github.com/PipeWire/pipewire/blob/master/NEWS
         '';
       };
+
+      extraConfig = {
+        pipewire = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "10-clock-rate" = {
+              "context.properties" = {
+                "default.clock.rate" = 44100;
+              };
+            };
+            "11-no-upmixing" = {
+              "stream.properties" = {
+                "channelmix.upmix" = false;
+              };
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire server.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire.conf.d`.
+
+            See `man pipewire.conf` for details, and [the PipeWire wiki][wiki] for examples.
+
+            See also:
+            - [PipeWire wiki - virtual devices][wiki-virtual-device] for creating virtual devices or remapping channels
+            - [PipeWire wiki - filter-chain][wiki-filter-chain] for creating more complex processing pipelines
+            - [PipeWire wiki - network][wiki-network] for streaming audio over a network
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-PipeWire
+            [wiki-virtual-device]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Virtual-Devices
+            [wiki-filter-chain]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Filter-Chain
+            [wiki-network]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Network
+          '';
+        };
+        client = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "10-no-resample" = {
+              "stream.properties" = {
+                "resample.disable" = true;
+              };
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire client library, used by most applications.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client.conf.d`.
+
+            See the [PipeWire wiki][wiki] for examples.
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-client
+          '';
+        };
+        client-rt = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "10-alsa-linear-volume" = {
+              "alsa.properties" = {
+                "alsa.volume-method" = "linear";
+              };
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire client library, used by real-time applications and legacy ALSA clients.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client-rt.conf.d`.
+
+            See the [PipeWire wiki][wiki] for examples of general configuration, and [PipeWire wiki - ALSA][wiki-alsa] for ALSA clients.
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-client
+            [wiki-alsa]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-ALSA
+          '';
+        };
+        jack = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "20-hide-midi" = {
+              "jack.properties" = {
+                "jack.show-midi" = false;
+              };
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire JACK server and client library.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/jack.conf.d`.
+
+            See the [PipeWire wiki][wiki] for examples.
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-JACK
+          '';
+        };
+        pipewire-pulse = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "15-force-s16-info" = {
+              "pulse.rules" = [{
+                matches = [
+                  { "application.process.binary" = "my-broken-app"; }
+                ];
+                actions = {
+                  quirks = [ "force-s16-info" ];
+                };
+              }];
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire PulseAudio server.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire-pulse.conf.d`.
+
+            See `man pipewire-pulse.conf` for details, and [the PipeWire wiki][wiki] for examples.
+
+            See also:
+            - [PipeWire wiki - PulseAudio tricks guide][wiki-tricks] for more examples.
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-PulseAudio
+            [wiki-tricks]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Guide-PulseAudio-Tricks
+          '';
+        };
+      };
     };
   };
 
   imports = [
     (lib.mkRemovedOptionModule ["services" "pipewire" "config"] ''
-      Overriding default Pipewire configuration through NixOS options never worked correctly and is no longer supported.
-      Please create drop-in files in /etc/pipewire/pipewire.conf.d/ to make the desired setting changes instead.
+      Overriding default PipeWire configuration through NixOS options never worked correctly and is no longer supported.
+      Please create drop-in configuration files via `services.pipewire.extraConfig` instead.
     '')
-
     (lib.mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
       pipewire-media-session is no longer supported upstream and has been removed.
       Please switch to `services.pipewire.wireplumber` instead.
@@ -133,26 +260,35 @@ in {
     services.udev.packages = [ cfg.package ];
 
     # If any paths are updated here they must also be updated in the package test.
-    environment.etc."alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable {
-      text = ''
-        pcm_type.pipewire {
-          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
-          ${optionalString enable32BitAlsaPlugins
-            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
-        }
-        ctl_type.pipewire {
-          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
-          ${optionalString enable32BitAlsaPlugins
-            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
-        }
-      '';
-    };
-    environment.etc."alsa/conf.d/50-pipewire.conf" = mkIf cfg.alsa.enable {
-      source = "${cfg.package}/share/alsa/alsa.conf.d/50-pipewire.conf";
-    };
-    environment.etc."alsa/conf.d/99-pipewire-default.conf" = mkIf cfg.alsa.enable {
-      source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf";
-    };
+    environment.etc = {
+      "alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable {
+        text = ''
+          pcm_type.pipewire {
+            libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
+            ${optionalString enable32BitAlsaPlugins
+              "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
+          }
+          ctl_type.pipewire {
+            libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
+            ${optionalString enable32BitAlsaPlugins
+              "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
+          }
+        '';
+      };
+
+      "alsa/conf.d/50-pipewire.conf" = mkIf cfg.alsa.enable {
+        source = "${cfg.package}/share/alsa/alsa.conf.d/50-pipewire.conf";
+      };
+
+      "alsa/conf.d/99-pipewire-default.conf" = mkIf cfg.alsa.enable {
+        source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf";
+      };
+    }
+    // mapToFiles "pipewire" cfg.extraConfig.pipewire
+    // mapToFiles "client" cfg.extraConfig.client
+    // mapToFiles "client-rt" cfg.extraConfig.client-rt
+    // mapToFiles "jack" cfg.extraConfig.jack
+    // mapToFiles "pipewire-pulse" cfg.extraConfig.pipewire-pulse;
 
     environment.sessionVariables.LD_LIBRARY_PATH =
       lib.mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
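Taken together, the new `extraConfig` attribute sets become drop-in files under `/etc/pipewire/<section>.conf.d/`; a sketch combining a server and a pipewire-pulse drop-in (values are illustrative):

```nix
{
  services.pipewire = {
    enable = true;
    # Rendered as /etc/pipewire/pipewire.conf.d/92-fixed-rate.conf
    extraConfig.pipewire."92-fixed-rate" = {
      "context.properties"."default.clock.rate" = 48000;
    };
    # Rendered as /etc/pipewire/pipewire-pulse.conf.d/93-no-resample.conf
    extraConfig.pipewire-pulse."93-no-resample" = {
      "stream.properties"."resample.disable" = true;
    };
  };
}
```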
diff --git a/nixos/modules/services/development/livebook.md b/nixos/modules/services/development/livebook.md
index 73ddc57f6179a..5012e977a4f7f 100644
--- a/nixos/modules/services/development/livebook.md
+++ b/nixos/modules/services/development/livebook.md
@@ -18,7 +18,7 @@ which runs the server.
     port = 20123;
     # See note below about security
     environmentFile = pkgs.writeText "livebook.env" ''
-      LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+      LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
     '';
   };
 }
@@ -37,3 +37,14 @@ A better approach would be to put the password in some secure
 user-readable location and set `environmentFile = /home/user/secure/livebook.env`.
 
 :::
+
+### Extra dependencies {#module-services-livebook-extra-dependencies}
+
+By default, the Livebook service runs with a minimal set of dependencies, but
+some features require additional packages. For example, the machine
+learning Kinos require `gcc` and `gnumake`. To add these, use
+`extraPackages`:
+
+```nix
+services.livebook.extraPackages = with pkgs; [ gcc gnumake ];
+```
diff --git a/nixos/modules/services/development/livebook.nix b/nixos/modules/services/development/livebook.nix
index 3991a4125ec39..75729ff28efaf 100644
--- a/nixos/modules/services/development/livebook.nix
+++ b/nixos/modules/services/development/livebook.nix
@@ -12,6 +12,8 @@ in
     # future, this can be changed to a system service.
     enableUserService = mkEnableOption "a user service for Livebook";
 
+    package = mkPackageOption pkgs "livebook" { };
+
     environmentFile = mkOption {
       type = types.path;
       description = lib.mdDoc ''
@@ -63,6 +65,15 @@ in
         }
       '';
     };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = [ ];
+      description = lib.mdDoc ''
+        Extra packages to make available to the Livebook service.
+      '';
+      example = literalExpression "with pkgs; [ gcc gnumake ]";
+    };
   };
 
   config = mkIf cfg.enableUserService {
@@ -79,9 +90,9 @@ in
               sname = cfg.erlang_node_short_name;
             } // cfg.options);
           in
-          "${pkgs.livebook}/bin/livebook server ${args}";
+            "${cfg.package}/bin/livebook server ${args}";
       };
-      path = [ pkgs.bash ];
+      path = [ pkgs.bash ] ++ cfg.extraPackages;
       wantedBy = [ "default.target" ];
     };
   };
diff --git a/nixos/modules/services/development/zammad.nix b/nixos/modules/services/development/zammad.nix
index 87aceddd6635c..c084d6541ad38 100644
--- a/nixos/modules/services/development/zammad.nix
+++ b/nixos/modules/services/development/zammad.nix
@@ -21,6 +21,7 @@ let
     NODE_ENV = "production";
     RAILS_SERVE_STATIC_FILES = "true";
     RAILS_LOG_TO_STDOUT = "true";
+    REDIS_URL = "redis://${cfg.redis.host}:${toString cfg.redis.port}";
   };
   databaseConfig = settingsFormat.generate "database.yml" cfg.database.settings;
 in
@@ -65,6 +66,36 @@ in
         description = lib.mdDoc "Websocket service port.";
       };
 
+      redis = {
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to create a local redis automatically.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "zammad";
+          description = lib.mdDoc ''
+            Name of the redis server. Only used if `createLocally` is set to true.
+          '';
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            Redis server address.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 6379;
+          description = lib.mdDoc "Port of the redis server.";
+        };
+      };
+
       database = {
         type = mkOption {
           type = types.enum [ "PostgreSQL" "MySQL" ];
@@ -206,6 +237,10 @@ in
         assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
         message = "a password cannot be specified if services.zammad.database.createLocally is set to true";
       }
+      {
+        assertion = cfg.redis.createLocally -> cfg.redis.host == "localhost";
+        message = "the redis host must be localhost if services.zammad.redis.createLocally is set to true";
+      }
     ];
 
     services.mysql = optionalAttrs (cfg.database.createLocally && cfg.database.type == "MySQL") {
@@ -231,6 +266,13 @@ in
       ];
     };
 
+    services.redis = optionalAttrs cfg.redis.createLocally {
+      servers."${cfg.redis.name}" = {
+        enable = true;
+        port = cfg.redis.port;
+      };
+    };
+
     systemd.services.zammad-web = {
       inherit environment;
       serviceConfig = serviceConfig // {
@@ -240,6 +282,8 @@ in
       after = [
         "network.target"
         "postgresql.service"
+      ] ++ optionals cfg.redis.createLocally [
+        "redis-${cfg.redis.name}.service"
       ];
       requires = [
         "postgresql.service"
@@ -303,16 +347,15 @@ in
       script = "./script/websocket-server.rb -b ${cfg.host} -p ${toString cfg.websocketPort} start";
     };
 
-    systemd.services.zammad-scheduler = {
-      inherit environment;
-      serviceConfig = serviceConfig // { Type = "forking"; };
+    systemd.services.zammad-worker = {
+      inherit serviceConfig environment;
       after = [ "zammad-web.service" ];
       requires = [ "zammad-web.service" ];
-      description = "Zammad scheduler";
+      description = "Zammad background worker";
       wantedBy = [ "multi-user.target" ];
-      script = "./script/scheduler.rb start";
+      script = "./script/background-worker.rb start";
     };
   };
 
-  meta.maintainers = with lib.maintainers; [ garbas taeer ];
+  meta.maintainers = with lib.maintainers; [ taeer netali ];
 }
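
A sketch of the new Redis wiring; with `createLocally = true` (the default) the module provisions `services.redis.servers."zammad"` on the given port and orders the Zammad units after it.

```
{
  services.zammad = {
    enable = true;
    redis = {
      createLocally = true;  # requires host = "localhost", see the assertion above
      name = "zammad";
      port = 6379;
    };
  };
}
```
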
diff --git a/nixos/modules/services/display-managers/greetd.nix b/nixos/modules/services/display-managers/greetd.nix
index 779e141ca24bd..2212f97a9ffe2 100644
--- a/nixos/modules/services/display-managers/greetd.nix
+++ b/nixos/modules/services/display-managers/greetd.nix
@@ -4,7 +4,7 @@ with lib;
 let
   cfg = config.services.greetd;
   tty = "tty${toString cfg.vt}";
-  settingsFormat = pkgs.formats.toml {};
+  settingsFormat = pkgs.formats.toml { };
 in
 {
   options.services.greetd = {
@@ -27,7 +27,7 @@ in
       '';
     };
 
-    vt = mkOption  {
+    vt = mkOption {
       type = types.int;
       default = 1;
       description = lib.mdDoc ''
@@ -97,12 +97,18 @@ in
 
     systemd.defaultUnit = "graphical.target";
 
+    # Create directories potentially required by supported greeters
+    # See https://github.com/NixOS/nixpkgs/issues/248323
+    systemd.tmpfiles.rules = [
+      "d '/var/cache/tuigreet' - greeter greeter - -"
+    ];
+
     users.users.greeter = {
       isSystemUser = true;
       group = "greeter";
     };
 
-    users.groups.greeter = {};
+    users.groups.greeter = { };
   };
 
   meta.maintainers = with maintainers; [ queezle ];
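
The tmpfiles rule above pre-creates `/var/cache/tuigreet`, which tuigreet needs when run with `--remember`. A sketch of a matching greeter setup, assuming `pkgs.greetd.tuigreet` and a `sway` session command; adjust for your greeter of choice.

```
{ pkgs, ... }: {
  services.greetd = {
    enable = true;
    settings.default_session.command =
      "${pkgs.greetd.tuigreet}/bin/tuigreet --time --remember --cmd sway";
  };
}
```
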
diff --git a/nixos/modules/services/games/teeworlds.nix b/nixos/modules/services/games/teeworlds.nix
index ffef440330c4e..bd0df1ffca578 100644
--- a/nixos/modules/services/games/teeworlds.nix
+++ b/nixos/modules/services/games/teeworlds.nix
@@ -100,7 +100,7 @@ in
 
       serviceConfig = {
         DynamicUser = true;
-        ExecStart = "${pkgs.teeworlds}/bin/teeworlds_srv -f ${teeworldsConf}";
+        ExecStart = "${pkgs.teeworlds-server}/bin/teeworlds_srv -f ${teeworldsConf}";
 
         # Hardening
         CapabilityBoundingSet = false;
diff --git a/nixos/modules/services/hardware/kanata.nix b/nixos/modules/services/hardware/kanata.nix
index 0b77bfbc33b3f..05e76d8432154 100644
--- a/nixos/modules/services/hardware/kanata.nix
+++ b/nixos/modules/services/hardware/kanata.nix
@@ -78,7 +78,13 @@ let
   mkName = name: "kanata-${name}";
 
   mkDevices = devices:
-    optionalString ((length devices) > 0) "linux-dev ${concatStringsSep ":" devices}";
+    let
+      devicesString = pipe devices [
+        (map (device: "\"" + device + "\""))
+        (concatStringsSep " ")
+      ];
+    in
+    optionalString ((length devices) > 0) "linux-dev (${devicesString})";
 
   mkConfig = name: keyboard: pkgs.writeText "${mkName name}-config.kdb" ''
     (defcfg
diff --git a/nixos/modules/services/hardware/keyd.nix b/nixos/modules/services/hardware/keyd.nix
index 724e9b9568478..77297401a51c7 100644
--- a/nixos/modules/services/hardware/keyd.nix
+++ b/nixos/modules/services/hardware/keyd.nix
@@ -143,7 +143,7 @@ in
         RuntimeDirectory = "keyd";
 
         # Hardening
-        CapabilityBoundingSet = "";
+        CapabilityBoundingSet = [ "CAP_SYS_NICE" ];
         DeviceAllow = [
           "char-input rw"
           "/dev/uinput rw"
@@ -152,7 +152,7 @@ in
         PrivateNetwork = true;
         ProtectHome = true;
         ProtectHostname = true;
-        PrivateUsers = true;
+        PrivateUsers = false;
         PrivateMounts = true;
         PrivateTmp = true;
         RestrictNamespaces = true;
@@ -165,9 +165,9 @@ in
         LockPersonality = true;
         ProtectProc = "invisible";
         SystemCallFilter = [
+          "nice"
           "@system-service"
           "~@privileged"
-          "~@resources"
         ];
         RestrictAddressFamilies = [ "AF_UNIX" ];
         RestrictSUIDSGID = true;
diff --git a/nixos/modules/services/hardware/power-profiles-daemon.nix b/nixos/modules/services/hardware/power-profiles-daemon.nix
index 101da01b4a712..1d84bf8ac937c 100644
--- a/nixos/modules/services/hardware/power-profiles-daemon.nix
+++ b/nixos/modules/services/hardware/power-profiles-daemon.nix
@@ -1,10 +1,7 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.services.power-profiles-daemon;
-  package = pkgs.power-profiles-daemon;
 in
 
 {
@@ -15,8 +12,8 @@ in
 
     services.power-profiles-daemon = {
 
-      enable = mkOption {
-        type = types.bool;
+      enable = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           Whether to enable power-profiles-daemon, a DBus daemon that allows
@@ -24,6 +21,8 @@ in
         '';
       };
 
+      package = lib.mkPackageOption pkgs "power-profiles-daemon" { };
+
     };
 
   };
@@ -31,7 +30,7 @@ in
 
   ###### implementation
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
 
     assertions = [
       { assertion = !config.services.tlp.enable;
@@ -42,13 +41,13 @@ in
       }
     ];
 
-    environment.systemPackages = [ package ];
+    environment.systemPackages = [ cfg.package ];
 
-    services.dbus.packages = [ package ];
+    services.dbus.packages = [ cfg.package ];
 
-    services.udev.packages = [ package ];
+    services.udev.packages = [ cfg.package ];
 
-    systemd.packages = [ package ];
+    systemd.packages = [ cfg.package ];
 
   };
 
diff --git a/nixos/modules/services/hardware/sane.nix b/nixos/modules/services/hardware/sane.nix
index 8408844c4f943..8f64afe60734c 100644
--- a/nixos/modules/services/hardware/sane.nix
+++ b/nixos/modules/services/hardware/sane.nix
@@ -4,7 +4,7 @@ with lib;
 
 let
 
-  pkg = pkgs.sane-backends.override {
+  pkg = config.hardware.sane.backends-package.override {
     scanSnapDriversUnfree = config.hardware.sane.drivers.scanSnap.enable;
     scanSnapDriversPackage = config.hardware.sane.drivers.scanSnap.package;
   };
@@ -57,6 +57,13 @@ in
       '';
     };
 
+    hardware.sane.backends-package = mkOption {
+      type = types.package;
+      default = pkgs.sane-backends;
+      defaultText = literalExpression "pkgs.sane-backends";
+      description = lib.mdDoc "Backends driver package to use.";
+    };
+
     hardware.sane.snapshot = mkOption {
       type = types.bool;
       default = false;
diff --git a/nixos/modules/services/hardware/thermald.nix b/nixos/modules/services/hardware/thermald.nix
index 7ae602823cd65..a4839f326cc45 100644
--- a/nixos/modules/services/hardware/thermald.nix
+++ b/nixos/modules/services/hardware/thermald.nix
@@ -19,6 +19,12 @@ in
         '';
       };
 
+      ignoreCpuidCheck = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to ignore the cpuid check to allow running on unsupported platforms.";
+      };
+
       configFile = mkOption {
         type = types.nullOr types.path;
         default = null;
@@ -42,6 +48,7 @@ in
           ${cfg.package}/sbin/thermald \
             --no-daemon \
             ${optionalString cfg.debug "--loglevel=debug"} \
+            ${optionalString cfg.ignoreCpuidCheck "--ignore-cpuid-check"} \
             ${optionalString (cfg.configFile != null) "--config-file ${cfg.configFile}"} \
             --dbus-enable \
             --adaptive
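
A short sketch of the new flag; `ignoreCpuidCheck` simply appends `--ignore-cpuid-check` to the thermald invocation for CPUs the daemon does not recognise.

```
{
  services.thermald = {
    enable = true;
    ignoreCpuidCheck = true;
  };
}
```
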
diff --git a/nixos/modules/services/hardware/udev.nix b/nixos/modules/services/hardware/udev.nix
index 08ca7a0d247d0..670b9087f1107 100644
--- a/nixos/modules/services/hardware/udev.nix
+++ b/nixos/modules/services/hardware/udev.nix
@@ -112,7 +112,8 @@ let
       echo "OK"
 
       filesToFixup="$(for i in "$out"/*; do
-        grep -l '\B\(/usr\)\?/s\?bin' "$i" || :
+        # list all files referring to (/usr)/bin paths, but allow references to /bin/sh.
+        grep -P -l '\B(?!\/bin\/sh\b)(\/usr)?\/bin(?:\/.*)?' "$i" || :
       done)"
 
       if [ -n "$filesToFixup" ]; then
@@ -222,6 +223,9 @@ in
         description = lib.mdDoc ''
           Packages added to the {env}`PATH` environment variable when
           executing programs from Udev rules.
+
+          coreutils, gnu{sed,grep}, util-linux and config.systemd.package are
+          automatically included.
         '';
       };
 
diff --git a/nixos/modules/services/hardware/vdr.nix b/nixos/modules/services/hardware/vdr.nix
index afa64fa16c4a6..689d83f7eedcd 100644
--- a/nixos/modules/services/hardware/vdr.nix
+++ b/nixos/modules/services/hardware/vdr.nix
@@ -1,18 +1,15 @@
 { config, lib, pkgs, ... }:
-
-with lib;
-
 let
   cfg = config.services.vdr;
-  libDir = "/var/lib/vdr";
-in {
-
-  ###### interface
 
+  inherit (lib)
+    mkEnableOption mkPackageOption mkOption types mkIf optional mdDoc;
+in
+{
   options = {
 
     services.vdr = {
-      enable = mkEnableOption (lib.mdDoc "VDR. Please put config into ${libDir}");
+      enable = mkEnableOption (mdDoc "VDR, the Video Disk Recorder");
 
       package = mkPackageOption pkgs "vdr" {
         example = "wrapVdr.override { plugins = with pkgs.vdrPlugins; [ hello ]; }";
@@ -21,58 +18,84 @@ in {
       videoDir = mkOption {
         type = types.path;
         default = "/srv/vdr/video";
-        description = lib.mdDoc "Recording directory";
+        description = mdDoc "Recording directory";
       };
 
       extraArguments = mkOption {
         type = types.listOf types.str;
-        default = [];
-        description = lib.mdDoc "Additional command line arguments to pass to VDR.";
+        default = [ ];
+        description = mdDoc "Additional command line arguments to pass to VDR.";
+      };
+
+      enableLirc = mkEnableOption (mdDoc "LIRC");
+
+      user = mkOption {
+        type = types.str;
+        default = "vdr";
+        description = mdDoc ''
+          User under which the VDR service runs.
+        '';
       };
 
-      enableLirc = mkEnableOption (lib.mdDoc "LIRC");
+      group = mkOption {
+        type = types.str;
+        default = "vdr";
+        description = mdDoc ''
+          Group under which the VDR service runs.
+        '';
+      };
     };
+
   };
 
-  ###### implementation
+  config = mkIf cfg.enable {
 
-  config = mkIf cfg.enable (mkMerge [{
     systemd.tmpfiles.rules = [
-      "d ${cfg.videoDir} 0755 vdr vdr -"
-      "Z ${cfg.videoDir} - vdr vdr -"
+      "d ${cfg.videoDir} 0755 ${cfg.user} ${cfg.group} -"
+      "Z ${cfg.videoDir} - ${cfg.user} ${cfg.group} -"
     ];
 
     systemd.services.vdr = {
       description = "VDR";
       wantedBy = [ "multi-user.target" ];
+      wants = optional cfg.enableLirc "lircd.service";
+      after = [ "network.target" ]
+        ++ optional cfg.enableLirc "lircd.service";
       serviceConfig = {
-        ExecStart = ''
-          ${cfg.package}/bin/vdr \
-            --video="${cfg.videoDir}" \
-            --config="${libDir}" \
-            ${escapeShellArgs cfg.extraArguments}
-        '';
-        User = "vdr";
+        ExecStart =
+          let
+            args = [
+              "--video=${cfg.videoDir}"
+            ]
+            ++ optional cfg.enableLirc "--lirc=${config.passthru.lirc.socket}"
+            ++ cfg.extraArguments;
+          in
+          "${cfg.package}/bin/vdr ${lib.escapeShellArgs args}";
+        User = cfg.user;
+        Group = cfg.group;
         CacheDirectory = "vdr";
         StateDirectory = "vdr";
+        RuntimeDirectory = "vdr";
         Restart = "on-failure";
       };
     };
 
-    users.users.vdr = {
-      group = "vdr";
-      home = libDir;
-      isSystemUser = true;
+    environment.systemPackages = [ cfg.package ];
+
+    users.users = mkIf (cfg.user == "vdr") {
+      vdr = {
+        inherit (cfg) group;
+        home = "/run/vdr";
+        isSystemUser = true;
+        extraGroups = [
+          "video"
+          "audio"
+        ]
+        ++ optional cfg.enableLirc "lirc";
+      };
     };
 
-    users.groups.vdr = {};
-  }
+    users.groups = mkIf (cfg.group == "vdr") { vdr = { }; };
 
-  (mkIf cfg.enableLirc {
-    services.lirc.enable = true;
-    users.users.vdr.extraGroups = [ "lirc" ];
-    services.vdr.extraArguments = [
-      "--lirc=${config.passthru.lirc.socket}"
-    ];
-  })]);
+  };
 }
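
A sketch of the reworked options; `user` and `group` default to `vdr` and are omitted here, and `enableLirc` now only adds the unit ordering, the `lirc` group and the `--lirc` argument.

```
{
  services.vdr = {
    enable = true;
    videoDir = "/srv/vdr/video";
    enableLirc = true;
  };
}
```
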
diff --git a/nixos/modules/services/home-automation/home-assistant.nix b/nixos/modules/services/home-automation/home-assistant.nix
index 6aa0ae9eba47a..bc470576b759b 100644
--- a/nixos/modules/services/home-automation/home-assistant.nix
+++ b/nixos/modules/services/home-automation/home-assistant.nix
@@ -11,14 +11,12 @@ let
   # options shown in settings.
   # We post-process the result to add support for YAML functions, like secrets or includes, see e.g.
   # https://www.home-assistant.io/docs/configuration/secrets/
-  filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) cfg.config or {};
+  filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) (lib.recursiveUpdate customLovelaceModulesResources (cfg.config or {}));
   configFile = pkgs.runCommandLocal "configuration.yaml" { } ''
     cp ${format.generate "configuration.yaml" filteredConfig} $out
     sed -i -e "s/'\!\([a-z_]\+\) \(.*\)'/\!\1 \2/;s/^\!\!/\!/;" $out
   '';
-  lovelaceConfig = if (cfg.lovelaceConfig == null) then {}
-    else (lib.recursiveUpdate customLovelaceModulesResources cfg.lovelaceConfig);
-  lovelaceConfigFile = format.generate "ui-lovelace.yaml" lovelaceConfig;
+  lovelaceConfigFile = format.generate "ui-lovelace.yaml" cfg.lovelaceConfig;
 
   # Components advertised by the home-assistant package
   availableComponents = cfg.package.availableComponents;
@@ -77,7 +75,7 @@ let
   # Create parts of the lovelace config that reference lovelave modules as resources
   customLovelaceModulesResources = {
     lovelace.resources = map (card: {
-      url = "/local/nixos-lovelace-modules/${card.entrypoint or card.pname}.js?${card.version}";
+      url = "/local/nixos-lovelace-modules/${card.entrypoint or (card.pname + ".js")}?${card.version}";
       type = "module";
     }) cfg.customLovelaceModules;
   };
@@ -159,7 +157,7 @@ in {
       default = [];
       example = literalExpression ''
         with pkgs.home-assistant-custom-components; [
-          prometheus-sensor
+          prometheus_sensor
         ];
       '';
       description = lib.mdDoc ''
@@ -470,8 +468,8 @@ in {
           mkdir -p "${cfg.configDir}/custom_components"
 
           # remove components symlinked in from below the /nix/store
-          components="$(find "${cfg.configDir}/custom_components" -maxdepth 1 -type l)"
-          for component in "$components"; do
+          readarray -d "" components < <(find "${cfg.configDir}/custom_components" -maxdepth 1 -type l -print0)
+          for component in "''${components[@]}"; do
             if [[ "$(readlink "$component")" =~ ^${escapeShellArg builtins.storeDir} ]]; then
               rm "$component"
             fi
@@ -525,7 +523,6 @@ in {
           "bluetooth_tracker"
           "bthome"
           "default_config"
-          "eq3btsmart"
           "eufylife_ble"
           "esphome"
           "fjaraskupan"
diff --git a/nixos/modules/services/logging/logcheck.nix b/nixos/modules/services/logging/logcheck.nix
index 8a277cea6e461..5d87fc87d4161 100644
--- a/nixos/modules/services/logging/logcheck.nix
+++ b/nixos/modules/services/logging/logcheck.nix
@@ -220,10 +220,16 @@ in
       logcheck = {};
     };
 
-    system.activationScripts.logcheck = ''
-      mkdir -m 700 -p /var/{lib,lock}/logcheck
-      chown ${cfg.user} /var/{lib,lock}/logcheck
-    '';
+    systemd.tmpfiles.settings.logcheck = {
+      "/var/lib/logcheck".d = {
+        mode = "700";
+        inherit (cfg) user;
+      };
+      "/var/lock/logcheck".d = {
+        mode = "700";
+        inherit (cfg) user;
+      };
+    };
 
     services.cron.systemCronJobs =
         let withTime = name: {timeArgs, ...}: timeArgs != null;
diff --git a/nixos/modules/services/logging/vector.nix b/nixos/modules/services/logging/vector.nix
index 48f9eeb4ce8f0..9ccf8a4fa0610 100644
--- a/nixos/modules/services/logging/vector.nix
+++ b/nixos/modules/services/logging/vector.nix
@@ -51,13 +51,17 @@ in
         {
           ExecStart = "${getExe cfg.package} --config ${validateConfig conf}";
           DynamicUser = true;
-          Restart = "no";
+          Restart = "always";
           StateDirectory = "vector";
           ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
           AmbientCapabilities = "CAP_NET_BIND_SERVICE";
           # This group is required for accessing journald.
           SupplementaryGroups = mkIf cfg.journaldAccess "systemd-journal";
         };
+      unitConfig = {
+        StartLimitIntervalSec = 10;
+        StartLimitBurst = 5;
+      };
     };
   };
 }
diff --git a/nixos/modules/services/mail/postfix.nix b/nixos/modules/services/mail/postfix.nix
index 23c47aaca7e23..209e066a19ef8 100644
--- a/nixos/modules/services/mail/postfix.nix
+++ b/nixos/modules/services/mail/postfix.nix
@@ -747,7 +747,7 @@ in
 
             ${concatStringsSep "\n" (mapAttrsToList (to: from: ''
               ln -sf ${from} /var/lib/postfix/conf/${to}
-              ${pkgs.postfix}/bin/postalias /var/lib/postfix/conf/${to}
+              ${pkgs.postfix}/bin/postalias -o -p /var/lib/postfix/conf/${to}
             '') cfg.aliasFiles)}
             ${concatStringsSep "\n" (mapAttrsToList (to: from: ''
               ln -sf ${from} /var/lib/postfix/conf/${to}
@@ -779,6 +779,19 @@ in
             ExecStart = "${pkgs.postfix}/bin/postfix start";
             ExecStop = "${pkgs.postfix}/bin/postfix stop";
             ExecReload = "${pkgs.postfix}/bin/postfix reload";
+
+            # Hardening
+            PrivateTmp = true;
+            PrivateDevices = true;
+            ProtectSystem = "full";
+            CapabilityBoundingSet = [ "~CAP_NET_ADMIN CAP_SYS_ADMIN CAP_SYS_BOOT CAP_SYS_MODULE" ];
+            MemoryDenyWriteExecute = true;
+            ProtectKernelModules = true;
+            ProtectKernelTunables = true;
+            ProtectControlGroups = true;
+            RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_NETLINK" "AF_UNIX" ];
+            RestrictNamespaces = true;
+            RestrictRealtime = true;
           };
         };
 
diff --git a/nixos/modules/services/mail/rspamd-trainer.nix b/nixos/modules/services/mail/rspamd-trainer.nix
new file mode 100644
index 0000000000000..bb78ddf9dd471
--- /dev/null
+++ b/nixos/modules/services/mail/rspamd-trainer.nix
@@ -0,0 +1,76 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.rspamd-trainer;
+  format = pkgs.formats.toml { };
+
+in {
+  options.services.rspamd-trainer = {
+
+    enable = mkEnableOption (mdDoc "Spam/ham trainer for rspamd");
+
+    settings = mkOption {
+      default = { };
+      description = mdDoc ''
+        IMAP authentication configuration for rspamd-trainer. For supplying
+        the IMAP password, use the `secrets` option.
+      '';
+      type = types.submodule {
+        freeformType = format.type;
+      };
+      example = literalExpression ''
+        {
+          HOST = "localhost";
+          USERNAME = "spam@example.com";
+          INBOXPREFIX = "INBOX/";
+        }
+      '';
+    };
+
+    secrets = lib.mkOption {
+      type = with types; listOf path;
+      description = lib.mdDoc ''
+        A list of files containing the various secrets. Should be in the
+        format expected by systemd's `EnvironmentFile` directive. For the
+        IMAP account password use `PASSWORD = mypassword`.
+      '';
+      default = [ ];
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd = {
+      services.rspamd-trainer = {
+        description = "Spam/ham trainer for rspamd";
+        serviceConfig = {
+          ExecStart = "${pkgs.rspamd-trainer}/bin/rspamd-trainer";
+          WorkingDirectory = "/var/lib/rspamd-trainer";
+          StateDirectory = [ "rspamd-trainer/log" ];
+          Type = "oneshot";
+          DynamicUser = true;
+          EnvironmentFile = [
+            ( format.generate "rspamd-trainer-env" cfg.settings )
+            cfg.secrets
+          ];
+        };
+      };
+      timers."rspamd-trainer" = {
+        wantedBy = [ "timers.target" ];
+        timerConfig = {
+          OnBootSec = "10m";
+          OnUnitActiveSec = "10m";
+          Unit = "rspamd-trainer.service";
+        };
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ onny ];
+
+}
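
A configuration sketch for the new module; the host name and secret path are placeholders, and the secret file is expected to contain a line such as `PASSWORD=...` in `EnvironmentFile` syntax.

```
{
  services.rspamd-trainer = {
    enable = true;
    settings = {
      HOST = "mail.example.com";
      USERNAME = "spam@example.com";
      INBOXPREFIX = "INBOX/";
    };
    secrets = [ "/run/secrets/rspamd-trainer" ];
  };
}
```
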
diff --git a/nixos/modules/services/matrix/matrix-sliding-sync.nix b/nixos/modules/services/matrix/matrix-sliding-sync.nix
index 295be0c6bf167..8b22cd7dba802 100644
--- a/nixos/modules/services/matrix/matrix-sliding-sync.nix
+++ b/nixos/modules/services/matrix/matrix-sliding-sync.nix
@@ -1,10 +1,14 @@
 { config, lib, pkgs, ... }:
 
 let
-  cfg = config.services.matrix-synapse.sliding-sync;
+  cfg = config.services.matrix-sliding-sync;
 in
 {
-  options.services.matrix-synapse.sliding-sync = {
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "matrix-synapse" "sliding-sync" ] [ "services" "matrix-sliding-sync" ])
+  ];
+
+  options.services.matrix-sliding-sync = {
     enable = lib.mkEnableOption (lib.mdDoc "sliding sync");
 
     package = lib.mkPackageOption pkgs "matrix-sliding-sync" { };
@@ -83,6 +87,7 @@ in
     systemd.services.matrix-sliding-sync = rec {
       after =
         lib.optional cfg.createDatabase "postgresql.service"
+        ++ lib.optional config.services.dendrite.enable "dendrite.service"
         ++ lib.optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit;
       wants = after;
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/matrix/maubot.nix b/nixos/modules/services/matrix/maubot.nix
index 7d392c22983b4..bc96ca03b1fc7 100644
--- a/nixos/modules/services/matrix/maubot.nix
+++ b/nixos/modules/services/matrix/maubot.nix
@@ -42,7 +42,7 @@ let
       database = lib.last (lib.splitString "/" noSchema);
     };
 
-  postgresDBs = [
+  postgresDBs = builtins.filter isPostgresql [
     cfg.settings.database
     cfg.settings.crypto_database
     cfg.settings.plugin_databases.postgres
diff --git a/nixos/modules/services/matrix/synapse.md b/nixos/modules/services/matrix/synapse.md
index 58be24204fcfe..f270be8c8d781 100644
--- a/nixos/modules/services/matrix/synapse.md
+++ b/nixos/modules/services/matrix/synapse.md
@@ -16,13 +16,13 @@ around Matrix.
 
 ## Synapse Homeserver {#module-services-matrix-synapse}
 
-[Synapse](https://github.com/matrix-org/synapse) is
+[Synapse](https://github.com/element-hq/synapse) is
 the reference homeserver implementation of Matrix from the core development
 team at matrix.org. The following configuration example will set up a
 synapse server for the `example.org` domain, served from
 the host `myhostname.example.org`. For more information,
 please refer to the
-[installation instructions of Synapse](https://matrix-org.github.io/synapse/latest/setup/installation.html) .
+[installation instructions of Synapse](https://element-hq.github.io/synapse/latest/setup/installation.html) .
 ```
 { pkgs, lib, config, ... }:
 let
@@ -70,7 +70,7 @@ in {
         # the domain (i.e. example.org from @foo:example.org) and the federation port
         # is 8448.
         # Further reference can be found in the docs about delegation under
-        # https://matrix-org.github.io/synapse/latest/delegate.html
+        # https://element-hq.github.io/synapse/latest/delegate.html
         locations."= /.well-known/matrix/server".extraConfig = mkWellKnown serverConfig;
         # This is usually needed for homeserver discovery (from e.g. other Matrix clients).
         # Further reference can be found in the upstream docs at
@@ -169,7 +169,7 @@ in an additional file like this:
 ::: {.note}
 It's also possible to user alternative authentication mechanism such as
 [LDAP (via `matrix-synapse-ldap3`)](https://github.com/matrix-org/matrix-synapse-ldap3)
-or [OpenID](https://matrix-org.github.io/synapse/latest/openid.html).
+or [OpenID](https://element-hq.github.io/synapse/latest/openid.html).
 :::
 
 ## Element (formerly known as Riot) Web Client {#module-services-matrix-element-web}
diff --git a/nixos/modules/services/matrix/synapse.nix b/nixos/modules/services/matrix/synapse.nix
index 9cc769c2d0db7..50019d2a25cb5 100644
--- a/nixos/modules/services/matrix/synapse.nix
+++ b/nixos/modules/services/matrix/synapse.nix
@@ -446,7 +446,7 @@ in {
         default = { };
         description = mdDoc ''
           The primary synapse configuration. See the
-          [sample configuration](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_config.yaml)
+          [sample configuration](https://github.com/element-hq/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_config.yaml)
           for possible values.
 
           Secrets should be passed in by using the `extraConfigFiles` option.
@@ -749,7 +749,7 @@ in {
                     by the module, but in practice it broke on runtime and as a result, no URL
                     preview worked anywhere if this was set.
 
-                    See https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#url_preview_url_blacklist
+                    See https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#url_preview_url_blacklist
                     on how to configure it properly.
                   ''))
                   (types.attrsOf types.str));
@@ -873,7 +873,7 @@ in {
                 Redis configuration for synapse.
 
                 See the
-                [upstream documentation](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/usage/configuration/config_documentation.md#redis)
+                [upstream documentation](https://github.com/element-hq/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/usage/configuration/config_documentation.md#redis)
                 for available options.
               '';
             };
@@ -886,7 +886,7 @@ in {
         description = lib.mdDoc ''
           Options for configuring workers. Worker support will be enabled if at least one worker is configured here.
 
-          See the [worker documention](https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration)
+          See the [worker documentation](https://element-hq.github.io/synapse/latest/workers.html#worker-configuration)
           for possible options for each worker. Worker-specific options overriding the shared homeserver configuration can be
           specified here for each worker.
 
@@ -900,9 +900,9 @@ in {
             using [`services.matrix-synapse.configureRedisLocally`](#opt-services.matrix-synapse.configureRedisLocally).
 
             Workers also require a proper reverse proxy setup to direct incoming requests to the appropriate process. See
-            the [reverse proxy documentation](https://matrix-org.github.io/synapse/latest/reverse_proxy.html) for a
+            the [reverse proxy documentation](https://element-hq.github.io/synapse/latest/reverse_proxy.html) for a
             general reverse proxying setup and
-            the [worker documentation](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications)
+            the [worker documentation](https://element-hq.github.io/synapse/latest/workers.html#available-worker-applications)
             for the available endpoints per worker application.
           :::
         '';
@@ -932,7 +932,7 @@ in {
                 The file for log configuration.
 
                 See the [python documentation](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema)
-                for the schema and the [upstream repository](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_log_config.yaml)
+                for the schema and the [upstream repository](https://github.com/element-hq/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_log_config.yaml)
                 for an example.
               '';
             };
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index f4305bea2ad76..d0135b2ba7acd 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -234,6 +234,13 @@ in
         description = lib.mdDoc "Path to the git repositories.";
       };
 
+      camoHmacKeyFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/var/lib/secrets/gitea/camoHmacKey";
+        description = lib.mdDoc "Path to a file containing the camo HMAC key.";
+      };
+
       mailerPasswordFile = mkOption {
         type = types.nullOr types.str;
         default = null;
@@ -429,6 +436,10 @@ in
         LFS_JWT_SECRET = "#lfsjwtsecret#";
       };
 
+      camo = mkIf (cfg.camoHmacKeyFile != null) {
+        HMAC_KEY = "#hmackey#";
+      };
+
       session = {
         COOKIE_NAME = lib.mkDefault "session";
       };
@@ -570,6 +581,10 @@ in
               ${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
             ''}
 
+            ${lib.optionalString (cfg.camoHmacKeyFile != null) ''
+              ${replaceSecretBin} '#hmackey#' '${cfg.camoHmacKeyFile}' '${runConfig}'
+            ''}
+
             ${lib.optionalString (cfg.mailerPasswordFile != null) ''
               ${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
             ''}
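
A sketch of the new option, reusing the example path from above; at startup the `#hmackey#` placeholder in the generated configuration is replaced with the contents of the referenced file.

```
{
  services.gitea = {
    enable = true;
    camoHmacKeyFile = "/var/lib/secrets/gitea/camoHmacKey";
  };
}
```
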
diff --git a/nixos/modules/services/misc/guix/default.nix b/nixos/modules/services/misc/guix/default.nix
index 00e84dc745541..7174ff36b7090 100644
--- a/nixos/modules/services/misc/guix/default.nix
+++ b/nixos/modules/services/misc/guix/default.nix
@@ -22,11 +22,19 @@ let
       })
       (builtins.genList guixBuildUser numberOfUsers));
 
-  # A set of Guix user profiles to be linked at activation.
+  # A set of Guix user profiles to be linked at activation. All of these should
+  # be default profiles managed by the Guix CLI; the profiles are located in
+  # `${cfg.stateDir}/profiles/per-user/$USER/$PROFILE`.
   guixUserProfiles = {
-    # The current Guix profile that is created through `guix pull`.
+    # The default Guix profile managed by `guix pull`. Note that this should be
+    # the profile with the highest precedence in `PATH` so that users get their
+    # updated versions of the `guix` CLI.
     "current-guix" = "\${XDG_CONFIG_HOME}/guix/current";
 
+    # The default Guix home profile. This profile contains more than just exports,
+    # such as an activation script at `$GUIX_HOME_PROFILE/activate`.
+    "guix-home" = "$HOME/.guix-home/profile";
+
     # The default Guix profile similar to $HOME/.nix-profile from Nix.
     "guix-profile" = "$HOME/.guix-profile";
   };
@@ -228,14 +236,8 @@ in
         description = "Guix daemon socket";
         before = [ "multi-user.target" ];
         listenStreams = [ "${cfg.stateDir}/guix/daemon-socket/socket" ];
-        unitConfig = {
-          RequiresMountsFor = [
-            cfg.storeDir
-            cfg.stateDir
-          ];
-          ConditionPathIsReadWrite = "${cfg.stateDir}/guix/daemon-socket";
-        };
-        wantedBy = [ "socket.target" ];
+        unitConfig.RequiresMountsFor = [ cfg.storeDir cfg.stateDir ];
+        wantedBy = [ "sockets.target" ];
       };
 
       systemd.mounts = [{
@@ -262,20 +264,31 @@ in
       # ephemeral setups where only certain part of the filesystem is
       # persistent (e.g., "Erase my darlings"-type of setup).
       system.userActivationScripts.guix-activate-user-profiles.text = let
+        guixProfile = profile: "${cfg.stateDir}/guix/profiles/per-user/\${USER}/${profile}";
+        linkProfile = profile: location: let
+          userProfile = guixProfile profile;
+        in ''
+          [ -d "${userProfile}" ] && ln -sfn "${userProfile}" "${location}"
+        '';
         linkProfileToPath = acc: profile: location: let
-          guixProfile = "${cfg.stateDir}/guix/profiles/per-user/\${USER}/${profile}";
-          in acc + ''
-            [ -d "${guixProfile}" ] && ln -sf "${guixProfile}" "${location}"
-          '';
+          in acc + (linkProfile profile location);
+
+        # This should contain export-only Guix user profiles. The rest of it is
+        # handled manually in the activation script.
+        guixUserProfiles' = lib.attrsets.removeAttrs guixUserProfiles [ "guix-home" ];
 
-        activationScript = lib.foldlAttrs linkProfileToPath "" guixUserProfiles;
+        linkExportsScript = lib.foldlAttrs linkProfileToPath "" guixUserProfiles';
       in ''
         # Don't export this please! It is only expected to be used for this
         # activation script and nothing else.
         XDG_CONFIG_HOME=''${XDG_CONFIG_HOME:-$HOME/.config}
 
         # Linking the usual Guix profiles into the home directory.
-        ${activationScript}
+        ${linkExportsScript}
+
+        # Activate all of the default Guix non-exports profiles manually.
+        ${linkProfile "guix-home" "$HOME/.guix-home"}
+        [ -L "$HOME/.guix-home" ] && "$HOME/.guix-home/activate"
       '';
 
       # GUIX_LOCPATH is basically LOCPATH but for Guix libc which in turn used by
@@ -373,7 +386,6 @@ in
         serviceConfig = {
           Type = "oneshot";
 
-          MemoryDenyWriteExecute = true;
           PrivateDevices = true;
           PrivateNetworks = true;
           ProtectControlGroups = true;
diff --git a/nixos/modules/services/misc/llama-cpp.nix b/nixos/modules/services/misc/llama-cpp.nix
new file mode 100644
index 0000000000000..4d76456fb2fd5
--- /dev/null
+++ b/nixos/modules/services/misc/llama-cpp.nix
@@ -0,0 +1,111 @@
+{ config, lib, pkgs, utils, ... }:
+
+let
+  cfg = config.services.llama-cpp;
+in {
+
+  options = {
+
+    services.llama-cpp = {
+      enable = lib.mkEnableOption "LLaMA C++ server";
+
+      package = lib.mkPackageOption pkgs "llama-cpp" { };
+
+      model = lib.mkOption {
+        type = lib.types.path;
+        example = "/models/mistral-instruct-7b/ggml-model-q4_0.gguf";
+        description = "Model path.";
+      };
+
+      extraFlags = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        description = "Extra flags passed to llama-cpp-server.";
+        example = ["-c" "4096" "-ngl" "32" "--numa"];
+        default = [];
+      };
+
+      host = lib.mkOption {
+        type = lib.types.str;
+        default = "127.0.0.1";
+        example = "0.0.0.0";
+        description = "IP address the LLaMA C++ server listens on.";
+      };
+
+      port = lib.mkOption {
+        type = lib.types.port;
+        default = 8080;
+        description = "Listen port for LLaMA C++ server.";
+      };
+
+      openFirewall = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = "Open ports in the firewall for LLaMA C++ server.";
+      };
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    systemd.services.llama-cpp = {
+      description = "LLaMA C++ server";
+      after = ["network.target"];
+      wantedBy = ["multi-user.target"];
+
+      serviceConfig = {
+        Type = "idle";
+        KillSignal = "SIGINT";
+        ExecStart = "${cfg.package}/bin/llama-cpp-server --log-disable --host ${cfg.host} --port ${builtins.toString cfg.port} -m ${cfg.model} ${utils.escapeSystemdExecArgs cfg.extraFlags}";
+        Restart = "on-failure";
+        RestartSec = 300;
+
+        # for GPU acceleration
+        PrivateDevices = false;
+
+        # hardening
+        DynamicUser = true;
+        CapabilityBoundingSet = "";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_UNIX"
+        ];
+        NoNewPrivileges = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        MemoryDenyWriteExecute = true;
+        LockPersonality = true;
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+          "~@resources"
+        ];
+        SystemCallErrorNumber = "EPERM";
+        ProtectProc = "invisible";
+        ProtectHostname = true;
+        ProcSubset = "pid";
+      };
+    };
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ newam ];
+}
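
A configuration sketch for the new module, reusing the example model path from the option; the extra flags are illustrative llama.cpp server arguments.

```
{
  services.llama-cpp = {
    enable = true;
    model = "/models/mistral-instruct-7b/ggml-model-q4_0.gguf";
    host = "0.0.0.0";
    port = 8080;
    extraFlags = [ "-c" "4096" "-ngl" "32" ];
    openFirewall = true;
  };
}
```
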
diff --git a/nixos/modules/services/misc/moonraker.nix b/nixos/modules/services/misc/moonraker.nix
index 0ee7e898cf761..750dca9d03736 100644
--- a/nixos/modules/services/misc/moonraker.nix
+++ b/nixos/modules/services/misc/moonraker.nix
@@ -186,6 +186,12 @@ in {
       };
     };
 
+    # set this to false, otherwise we'll get a warning indicating that `/etc/klipper.cfg`
+    # is not located in the moonraker config directory.
+    services.moonraker.settings = lib.mkIf (!config.services.klipper.mutableConfig) {
+      file_manager.check_klipper_config_path = false;
+    };
+
     security.polkit.extraConfig = lib.optionalString cfg.allowSystemControl ''
       // nixos/moonraker: Allow Moonraker to perform system-level operations
       //
diff --git a/nixos/modules/services/misc/nitter.nix b/nixos/modules/services/misc/nitter.nix
index c2c462d46bb5b..d2cf7c0de2b77 100644
--- a/nixos/modules/services/misc/nitter.nix
+++ b/nixos/modules/services/misc/nitter.nix
@@ -304,6 +304,23 @@ in
         '';
       };
 
+      guestAccounts = mkOption {
+        type = types.path;
+        default = "/var/lib/nitter/guest_accounts.jsonl";
+        description = lib.mdDoc ''
+          Path to the guest accounts file.
+
+          This file contains a list of guest accounts that can be used to
+          access the instance without logging in. The file is in JSONL format,
+          where each line is a JSON object with the following fields:
+
+          {"oauth_token":"some_token","oauth_token_secret":"some_secret_key"}
+
+          See https://github.com/zedeus/nitter/wiki/Guest-Account-Branch-Deployment
+          for more information on guest accounts and how to generate them.
+        '';
+      };
+
       redisCreateLocally = mkOption {
         type = types.bool;
         default = true;
@@ -333,8 +350,12 @@ in
         after = [ "network-online.target" ];
         serviceConfig = {
           DynamicUser = true;
+          LoadCredential = "guestAccountsFile:${cfg.guestAccounts}";
           StateDirectory = "nitter";
-          Environment = [ "NITTER_CONF_FILE=/var/lib/nitter/nitter.conf" ];
+          Environment = [
+            "NITTER_CONF_FILE=/var/lib/nitter/nitter.conf"
+            "NITTER_ACCOUNTS_FILE=%d/guestAccountsFile"
+          ];
           # Some parts of Nitter expect `public` folder in working directory,
           # see https://github.com/zedeus/nitter/issues/414
           WorkingDirectory = "${cfg.package}/share/nitter";
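
A sketch of the new guest-accounts wiring; the file path is a placeholder. The file is handed to Nitter via systemd's `LoadCredential`, so it is read by the service manager rather than by the dynamic user directly.

```
{
  services.nitter = {
    enable = true;
    guestAccounts = "/var/lib/secrets/nitter-guest-accounts.jsonl";
  };
}
```
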
diff --git a/nixos/modules/services/misc/ollama.nix b/nixos/modules/services/misc/ollama.nix
new file mode 100644
index 0000000000000..9794bbbec464c
--- /dev/null
+++ b/nixos/modules/services/misc/ollama.nix
@@ -0,0 +1,42 @@
+{ config, lib, pkgs, ... }: let
+
+  cfg = config.services.ollama;
+
+in {
+
+  options = {
+    services.ollama = {
+      enable = lib.mkEnableOption (
+        lib.mdDoc "Server for local large language models"
+      );
+      package = lib.mkPackageOption pkgs "ollama" { };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    systemd = {
+      services.ollama = {
+        wantedBy = [ "multi-user.target" ];
+        description = "Server for local large language models";
+        after = [ "network.target" ];
+        environment = {
+          HOME = "%S/ollama";
+          OLLAMA_MODELS = "%S/ollama/models";
+        };
+        serviceConfig = {
+          ExecStart = "${lib.getExe cfg.package} serve";
+          WorkingDirectory = "/var/lib/ollama";
+          StateDirectory = [ "ollama" ];
+          DynamicUser = true;
+        };
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ onny ];
+
+}
diff --git a/nixos/modules/services/misc/paperless.nix b/nixos/modules/services/misc/paperless.nix
index b3bc7d89009db..3c6832958f59a 100644
--- a/nixos/modules/services/misc/paperless.nix
+++ b/nixos/modules/services/misc/paperless.nix
@@ -10,7 +10,7 @@ let
   defaultFont = "${pkgs.liberation_ttf}/share/fonts/truetype/LiberationSerif-Regular.ttf";
 
   # Don't start a redis instance if the user sets a custom redis connection
-  enableRedis = !hasAttr "PAPERLESS_REDIS" cfg.extraConfig;
+  enableRedis = !(cfg.settings ? PAPERLESS_REDIS);
   redisServer = config.services.redis.servers.paperless;
 
   env = {
@@ -24,9 +24,11 @@ let
     PAPERLESS_TIME_ZONE = config.time.timeZone;
   } // optionalAttrs enableRedis {
     PAPERLESS_REDIS = "unix://${redisServer.unixSocket}";
-  } // (
-    lib.mapAttrs (_: toString) cfg.extraConfig
-  );
+  } // (lib.mapAttrs (_: s:
+    if (lib.isAttrs s || lib.isList s) then builtins.toJSON s
+    else if lib.isBool s then lib.boolToString s
+    else toString s
+  ) cfg.settings);
 
   manage = pkgs.writeShellScript "manage" ''
     set -o allexport # Export the following env vars
@@ -82,6 +84,7 @@ in
 
   imports = [
     (mkRenamedOptionModule [ "services" "paperless-ng" ] [ "services" "paperless" ])
+    (mkRenamedOptionModule [ "services" "paperless" "extraConfig" ] [ "services" "paperless" "settings" ])
   ];
 
   options.services.paperless = {
@@ -160,32 +163,30 @@ in
       description = lib.mdDoc "Web interface port.";
     };
 
-    # FIXME this should become an RFC42-style settings attr
-    extraConfig = mkOption {
-      type = types.attrs;
+    settings = mkOption {
+      type = lib.types.submodule {
+        freeformType = with lib.types; attrsOf (let
+          typeList = [ bool float int str path package ];
+        in oneOf (typeList ++ [ (listOf (oneOf typeList)) (attrsOf (oneOf typeList)) ]));
+      };
       default = { };
       description = lib.mdDoc ''
         Extra paperless config options.
 
-        See [the documentation](https://docs.paperless-ngx.com/configuration/)
-        for available options.
+        See [the documentation](https://docs.paperless-ngx.com/configuration/) for available options.
 
-        Note that some options such as `PAPERLESS_CONSUMER_IGNORE_PATTERN` expect JSON values. Use `builtins.toJSON` to ensure proper quoting.
+        Note that some settings such as `PAPERLESS_CONSUMER_IGNORE_PATTERN` expect JSON values.
+        Settings declared as lists or attrsets will automatically be serialised into JSON strings for your convenience.
       '';
-      example = literalExpression ''
-        {
-          PAPERLESS_OCR_LANGUAGE = "deu+eng";
-
-          PAPERLESS_DBHOST = "/run/postgresql";
-
-          PAPERLESS_CONSUMER_IGNORE_PATTERN = builtins.toJSON [ ".DS_STORE/*" "desktop.ini" ];
-
-          PAPERLESS_OCR_USER_ARGS = builtins.toJSON {
-            optimize = 1;
-            pdfa_image_compression = "lossless";
-          };
+      example = {
+        PAPERLESS_OCR_LANGUAGE = "deu+eng";
+        PAPERLESS_DBHOST = "/run/postgresql";
+        PAPERLESS_CONSUMER_IGNORE_PATTERN = [ ".DS_STORE/*" "desktop.ini" ];
+        PAPERLESS_OCR_USER_ARGS = {
+          optimize = 1;
+          pdfa_image_compression = "lossless";
         };
-      '';
+      };
     };
 
     user = mkOption {
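
A sketch of the renamed RFC 42-style `settings` option, based on the example above; lists and attribute sets are serialised to JSON automatically, so `builtins.toJSON` is no longer needed.

```
{
  services.paperless = {
    enable = true;
    settings = {
      PAPERLESS_OCR_LANGUAGE = "deu+eng";
      PAPERLESS_DBHOST = "/run/postgresql";
      PAPERLESS_CONSUMER_IGNORE_PATTERN = [ ".DS_STORE/*" "desktop.ini" ];
      PAPERLESS_OCR_USER_ARGS = {
        optimize = 1;
        pdfa_image_compression = "lossless";
      };
    };
  };
}
```
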
diff --git a/nixos/modules/services/misc/portunus.nix b/nixos/modules/services/misc/portunus.nix
index 3299b6404c2b5..7036a372d1ea8 100644
--- a/nixos/modules/services/misc/portunus.nix
+++ b/nixos/modules/services/misc/portunus.nix
@@ -102,7 +102,9 @@ in
     ldap = {
       package = mkOption {
         type = types.package;
-        # needs openldap built with a libxcrypt that support crypt sha256 until https://github.com/majewsky/portunus/issues/2 is solved
+        # needs openldap built with a libxcrypt that supports crypt sha256 until users have had time to migrate to newer hashes
+        # Ref: <https://github.com/majewsky/portunus/issues/2>
+        # TODO: remove in NixOS 24.11 (cf. same note on pkgs/servers/portunus/default.nix)
         default = pkgs.openldap.override { libxcrypt = pkgs.libxcrypt-legacy; };
         defaultText = lib.literalExpression "pkgs.openldap.override { libxcrypt = pkgs.libxcrypt-legacy; }";
         description = lib.mdDoc "The OpenLDAP package to use.";
@@ -247,6 +249,7 @@ in
             acmeDirectory = config.security.acme.certs."${cfg.domain}".directory;
           in
           {
+            PORTUNUS_SERVER_HTTP_SECURE = "true";
             PORTUNUS_SLAPD_TLS_CA_CERTIFICATE = "/etc/ssl/certs/ca-certificates.crt";
             PORTUNUS_SLAPD_TLS_CERTIFICATE = "${acmeDirectory}/cert.pem";
             PORTUNUS_SLAPD_TLS_DOMAIN_NAME = cfg.domain;
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
index b517170cda216..c1209e34a92b5 100644
--- a/nixos/modules/services/misc/redmine.nix
+++ b/nixos/modules/services/misc/redmine.nix
@@ -53,7 +53,7 @@ in
       enable = mkEnableOption (lib.mdDoc "Redmine");
 
       package = mkPackageOption pkgs "redmine" {
-        example = "redmine.override { ruby = pkgs.ruby_2_7; }";
+        example = "redmine.override { ruby = pkgs.ruby_3_2; }";
       };
 
       user = mkOption {
diff --git a/nixos/modules/services/misc/tandoor-recipes.nix b/nixos/modules/services/misc/tandoor-recipes.nix
index 2d7d29b2e7172..6c51a9bb85550 100644
--- a/nixos/modules/services/misc/tandoor-recipes.nix
+++ b/nixos/modules/services/misc/tandoor-recipes.nix
@@ -12,7 +12,7 @@ let
     DEBUG_TOOLBAR = "0";
     MEDIA_ROOT = "/var/lib/tandoor-recipes";
   } // optionalAttrs (config.time.timeZone != null) {
-    TIMEZONE = config.time.timeZone;
+    TZ = config.time.timeZone;
   } // (
     lib.mapAttrs (_: toString) cfg.extraConfig
   );
diff --git a/nixos/modules/services/misc/tuxclocker.nix b/nixos/modules/services/misc/tuxclocker.nix
new file mode 100644
index 0000000000000..5969f75b8e30d
--- /dev/null
+++ b/nixos/modules/services/misc/tuxclocker.nix
@@ -0,0 +1,71 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.tuxclocker;
+in
+{
+  options.programs.tuxclocker = {
+    enable = mkEnableOption (lib.mdDoc ''
+      TuxClocker, a hardware control and monitoring program
+    '');
+
+    enableAMD = mkEnableOption (lib.mdDoc ''
+      AMD GPU controls.
+      Sets the `amdgpu.ppfeaturemask` kernel parameter to 0xfffd7fff to enable all TuxClocker controls
+    '');
+
+    enabledNVIDIADevices = mkOption {
+      type = types.listOf types.int;
+      default = [ ];
+      example = [ 0 1 ];
+      description = lib.mdDoc ''
+        Enable NVIDIA GPU controls for a device by index.
+        Sets the `Coolbits` Xorg option to enable all TuxClocker controls.
+      '';
+    };
+
+    useUnfree = mkOption {
+      type = types.bool;
+      default = false;
+      example = true;
+      description = lib.mdDoc ''
+        Whether to use components requiring unfree dependencies.
+        Disabling this allows you to get everything from the binary cache.
+      '';
+    };
+  };
+
+  config = let
+      package = if cfg.useUnfree then pkgs.tuxclocker else pkgs.tuxclocker-without-unfree;
+    in
+      mkIf cfg.enable {
+        environment.systemPackages = [
+          package
+        ];
+
+        services.dbus.packages = [
+          package
+        ];
+
+        # MSR is used for some features
+        boot.kernelModules = [ "msr" ];
+
+        # https://download.nvidia.com/XFree86/Linux-x86_64/430.14/README/xconfigoptions.html#Coolbits
+        services.xserver.config = let
+          configSection = (i: ''
+            Section "Device"
+              Driver "nvidia"
+              Option "Coolbits" "31"
+              Identifier "Device-nvidia[${toString i}]"
+            EndSection
+          '');
+        in
+          concatStrings (map configSection cfg.enabledNVIDIADevices);
+
+        # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/gpu/drm/amd/include/amd_shared.h#n207
+        # Enable everything modifiable in TuxClocker
+        boot.kernelParams = mkIf cfg.enableAMD [ "amdgpu.ppfeaturemask=0xfffd7fff" ];
+      };
+}
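
A sketch of the new module; note that the options live under `programs.tuxclocker`, and each listed NVIDIA device index emits a corresponding `Coolbits` Xorg device section.

```
{
  programs.tuxclocker = {
    enable = true;
    enableAMD = true;
    enabledNVIDIADevices = [ 0 ];
    useUnfree = false;
  };
}
```
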
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 62c50490ee99a..5ac010bf81ee8 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -74,7 +74,7 @@ let
     fi
   '';
   provisionConfDir = pkgs.runCommand "grafana-provisioning" { nativeBuildInputs = [ pkgs.xorg.lndir ]; } ''
-    mkdir -p $out/{datasources,dashboards,notifiers,alerting}
+    mkdir -p $out/{alerting,datasources,dashboards,notifiers,plugins}
     ${ln { src = datasourceFileOrDir;    dir = "datasources"; filename = "datasource"; }}
     ${ln { src = dashboardFileOrDir;     dir = "dashboards";  filename = "dashboard"; }}
     ${ln { src = notifierFileOrDir;      dir = "notifiers";   filename = "notifier"; }}
@@ -1831,7 +1831,7 @@ in
         set -o errexit -o pipefail -o nounset -o errtrace
         shopt -s inherit_errexit
 
-        exec ${cfg.package}/bin/grafana-server -homepath ${cfg.dataDir} -config ${configFile}
+        exec ${cfg.package}/bin/grafana server -homepath ${cfg.dataDir} -config ${configFile}
       '';
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
diff --git a/nixos/modules/services/monitoring/netdata.nix b/nixos/modules/services/monitoring/netdata.nix
index 78b12537e27fd..ec6aa56150397 100644
--- a/nixos/modules/services/monitoring/netdata.nix
+++ b/nixos/modules/services/monitoring/netdata.nix
@@ -198,6 +198,7 @@ in {
         }
       ];
 
+    services.netdata.configDir.".opt-out-from-anonymous-statistics" = mkIf (!cfg.enableAnalyticsReporting) (pkgs.writeText ".opt-out-from-anonymous-statistics" "");
     environment.etc."netdata/netdata.conf".source = configFile;
     environment.etc."netdata/conf.d".source = configDirectory;
 
diff --git a/nixos/modules/services/monitoring/prometheus/default.nix b/nixos/modules/services/monitoring/prometheus/default.nix
index 90ea56658b02d..b4ac8e21451af 100644
--- a/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixos/modules/services/monitoring/prometheus/default.nix
@@ -41,12 +41,12 @@ let
   # This becomes the main config file for Prometheus
   promConfig = {
     global = filterValidPrometheus cfg.globalConfig;
-    rule_files = map (promtoolCheck "check rules" "rules") (cfg.ruleFiles ++ [
-      (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
-    ]);
     scrape_configs = filterValidPrometheus cfg.scrapeConfigs;
     remote_write = filterValidPrometheus cfg.remoteWrite;
     remote_read = filterValidPrometheus cfg.remoteRead;
+    rule_files = optionals (!(cfg.enableAgentMode)) (map (promtoolCheck "check rules" "rules") (cfg.ruleFiles ++ [
+      (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
+    ]));
     alerting = {
       inherit (cfg) alertmanagers;
     };
@@ -62,15 +62,20 @@ let
     promtoolCheck "check config ${lib.optionalString (cfg.checkConfig == "syntax-only") "--syntax-only"}" "prometheus.yml" yml;
 
   cmdlineArgs = cfg.extraFlags ++ [
-    "--storage.tsdb.path=${workingDir}/data/"
     "--config.file=${
       if cfg.enableReload
       then "/etc/prometheus/prometheus.yaml"
       else prometheusYml
     }"
     "--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
-    "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
-  ] ++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
+  ] ++ (
+    if (cfg.enableAgentMode) then [
+      "--enable-feature=agent"
+    ] else [
+       "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity }"
+       "--storage.tsdb.path=${workingDir}/data/"
+    ])
+    ++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
     ++ optional (cfg.retentionTime != null) "--storage.tsdb.retention.time=${cfg.retentionTime}"
     ++ optional (cfg.webConfigFile != null) "--web.config.file=${cfg.webConfigFile}";
 
@@ -1430,6 +1435,10 @@ let
       remote_timeout = mkOpt types.str ''
         Timeout for requests to the remote write endpoint.
       '';
+      headers = mkOpt (types.attrsOf types.str) ''
+        Custom HTTP headers to be sent along with each remote write request.
+        Be aware that headers that are set by Prometheus itself can't be overwritten.
+      '';
       write_relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
         List of remote write relabel configurations.
       '';
@@ -1525,6 +1534,10 @@ let
       remote_timeout = mkOpt types.str ''
         Timeout for requests to the remote read endpoint.
       '';
+      headers = mkOpt (types.attrsOf types.str) ''
+        Custom HTTP headers to be sent along with each remote read request.
+        Be aware that headers that are set by Prometheus itself can't be overwritten.
+      '';
       read_recent = mkOpt types.bool ''
         Whether reads should be made for queries for time ranges that
         the local storage should have complete data for.
@@ -1612,6 +1625,8 @@ in
       '';
     };
 
+    enableAgentMode = mkEnableOption (lib.mdDoc "agent mode");
+
     configText = mkOption {
       type = types.nullOr types.lines;
       default = null;
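
A sketch combining agent mode with the new `headers` field for remote write; the URL and header are placeholders. In agent mode the rule files, the alertmanager queue flag and the TSDB path are omitted from the command line, as shown above.

```
{
  services.prometheus = {
    enable = true;
    enableAgentMode = true;
    remoteWrite = [{
      url = "https://metrics.example.com/api/v1/write";
      headers = { "X-Scope-OrgID" = "tenant-1"; };
    }];
  };
}
```
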
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 39abd293b2d18..35db8a7376b11 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -64,6 +64,7 @@ let
     "pgbouncer"
     "php-fpm"
     "pihole"
+    "ping"
     "postfix"
     "postgres"
     "process"
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/kea.nix b/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
index 8b1cd47d0a409..3abb6ff6bdf8b 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
@@ -15,8 +15,8 @@ in {
       type = types.listOf types.str;
       example = literalExpression ''
         [
-          "/run/kea-dhcp4/kea-dhcp4.socket"
-          "/run/kea-dhcp6/kea-dhcp6.socket"
+          "/run/kea/kea-dhcp4.socket"
+          "/run/kea/kea-dhcp6.socket"
         ]
       '';
       description = lib.mdDoc ''
@@ -31,13 +31,15 @@ in {
     ];
     serviceConfig = {
       User = "kea";
+      DynamicUser = true;
       ExecStart = ''
         ${pkgs.prometheus-kea-exporter}/bin/kea-exporter \
           --address ${cfg.listenAddress} \
           --port ${toString cfg.port} \
           ${concatStringsSep " " cfg.controlSocketPaths}
       '';
-      SupplementaryGroups = [ "kea" ];
+      RuntimeDirectory = "kea";
+      RuntimeDirectoryPreserve = true;
       RestrictAddressFamilies = [
         # Need AF_UNIX to collect data
         "AF_UNIX"
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix b/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
index 3158e71f0468b..88dc79fc2503f 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
@@ -43,14 +43,14 @@ in
     };
   };
   serviceOpts = mkMerge ([{
+    environment.CONST_LABELS = concatStringsSep "," cfg.constLabels;
     serviceConfig = {
       ExecStart = ''
         ${pkgs.prometheus-nginx-exporter}/bin/nginx-prometheus-exporter \
           --nginx.scrape-uri='${cfg.scrapeUri}' \
-          --nginx.ssl-verify=${boolToString cfg.sslVerify} \
+          --${lib.optionalString (!cfg.sslVerify) "no-"}nginx.ssl-verify \
           --web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
           --web.telemetry-path=${cfg.telemetryPath} \
-          --prometheus.const-labels=${concatStringsSep "," cfg.constLabels} \
           ${concatStringsSep " \\\n  " cfg.extraFlags}
       '';
     };
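
A brief sketch of a configuration exercising the changed flag handling; the option names are taken from the `cfg.*` references above, and the `label=value` form of `constLabels` is an assumption based on the exporter's convention.

```nix
{
  services.prometheus.exporters.nginx = {
    enable = true;
    scrapeUri = "http://localhost/nginx_status";
    # Rendered as --no-nginx.ssl-verify after this change (previously --nginx.ssl-verify=false).
    sslVerify = false;
    # Now passed through the CONST_LABELS environment variable instead of a CLI flag.
    constLabels = [ "env=prod" ];
  };
}
```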
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/ping.nix b/nixos/modules/services/monitoring/prometheus/exporters/ping.nix
new file mode 100644
index 0000000000000..af78b6bef6258
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/ping.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.ping;
+
+  settingsFormat = pkgs.formats.yaml {};
+  configFile = settingsFormat.generate "config.yml" cfg.settings;
+in
+{
+  port = 9427;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      default = {};
+
+      description = lib.mdDoc ''
+        Configuration for ping_exporter, see
+        <https://github.com/czerwonk/ping_exporter>
+        for supported values.
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      # ping_exporter needs `CAP_NET_RAW` to run as a non-root user, see https://github.com/czerwonk/ping_exporter#running-as-non-root-user
+      CapabilityBoundingSet = [ "CAP_NET_RAW" ];
+      AmbientCapabilities = [ "CAP_NET_RAW" ];
+      ExecStart = ''
+        ${pkgs.prometheus-ping-exporter}/bin/ping_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --config.path="${configFile}" \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
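
A hedged usage sketch for the new exporter. The option paths come from the module above; the `targets` key follows the upstream ping_exporter YAML schema and is an assumption here.

```nix
{
  services.prometheus.exporters.ping = {
    enable = true;
    # Serialized to config.yml and passed via --config.path.
    settings = {
      targets = [ "9.9.9.9" "example.org" ];
    };
  };
}
```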
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix b/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
index 4112774940139..b9ab305f7c082 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
@@ -19,7 +19,11 @@ in
           };
           apiKeyFile = mkOption {
             type = types.str;
-            description = "File containing the API key.";
+            description = ''
+              The path to a file containing the API key.
+              The file is securely passed to the service by leveraging systemd credentials.
+              No special permissions need to be set on this file.
+            '';
             example = "/run/secrets/sabnzbd_apikey";
           };
         };
@@ -30,18 +34,24 @@ in
   serviceOpts =
     let
       servers = lib.zipAttrs cfg.servers;
-      apiKeys = lib.concatStringsSep "," (builtins.map (file: "$(cat ${file})") servers.apiKeyFile);
+      credentials = lib.imap0 (i: v: { name = "apikey-${toString i}"; path = v; }) servers.apiKeyFile;
     in
     {
+      serviceConfig.LoadCredential = builtins.map ({ name, path }: "${name}:${path}") credentials;
+
       environment = {
         METRICS_PORT = toString cfg.port;
         METRICS_ADDR = cfg.listenAddress;
         SABNZBD_BASEURLS = lib.concatStringsSep "," servers.baseUrl;
       };
 
-      script = ''
-        export SABNZBD_APIKEYS="${apiKeys}"
-        exec ${lib.getExe pkgs.prometheus-sabnzbd-exporter}
-      '';
+      script =
+        let
+          apiKeys = lib.concatStringsSep "," (builtins.map (cred: "$(< $CREDENTIALS_DIRECTORY/${cred.name})") credentials);
+        in
+        ''
+          export SABNZBD_APIKEYS="${apiKeys}"
+          exec ${lib.getExe pkgs.prometheus-sabnzbd-exporter}
+        '';
     };
 }
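
The credential plumbing above could be driven by a configuration like this sketch; option names are taken from the module, URLs and paths are placeholders.

```nix
{
  services.prometheus.exporters.sabnzbd = {
    enable = true;
    servers = [{
      baseUrl = "http://127.0.0.1:8080";
      # Loaded at start-up via systemd LoadCredential; never embedded in the unit or the Nix store.
      apiKeyFile = "/run/secrets/sabnzbd_apikey";
    }];
  };
}
```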
diff --git a/nixos/modules/services/monitoring/thanos.nix b/nixos/modules/services/monitoring/thanos.nix
index 5baa0d8446e54..02502816ef5d7 100644
--- a/nixos/modules/services/monitoring/thanos.nix
+++ b/nixos/modules/services/monitoring/thanos.nix
@@ -394,9 +394,8 @@ let
         Maximum number of queries processed concurrently by query node.
       '';
 
-      query.replica-labels = mkAttrsParam "query.replica-label" ''
+      query.replica-labels = mkListParam "query.replica-label" ''
         Labels to treat as a replica indicator along which data is
-
         deduplicated.
 
         Still you will be able to query without deduplication using
diff --git a/nixos/modules/services/monitoring/ups.nix b/nixos/modules/services/monitoring/ups.nix
index efef2d777acd8..63afb5deb5bd4 100644
--- a/nixos/modules/services/monitoring/ups.nix
+++ b/nixos/modules/services/monitoring/ups.nix
@@ -6,9 +6,83 @@ with lib;
 
 let
   cfg = config.power.ups;
-in
+  defaultPort = 3493;
+
+  nutFormat = {
+
+    type = with lib.types; let
+
+      singleAtom = nullOr (oneOf [
+        bool
+        int
+        float
+        str
+      ]) // {
+        description = "atom (null, bool, int, float or string)";
+      };
+
+      in attrsOf (oneOf [
+        singleAtom
+        (listOf (nonEmptyListOf singleAtom))
+      ]);
+
+    generate = name: value:
+      let
+        normalizedValue =
+          lib.mapAttrs (key: val:
+            if lib.isList val
+            then forEach val (elem: if lib.isList elem then elem else [elem])
+            else
+              if val == null
+              then []
+              else [[val]]
+          ) value;
+
+        mkValueString = concatMapStringsSep " " (v:
+          let str = generators.mkValueStringDefault {} v;
+          in
+            # Quote the value if it has spaces and isn't already quoted.
+            if (hasInfix " " str) && !(hasPrefix "\"" str && hasSuffix "\"" str)
+            then "\"${str}\""
+            else str
+        );
+
+      in pkgs.writeText name (lib.generators.toKeyValue {
+        mkKeyValue = generators.mkKeyValueDefault { inherit mkValueString; } " ";
+        listsAsDuplicateKeys = true;
+      } normalizedValue);
+
+  };
+
+  installSecrets = source: target: secrets:
+    pkgs.writeShellScript "installSecrets.sh" ''
+      install -m0600 -D ${source} "${target}"
+      ${concatLines (forEach secrets (name: ''
+        ${pkgs.replace-secret}/bin/replace-secret \
+          '@${name}@' \
+          "$CREDENTIALS_DIRECTORY/${name}" \
+          "${target}"
+      ''))}
+      chmod u-w "${target}"
+    '';
+
+  upsmonConf = nutFormat.generate "upsmon.conf" cfg.upsmon.settings;
+
+  upsdUsers = pkgs.writeText "upsd.users" (let
+    # This looks like INI, but it's not quite because the
+    # 'upsmon' option lacks a '='. See: man upsd.users
+    userConfig = name: user: concatStringsSep "\n      " (concatLists [
+      [
+        "[${name}]"
+        "password = \"@upsdusers_password_${name}@\""
+      ]
+      (optional (user.upsmon != null) "upsmon ${user.upsmon}")
+      (forEach user.actions (action: "actions = ${action}"))
+      (forEach user.instcmds (instcmd: "instcmds = ${instcmd}"))
+    ]);
+  in concatStringsSep "\n\n" (mapAttrsToList userConfig cfg.users));
+
 
-let
   upsOptions = {name, config, ...}:
   {
     options = {
@@ -95,6 +169,213 @@ let
     };
   };
 
+  listenOptions = {
+    options = {
+      address = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Address of the interface for `upsd` to listen on.
+          See `man upsd.conf` for details.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = defaultPort;
+        description = lib.mdDoc ''
+          TCP port for `upsd` to listen on.
+          See `man upsd.conf` for details.
+        '';
+      };
+    };
+  };
+
+  upsdOptions = {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        defaultText = literalMD "`true` if `mode` is one of `standalone`, `netserver`";
+        description = mdDoc "Whether to enable `upsd`.";
+      };
+
+      listen = mkOption {
+        type = with types; listOf (submodule listenOptions);
+        default = [];
+        example = [
+          {
+            address = "192.168.50.1";
+          }
+          {
+            address = "::1";
+            port = 5923;
+          }
+        ];
+        description = lib.mdDoc ''
+          Addresses of the interfaces for `upsd` to listen on.
+          See `man upsd.conf` for details.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Additional lines to add to `upsd.conf`.
+        '';
+      };
+    };
+
+    config = {
+      enable = mkDefault (elem cfg.mode [ "standalone" "netserver" ]);
+    };
+  };
+
+
+  monitorOptions = { name, config, ... }: {
+    options = {
+      system = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc ''
+          Identifier of the UPS to monitor, in this form: `<upsname>[@<hostname>[:<port>]]`.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      powerValue = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc ''
+          Number of power supplies that the UPS feeds on this system.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Username from `upsd.users` for accessing this UPS.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.str;
+        defaultText = literalMD "power.ups.users.\${user}.passwordFile";
+        description = lib.mdDoc ''
+          The full path to a file containing the password from
+          `upsd.users` for accessing this UPS. The password file
+          is read on service start.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      type = mkOption {
+        type = types.str;
+        default = "master";
+        description = lib.mdDoc ''
+          The relationship with `upsd`.
+          See `upsmon.conf` for details.
+        '';
+      };
+    };
+
+    config = {
+      passwordFile = mkDefault cfg.users.${config.user}.passwordFile;
+    };
+  };
+
+  upsmonOptions = {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        defaultText = literalMD "`true` if `mode` is one of `standalone`, `netserver`, `netclient`";
+        description = mdDoc "Whether to enable `upsmon`.";
+      };
+
+      monitor = mkOption {
+        type = with types; attrsOf (submodule monitorOptions);
+        default = {};
+        description = lib.mdDoc ''
+          Set of UPSes to monitor. See `man upsmon.conf` for details.
+        '';
+      };
+
+      settings = mkOption {
+        type = nutFormat.type;
+        default = {};
+        defaultText = literalMD ''
+          {
+            MINSUPPLIES = 1;
+            RUN_AS_USER = "root";
+            NOTIFYCMD = "''${pkgs.nut}/bin/upssched";
+            SHUTDOWNCMD = "''${pkgs.systemd}/bin/shutdown now";
+          }
+        '';
+        description = mdDoc "Additional settings to add to `upsmon.conf`.";
+        example = literalMD ''
+          {
+            MINSUPPLIES = 2;
+            NOTIFYFLAG = [
+              [ "ONLINE" "SYSLOG+EXEC" ]
+              [ "ONBATT" "SYSLOG+EXEC" ]
+            ];
+          }
+        '';
+      };
+    };
+
+    config = {
+      enable = mkDefault (elem cfg.mode [ "standalone" "netserver" "netclient" ]);
+      settings = {
+        RUN_AS_USER = "root"; # TODO: replace 'root' by another username.
+        MINSUPPLIES = mkDefault 1;
+        NOTIFYCMD = mkDefault "${pkgs.nut}/bin/upssched";
+        SHUTDOWNCMD = mkDefault "${pkgs.systemd}/bin/shutdown now";
+        MONITOR = flip mapAttrsToList cfg.upsmon.monitor (name: monitor: with monitor; [ system powerValue user "\"@upsmon_password_${name}@\"" type ]);
+      };
+    };
+  };
+
+  userOptions = {
+    options = {
+      passwordFile = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The full path to a file that contains the user's (clear text)
+          password. The password file is read on service start.
+        '';
+      };
+
+      actions = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Allow the user to do certain things with upsd.
+          See `man upsd.users` for details.
+        '';
+      };
+
+      instcmds = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Let the user initiate specific instant commands. Use "ALL" to grant all commands automatically. For the full list of what your UPS supports, use "upscmd -l".
+          See `man upsd.users` for details.
+        '';
+      };
+
+      upsmon = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Add the necessary actions for an upsmon process to work.
+          See `man upsd.users` for details.
+        '';
+      };
+    };
+  };
+
 in
 
 
@@ -103,19 +384,14 @@ in
     # powerManagement.powerDownCommands
 
     power.ups = {
-      enable = mkOption {
-        default = false;
-        type = with types; bool;
-        description = lib.mdDoc ''
-          Enables support for Power Devices, such as Uninterruptible Power
-          Supplies, Power Distribution Units and Solar Controllers.
-        '';
-      };
+      enable = mkEnableOption (lib.mdDoc ''
+        support for Power Devices, such as Uninterruptible Power
+        Supplies, Power Distribution Units and Solar Controllers
+      '');
 
-      # This option is not used yet.
       mode = mkOption {
         default = "standalone";
-        type = types.str;
+        type = types.enum [ "none" "standalone" "netserver" "netclient" ];
         description = lib.mdDoc ''
           The MODE determines which part of the NUT is to be started, and
           which configuration files must be modified.
@@ -148,6 +424,13 @@ in
         '';
       };
 
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for `upsd`.
+        '';
+      };
 
       maxStartDelay = mkOption {
         default = 45;
@@ -161,6 +444,22 @@ in
         '';
       };
 
+      upsmon = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Options for the `upsmon.conf` configuration file.
+        '';
+        type = types.submodule upsmonOptions;
+      };
+
+      upsd = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Options for the `upsd.conf` configuration file.
+        '';
+        type = types.submodule upsdOptions;
+      };
+
       ups = mkOption {
         default = {};
         # see nut/etc/ups.conf.sample
@@ -172,46 +471,95 @@ in
         type = with types; attrsOf (submodule upsOptions);
       };
 
+      users = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Users that can access upsd. See `man upsd.users`.
+        '';
+        type = with types; attrsOf (submodule userOptions);
+      };
+
     };
   };
 
   config = mkIf cfg.enable {
 
+    assertions = [
+      (let
+        totalPowerValue = foldl' add 0 (map (monitor: monitor.powerValue) (attrValues cfg.upsmon.monitor));
+        minSupplies = cfg.upsmon.settings.MINSUPPLIES;
+      in mkIf cfg.upsmon.enable {
+        assertion = totalPowerValue >= minSupplies;
+        message = ''
+          `power.ups.upsmon`: Total configured power value (${toString totalPowerValue}) must be at least MINSUPPLIES (${toString minSupplies}).
+        '';
+      })
+    ];
+
     environment.systemPackages = [ pkgs.nut ];
 
-    systemd.services.upsmon = {
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts =
+        if cfg.upsd.listen == []
+        then [ defaultPort ]
+        else unique (forEach cfg.upsd.listen (listen: listen.port));
+    };
+
+    systemd.services.upsmon = let
+      secrets = mapAttrsToList (name: monitor: "upsmon_password_${name}") cfg.upsmon.monitor;
+      createUpsmonConf = installSecrets upsmonConf "/run/nut/upsmon.conf" secrets;
+    in {
+      enable = cfg.upsmon.enable;
       description = "Uninterruptible Power Supplies (Monitor)";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.Type = "forking";
-      script = "${pkgs.nut}/sbin/upsmon";
-      environment.NUT_CONFPATH = "/etc/nut/";
-      environment.NUT_STATEPATH = "/var/lib/nut/";
+      serviceConfig = {
+        Type = "forking";
+        ExecStartPre = "${createUpsmonConf}";
+        ExecStart = "${pkgs.nut}/sbin/upsmon";
+        ExecReload = "${pkgs.nut}/sbin/upsmon -c reload";
+        LoadCredential = mapAttrsToList (name: monitor: "upsmon_password_${name}:${monitor.passwordFile}") cfg.upsmon.monitor;
+      };
+      environment.NUT_CONFPATH = "/etc/nut";
+      environment.NUT_STATEPATH = "/var/lib/nut";
     };
 
-    systemd.services.upsd = {
+    systemd.services.upsd = let
+      secrets = mapAttrsToList (name: user: "upsdusers_password_${name}") cfg.users;
+      createUpsdUsers = installSecrets upsdUsers "/run/nut/upsd.users" secrets;
+    in {
+      enable = cfg.upsd.enable;
       description = "Uninterruptible Power Supplies (Daemon)";
       after = [ "network.target" "upsmon.service" ];
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.Type = "forking";
-      # TODO: replace 'root' by another username.
-      script = "${pkgs.nut}/sbin/upsd -u root";
-      environment.NUT_CONFPATH = "/etc/nut/";
-      environment.NUT_STATEPATH = "/var/lib/nut/";
+      serviceConfig = {
+        Type = "forking";
+        ExecStartPre = "${createUpsdUsers}";
+        # TODO: replace 'root' by another username.
+        ExecStart = "${pkgs.nut}/sbin/upsd -u root";
+        ExecReload = "${pkgs.nut}/sbin/upsd -c reload";
+        LoadCredential = mapAttrsToList (name: user: "upsdusers_password_${name}:${user.passwordFile}") cfg.users;
+      };
+      environment.NUT_CONFPATH = "/etc/nut";
+      environment.NUT_STATEPATH = "/var/lib/nut";
+      restartTriggers = [
+        config.environment.etc."nut/upsd.conf".source
+      ];
     };
 
     systemd.services.upsdrv = {
+      enable = cfg.upsd.enable;
       description = "Uninterruptible Power Supplies (Register all UPS)";
       after = [ "upsd.service" ];
       wantedBy = [ "multi-user.target" ];
-      # TODO: replace 'root' by another username.
-      script = "${pkgs.nut}/bin/upsdrvctl -u root start";
       serviceConfig = {
         Type = "oneshot";
         RemainAfterExit = true;
+        # TODO: replace 'root' by another username.
+        ExecStart = "${pkgs.nut}/bin/upsdrvctl -u root start";
       };
-      environment.NUT_CONFPATH = "/etc/nut/";
-      environment.NUT_STATEPATH = "/var/lib/nut/";
+      environment.NUT_CONFPATH = "/etc/nut";
+      environment.NUT_STATEPATH = "/var/lib/nut";
     };
 
     environment.etc = {
@@ -223,26 +571,26 @@ in
         ''
           maxstartdelay = ${toString cfg.maxStartDelay}
 
-          ${flip concatStringsSep (forEach (attrValues cfg.ups) (ups: ups.summary)) "
-
-          "}
+          ${concatStringsSep "\n\n" (forEach (attrValues cfg.ups) (ups: ups.summary))}
+        '';
+      "nut/upsd.conf".source = pkgs.writeText "upsd.conf"
+        ''
+          ${concatStringsSep "\n" (forEach cfg.upsd.listen (listen: "LISTEN ${listen.address} ${toString listen.port}"))}
+          ${cfg.upsd.extraConfig}
         '';
       "nut/upssched.conf".source = cfg.schedulerRules;
-      # These file are containing private information and thus should not
-      # be stored inside the Nix store.
-      /*
-      "nut/upsd.conf".source = "";
-      "nut/upsd.users".source = "";
-      "nut/upsmon.conf".source = "";
-      */
+      "nut/upsd.users".source = "/run/nut/upsd.users";
+      "nut/upsmon.conf".source = "/run/nut/upsmon.conf";
     };
 
     power.ups.schedulerRules = mkDefault "${pkgs.nut}/etc/upssched.conf.sample";
 
     systemd.tmpfiles.rules = [
       "d /var/state/ups -"
+      "d /var/lib/nut 700"
     ];
 
+    services.udev.packages = [ pkgs.nut ];
 
 /*
     users.users.nut =
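
To see the new NUT options together, here is a hedged sketch of a standalone setup. The `ups.<name>.driver` and `.port` fields are pre-existing options of this module; driver, port and secret paths are placeholders.

```nix
{
  power.ups = {
    enable = true;
    mode = "standalone";       # upsd and upsmon are enabled for this mode by default
    # openFirewall = true;     # new option, opens the upsd listen ports (e.g. on a netserver)

    ups.apc = {
      driver = "usbhid-ups";   # placeholder driver
      port = "auto";
    };

    users.monuser = {
      # Clear-text password file, substituted into upsd.users at service start via systemd credentials.
      passwordFile = "/run/keys/nut-monuser";
      upsmon = "master";
    };

    upsmon.monitor.apc = {
      system = "apc@localhost";
      user = "monuser";
      # powerValue defaults to 1; passwordFile defaults to the user's passwordFile.
    };
  };
}
```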
diff --git a/nixos/modules/services/network-filesystems/drbd.nix b/nixos/modules/services/network-filesystems/drbd.nix
index e74ed391d48e3..79a1b768b4615 100644
--- a/nixos/modules/services/network-filesystems/drbd.nix
+++ b/nixos/modules/services/network-filesystems/drbd.nix
@@ -55,8 +55,8 @@ let cfg = config.services.drbd; in
       wants = [ "systemd-udev.settle.service" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        ExecStart = "${pkgs.drbd}/sbin/drbdadm up all";
-        ExecStop = "${pkgs.drbd}/sbin/drbdadm down all";
+        ExecStart = "${pkgs.drbd}/bin/drbdadm up all";
+        ExecStop = "${pkgs.drbd}/bin/drbdadm down all";
       };
     };
   };
diff --git a/nixos/modules/services/network-filesystems/eris-server.nix b/nixos/modules/services/network-filesystems/eris-server.nix
index 66eccfac408c4..104676a52c61f 100644
--- a/nixos/modules/services/network-filesystems/eris-server.nix
+++ b/nixos/modules/services/network-filesystems/eris-server.nix
@@ -3,6 +3,7 @@
 let
   cfg = config.services.eris-server;
   stateDirectoryPath = "\${STATE_DIRECTORY}";
+  nullOrStr = with lib.types; nullOr str;
 in {
 
   options.services.eris-server = {
@@ -26,7 +27,7 @@ in {
     };
 
     listenCoap = lib.mkOption {
-      type = lib.types.str;
+      type = nullOrStr;
       default = ":5683";
       example = "[::1]:5683";
       description = ''
@@ -39,8 +40,8 @@ in {
     };
 
     listenHttp = lib.mkOption {
-      type = lib.types.str;
-      default = "";
+      type = nullOrStr;
+      default = null;
       example = "[::1]:8080";
       description = "Server HTTP listen address. Do not listen by default.";
     };
@@ -58,8 +59,8 @@ in {
     };
 
     mountpoint = lib.mkOption {
-      type = lib.types.str;
-      default = "";
+      type = nullOrStr;
+      default = null;
       example = "/eris";
       description = ''
         Mountpoint for FUSE namespace that exposes "urn:eris:…" files.
@@ -69,33 +70,44 @@ in {
   };
 
   config = lib.mkIf cfg.enable {
+    assertions = [{
+      assertion = lib.strings.versionAtLeast cfg.package.version "20231219";
+      message =
+        "Version of `config.services.eris-server.package` is incompatible with this module";
+    }];
+
     systemd.services.eris-server = let
-      cmd =
-        "${cfg.package}/bin/eris-go server --coap '${cfg.listenCoap}' --http '${cfg.listenHttp}' ${
-          lib.optionalString cfg.decode "--decode "
-        }${
-          lib.optionalString (cfg.mountpoint != "")
-          ''--mountpoint "${cfg.mountpoint}" ''
-        }${lib.strings.escapeShellArgs cfg.backends}";
+      cmd = "${cfg.package}/bin/eris-go server"
+        + (lib.optionalString (cfg.listenCoap != null)
+          " --coap '${cfg.listenCoap}'")
+        + (lib.optionalString (cfg.listenHttp != null)
+          " --http '${cfg.listenHttp}'")
+        + (lib.optionalString cfg.decode " --decode")
+        + (lib.optionalString (cfg.mountpoint != null)
+          " --mountpoint '${cfg.mountpoint}'");
     in {
       description = "ERIS block server";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      script = lib.mkIf (cfg.mountpoint != "") ''
+      environment.ERIS_STORE_URL = toString cfg.backends;
+      script = lib.mkIf (cfg.mountpoint != null) ''
         export PATH=${config.security.wrapperDir}:$PATH
         ${cmd}
       '';
       serviceConfig = let
-        umounter = lib.mkIf (cfg.mountpoint != "")
+        umounter = lib.mkIf (cfg.mountpoint != null)
           "-${config.security.wrapperDir}/fusermount -uz ${cfg.mountpoint}";
-      in {
-        ExecStartPre = umounter;
-        ExecStart = lib.mkIf (cfg.mountpoint == "") cmd;
-        ExecStopPost = umounter;
-        Restart = "always";
-        RestartSec = 20;
-        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
-      };
+      in if (cfg.mountpoint == null) then {
+        ExecStart = cmd;
+      } else {
+        ExecStartPre = umounter;
+        ExecStopPost = umounter;
+        Restart = "always";
+        RestartSec = 20;
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+      };
     };
   };
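
A small sketch of the now-nullable listeners in use; the example values come from the option examples above.

```nix
{
  services.eris-server = {
    enable = true;
    decode = true;
    # listenCoap keeps its ":5683" default; HTTP and the FUSE mount stay disabled (null) unless set.
    listenHttp = "[::1]:8080";
    # mountpoint = "/eris";
  };
}
```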
 
diff --git a/nixos/modules/services/network-filesystems/kubo.nix b/nixos/modules/services/network-filesystems/kubo.nix
index fbf9b32a2b25a..8226fc614bc47 100644
--- a/nixos/modules/services/network-filesystems/kubo.nix
+++ b/nixos/modules/services/network-filesystems/kubo.nix
@@ -147,18 +147,6 @@ in
         description = lib.mdDoc "Whether Kubo should try to run the fs-repo-migration at startup.";
       };
 
-      ipfsMountDir = mkOption {
-        type = types.str;
-        default = "/ipfs";
-        description = lib.mdDoc "Where to mount the IPFS namespace to";
-      };
-
-      ipnsMountDir = mkOption {
-        type = types.str;
-        default = "/ipns";
-        description = lib.mdDoc "Where to mount the IPNS namespace to";
-      };
-
       enableGC = mkOption {
         type = types.bool;
         default = false;
@@ -205,6 +193,18 @@ in
               ];
               description = lib.mdDoc "Where Kubo listens for incoming p2p connections";
             };
+
+            Mounts.IPFS = mkOption {
+              type = types.str;
+              default = "/ipfs";
+              description = lib.mdDoc "Where to mount the IPFS namespace to";
+            };
+
+            Mounts.IPNS = mkOption {
+              type = types.str;
+              default = "/ipns";
+              description = lib.mdDoc "Where to mount the IPNS namespace to";
+            };
           };
         };
         description = lib.mdDoc ''
@@ -309,8 +309,8 @@ in
     systemd.tmpfiles.rules = [
       "d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
     ] ++ optionals cfg.autoMount [
-      "d '${cfg.ipfsMountDir}' - ${cfg.user} ${cfg.group} - -"
-      "d '${cfg.ipnsMountDir}' - ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.settings.Mounts.IPFS}' - ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.settings.Mounts.IPNS}' - ${cfg.user} ${cfg.group} - -"
     ];
 
     # The hardened systemd unit breaks the fuse-mount function according to documentation in the unit file itself
@@ -320,8 +320,6 @@ in
 
     services.kubo.settings = mkIf cfg.autoMount {
       Mounts.FuseAllowOther = lib.mkDefault true;
-      Mounts.IPFS = lib.mkDefault cfg.ipfsMountDir;
-      Mounts.IPNS = lib.mkDefault cfg.ipnsMountDir;
     };
 
     systemd.services.ipfs = {
@@ -352,8 +350,8 @@ in
           ipfs --offline config replace -
       '';
       postStop = mkIf cfg.autoMount ''
-        # After an unclean shutdown the fuse mounts at cfg.ipnsMountDir and cfg.ipfsMountDir are locked
-        umount --quiet '${cfg.ipnsMountDir}' '${cfg.ipfsMountDir}' || true
+        # After an unclean shutdown the fuse mounts at cfg.settings.Mounts.IPFS and cfg.settings.Mounts.IPNS are locked
+        umount --quiet '${cfg.settings.Mounts.IPFS}' '${cfg.settings.Mounts.IPNS}' || true
       '';
       serviceConfig = {
         ExecStart = [ "" "${cfg.package}/bin/ipfs daemon ${kuboFlags}" ];
@@ -361,6 +359,8 @@ in
         Group = cfg.group;
         StateDirectory = "";
         ReadWritePaths = optionals (!cfg.autoMount) [ "" cfg.dataDir ];
+        # Make sure the socket units are started before ipfs.service
+        Sockets = [ "ipfs-gateway.socket" "ipfs-api.socket" ];
       } // optionalAttrs (cfg.serviceFdlimit != null) { LimitNOFILE = cfg.serviceFdlimit; };
     } // optionalAttrs (!cfg.startWhenNeeded) {
       wantedBy = [ "default.target" ];
@@ -403,8 +403,8 @@ in
     (mkRenamedOptionModule [ "services" "ipfs" "defaultMode" ] [ "services" "kubo" "defaultMode" ])
     (mkRenamedOptionModule [ "services" "ipfs" "autoMount" ] [ "services" "kubo" "autoMount" ])
     (mkRenamedOptionModule [ "services" "ipfs" "autoMigrate" ] [ "services" "kubo" "autoMigrate" ])
-    (mkRenamedOptionModule [ "services" "ipfs" "ipfsMountDir" ] [ "services" "kubo" "ipfsMountDir" ])
-    (mkRenamedOptionModule [ "services" "ipfs" "ipnsMountDir" ] [ "services" "kubo" "ipnsMountDir" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "ipfsMountDir" ] [ "services" "kubo" "settings" "Mounts" "IPFS" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "ipnsMountDir" ] [ "services" "kubo" "settings" "Mounts" "IPNS" ])
     (mkRenamedOptionModule [ "services" "ipfs" "gatewayAddress" ] [ "services" "kubo" "settings" "Addresses" "Gateway" ])
     (mkRenamedOptionModule [ "services" "ipfs" "apiAddress" ] [ "services" "kubo" "settings" "Addresses" "API" ])
     (mkRenamedOptionModule [ "services" "ipfs" "swarmAddress" ] [ "services" "kubo" "settings" "Addresses" "Swarm" ])
@@ -419,5 +419,7 @@ in
     (mkRenamedOptionModule [ "services" "kubo" "gatewayAddress" ] [ "services" "kubo" "settings" "Addresses" "Gateway" ])
     (mkRenamedOptionModule [ "services" "kubo" "apiAddress" ] [ "services" "kubo" "settings" "Addresses" "API" ])
     (mkRenamedOptionModule [ "services" "kubo" "swarmAddress" ] [ "services" "kubo" "settings" "Addresses" "Swarm" ])
+    (mkRenamedOptionModule [ "services" "kubo" "ipfsMountDir" ] [ "services" "kubo" "settings" "Mounts" "IPFS" ])
+    (mkRenamedOptionModule [ "services" "kubo" "ipnsMountDir" ] [ "services" "kubo" "settings" "Mounts" "IPNS" ])
   ];
 }
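
After the rename, the mount directories live under `settings.Mounts`; a short sketch with the option paths from the module and the default values shown above.

```nix
{
  services.kubo = {
    enable = true;
    autoMount = true;
    # Formerly services.kubo.ipfsMountDir / ipnsMountDir; both renames are handled automatically.
    settings.Mounts.IPFS = "/ipfs";
    settings.Mounts.IPNS = "/ipns";
  };
}
```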
diff --git a/nixos/modules/services/networking/avahi-daemon.nix b/nixos/modules/services/networking/avahi-daemon.nix
index de51843ba6f9c..08fc092e230c0 100644
--- a/nixos/modules/services/networking/avahi-daemon.nix
+++ b/nixos/modules/services/networking/avahi-daemon.nix
@@ -42,6 +42,7 @@ in
 {
   imports = [
     (lib.mkRenamedOptionModule [ "services" "avahi" "interfaces" ] [ "services" "avahi" "allowInterfaces" ])
+    (lib.mkRenamedOptionModule [ "services" "avahi" "nssmdns" ] [ "services" "avahi" "nssmdns4" ])
   ];
 
   options.services.avahi = {
@@ -93,8 +94,7 @@ in
 
     ipv6 = mkOption {
       type = types.bool;
-      default = config.networking.enableIPv6;
-      defaultText = literalExpression "config.networking.enableIPv6";
+      default = false;
       description = lib.mdDoc "Whether to use IPv6.";
     };
 
@@ -218,13 +218,28 @@ in
       };
     };
 
-    nssmdns = mkOption {
+    nssmdns4 = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the mDNS NSS (Name Service Switch) plug-in for IPv4.
+        Enabling it allows applications to resolve names in the `.local`
+        domain by transparently querying the Avahi daemon.
+      '';
+    };
+
+    nssmdns6 = mkOption {
       type = types.bool;
       default = false;
       description = lib.mdDoc ''
-        Whether to enable the mDNS NSS (Name Service Switch) plug-in.
+        Whether to enable the mDNS NSS (Name Service Switch) plug-in for IPv6.
         Enabling it allows applications to resolve names in the `.local`
         domain by transparently querying the Avahi daemon.
+
+        ::: {.note}
+        Because most mDNS responders only register local IPv4 addresses,
+        most users want to leave this option disabled to avoid long timeouts when
+        applications first try to resolve the non-existent IPv6 address.
+        :::
       '';
     };
 
@@ -256,9 +271,18 @@ in
 
     users.groups.avahi = { };
 
-    system.nssModules = optional cfg.nssmdns pkgs.nssmdns;
-    system.nssDatabases.hosts = optionals cfg.nssmdns (mkMerge [
-      (mkBefore [ "mdns_minimal [NOTFOUND=return]" ]) # before resolve
+    system.nssModules = optional (cfg.nssmdns4 || cfg.nssmdns6) pkgs.nssmdns;
+    system.nssDatabases.hosts = let
+      mdnsMinimal = if (cfg.nssmdns4 && cfg.nssmdns6) then
+        "mdns_minimal"
+      else if (!cfg.nssmdns4 && cfg.nssmdns6) then
+        "mdns6_minimal"
+      else if (cfg.nssmdns4 && !cfg.nssmdns6) then
+        "mdns4_minimal"
+      else
+        "";
+    in optionals (cfg.nssmdns4 || cfg.nssmdns6) (mkMerge [
+      (mkBefore [ "${mdnsMinimal} [NOTFOUND=return]" ]) # before resolve
       (mkAfter [ "mdns" ]) # after dns
     ]);
 
diff --git a/nixos/modules/services/networking/ddclient.nix b/nixos/modules/services/networking/ddclient.nix
index 8f4fb0bc78d4e..18f205b8d99ef 100644
--- a/nixos/modules/services/networking/ddclient.nix
+++ b/nixos/modules/services/networking/ddclient.nix
@@ -126,7 +126,7 @@ with lib;
         default = "dyndns2";
         type = str;
         description = lib.mdDoc ''
-          Protocol to use with dynamic DNS provider (see https://sourceforge.net/p/ddclient/wiki/protocols).
+          Protocol to use with the dynamic DNS provider (see <https://ddclient.net/protocols.html>).
         '';
       };
 
@@ -217,7 +217,7 @@ with lib;
         inherit RuntimeDirectory;
         inherit StateDirectory;
         Type = "oneshot";
-        ExecStartPre = "!${pkgs.writeShellScript "ddclient-prestart" preStart}";
+        ExecStartPre = [ "!${pkgs.writeShellScript "ddclient-prestart" preStart}" ];
         ExecStart = "${lib.getExe cfg.package} -file /run/${RuntimeDirectory}/ddclient.conf";
       };
     };
diff --git a/nixos/modules/services/networking/dnsmasq.md b/nixos/modules/services/networking/dnsmasq.md
new file mode 100644
index 0000000000000..6fc9178b1c0d5
--- /dev/null
+++ b/nixos/modules/services/networking/dnsmasq.md
@@ -0,0 +1,68 @@
+# Dnsmasq {#module-services-networking-dnsmasq}
+
+Dnsmasq is an integrated DNS, DHCP and TFTP server for small networks.
+
+## Configuration {#module-services-networking-dnsmasq-configuration}
+
+### An authoritative DHCP and DNS server on a home network {#module-services-networking-dnsmasq-configuration-home}
+
+On a home network, you can use Dnsmasq as a DHCP and DNS server. New devices on
+your network will be configured by Dnsmasq, and instructed to use it as the DNS
+server by default. This allows you to rely on your own server to perform DNS
+queries and caching, with DNSSEC enabled.
+
+The following example assumes that
+
+- you have disabled your router's integrated DHCP server, if it has one
+- your router's address is set in [](#opt-networking.defaultGateway.address)
+- your system's Ethernet interface is `eth0`
+- you have configured the address(es) to forward DNS queries in [](#opt-networking.nameservers)
+
+```nix
+{
+  services.dnsmasq = {
+    enable = true;
+    settings = {
+      interface = "eth0";
+      bind-interfaces = true; # Only bind to the specified interface
+      dhcp-authoritative = true; # Should be set when dnsmasq is definitely the only DHCP server on a network
+
+      server = config.networking.nameservers; # Upstream dns servers to which requests should be forwarded
+
+      dhcp-host = [
+        # Give the current system a fixed address of 192.168.0.254
+        "dc:a6:32:0b:ea:b9,192.168.0.254,${config.networking.hostName},infinite"
+      ];
+
+      dhcp-option = [
+        # Address of the gateway, i.e. your router
+        "option:router,${config.networking.defaultGateway.address}"
+      ];
+
+      dhcp-range = [
+        # Range of IPv4 addresses to give out
+        # <range start>,<range end>,<lease time>
+        "192.168.0.10,192.168.0.253,24h"
+        # Enable stateless IPv6 allocation
+        "::f,::ff,constructor:eth0,ra-stateless"
+      ];
+
+      dhcp-rapid-commit = true; # Faster DHCP negotiation for IPv6
+      local-service = true; # Accept DNS queries only from hosts whose address is on a local subnet
+      log-queries = true; # Log results of all DNS queries
+      bogus-priv = true; # Don't forward requests for the local address ranges (192.168.x.x etc) to upstream nameservers
+      domain-needed = true; # Don't forward requests without dots or domain parts to upstream nameservers
+
+      dnssec = true; # Enable DNSSEC
+      # DNSSEC trust anchor. Source: https://data.iana.org/root-anchors/root-anchors.xml
+      trust-anchor = ".,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D";
+    };
+  };
+}
+```
+
+## References {#module-services-networking-dnsmasq-references}
+
+- Upstream website: <https://dnsmasq.org>
+- Manpage: <https://dnsmasq.org/docs/dnsmasq-man.html>
+- FAQ: <https://dnsmasq.org/docs/FAQ>
diff --git a/nixos/modules/services/networking/dnsmasq.nix b/nixos/modules/services/networking/dnsmasq.nix
index 8d1ca36c38ed2..d01a1b6707a53 100644
--- a/nixos/modules/services/networking/dnsmasq.nix
+++ b/nixos/modules/services/networking/dnsmasq.nix
@@ -181,4 +181,6 @@ in
         restartTriggers = [ config.environment.etc.hosts.source ];
     };
   };
+
+  meta.doc = ./dnsmasq.md;
 }
diff --git a/nixos/modules/services/networking/ejabberd.nix b/nixos/modules/services/networking/ejabberd.nix
index b10a3d9f21df6..78af256f9c81b 100644
--- a/nixos/modules/services/networking/ejabberd.nix
+++ b/nixos/modules/services/networking/ejabberd.nix
@@ -120,6 +120,12 @@ in {
         if [ -z "$(ls -A '${cfg.spoolDir}')" ]; then
           touch "${cfg.spoolDir}/.firstRun"
         fi
+
+        if ! test -e ${cfg.spoolDir}/.erlang.cookie; then
+          touch ${cfg.spoolDir}/.erlang.cookie
+          chmod 600 ${cfg.spoolDir}/.erlang.cookie
+          dd if=/dev/random bs=16 count=1 | base64 > ${cfg.spoolDir}/.erlang.cookie
+        fi
       '';
 
       postStart = ''
diff --git a/nixos/modules/services/networking/firewall-iptables.nix b/nixos/modules/services/networking/firewall-iptables.nix
index e900868387203..2d11517700086 100644
--- a/nixos/modules/services/networking/firewall-iptables.nix
+++ b/nixos/modules/services/networking/firewall-iptables.nix
@@ -308,8 +308,9 @@ in
       description = "Firewall";
       wantedBy = [ "sysinit.target" ];
       wants = [ "network-pre.target" ];
-      before = [ "network-pre.target" ];
       after = [ "systemd-modules-load.service" ];
+      before = [ "network-pre.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
 
       path = [ cfg.package ] ++ cfg.extraPackages;
 
diff --git a/nixos/modules/services/networking/gns3-server.md b/nixos/modules/services/networking/gns3-server.md
new file mode 100644
index 0000000000000..9320d914fbd3a
--- /dev/null
+++ b/nixos/modules/services/networking/gns3-server.md
@@ -0,0 +1,31 @@
+# GNS3 Server {#module-services-gns3-server}
+
+[GNS3](https://www.gns3.com/) is a network software emulator.
+
+## Basic Usage {#module-services-gns3-server-basic-usage}
+
+A minimal configuration looks like this:
+
+```nix
+{
+  services.gns3-server = {
+    enable = true;
+
+    auth = {
+      enable = true;
+      user = "gns3";
+      passwordFile = "/var/lib/secrets/gns3_password";
+    };
+
+    ssl = {
+      enable = true;
+      certFile = "/var/lib/gns3/ssl/cert.pem";
+      keyFile = "/var/lib/gns3/ssl/key.pem";
+    };
+
+    dynamips.enable = true;
+    ubridge.enable = true;
+    vpcs.enable = true;
+  };
+}
+```
diff --git a/nixos/modules/services/networking/gns3-server.nix b/nixos/modules/services/networking/gns3-server.nix
new file mode 100644
index 0000000000000..25583765de672
--- /dev/null
+++ b/nixos/modules/services/networking/gns3-server.nix
@@ -0,0 +1,263 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.gns3-server;
+
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "gns3-server.conf" cfg.settings;
+
+in {
+  meta = {
+    doc = ./gns3-server.md;
+    maintainers = [ lib.maintainers.anthonyroussel ];
+  };
+
+  options = {
+    services.gns3-server = {
+      enable = lib.mkEnableOption (lib.mdDoc "GNS3 Server daemon");
+
+      package = lib.mkPackageOptionMD pkgs "gns3-server" { };
+
+      auth = {
+        enable = lib.mkEnableOption (lib.mdDoc "password based HTTP authentication to access the GNS3 Server");
+
+        user = lib.mkOption {
+          type = lib.types.nullOr lib.types.str;
+          default = null;
+          example = "gns3";
+          description = lib.mdDoc ''Username used to access the GNS3 Server.'';
+        };
+
+        passwordFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/run/secrets/gns3-server-password";
+          description = lib.mdDoc ''
+            A file containing the password to access the GNS3 Server.
+
+            ::: {.warning}
+            This should be a string, not a nix path, since nix paths
+            are copied into the world-readable nix store.
+            :::
+          '';
+        };
+      };
+
+      settings = lib.mkOption {
+        type = lib.types.submodule { freeformType = settingsFormat.type; };
+        default = {};
+        example = { host = "127.0.0.1"; port = 3080; };
+        description = lib.mdDoc ''
+          The global options for the GNS3 server configuration file, in INI format.
+
+          Refer to <https://docs.gns3.com/docs/using-gns3/administration/gns3-server-configuration-file/>
+          for all available options.
+        '';
+      };
+
+      log = {
+        file = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = "/var/log/gns3/server.log";
+          description = lib.mdDoc ''Path of the file GNS3 Server should log to.'';
+        };
+
+        debug = lib.mkEnableOption (lib.mdDoc "debug logging");
+      };
+
+      ssl = {
+        enable = lib.mkEnableOption (lib.mdDoc "SSL encryption");
+
+        certFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/gns3/ssl/server.pem";
+          description = lib.mdDoc ''
+            Path to the SSL certificate file. This certificate will
+            be offered to, and may be verified by, clients.
+          '';
+        };
+
+        keyFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/gns3/ssl/server.key";
+          description = lib.mdDoc "Private key file for the certificate.";
+        };
+      };
+
+      dynamips = {
+        enable = lib.mkEnableOption (lib.mdDoc "Dynamips support");
+        package = lib.mkPackageOptionMD pkgs "dynamips" { };
+      };
+
+      ubridge = {
+        enable = lib.mkEnableOption (lib.mdDoc "uBridge support");
+        package = lib.mkPackageOptionMD pkgs "ubridge" { };
+      };
+
+      vpcs = {
+        enable = lib.mkEnableOption (lib.mdDoc "VPCS support");
+        package = lib.mkPackageOptionMD pkgs "vpcs" { };
+      };
+    };
+  };
+
+  config = let
+    flags = {
+      enableDocker = config.virtualisation.docker.enable;
+      enableLibvirtd = config.virtualisation.libvirtd.enable;
+    };
+
+  in lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.ssl.enable -> cfg.ssl.certFile != null;
+        message = "Please provide a certificate to use for SSL encryption.";
+      }
+      {
+        assertion = cfg.ssl.enable -> cfg.ssl.keyFile != null;
+        message = "Please provide a private key to use for SSL encryption.";
+      }
+      {
+        assertion = cfg.auth.enable -> cfg.auth.user != null;
+        message = "Please provide a username to use for HTTP authentication.";
+      }
+      {
+        assertion = cfg.auth.enable -> cfg.auth.passwordFile != null;
+        message = "Please provide a password file to use for HTTP authentication.";
+      }
+    ];
+
+    users.groups.ubridge = lib.mkIf cfg.ubridge.enable { };
+
+    security.wrappers.ubridge = lib.mkIf cfg.ubridge.enable {
+      capabilities = "cap_net_raw,cap_net_admin=eip";
+      group = "ubridge";
+      owner = "root";
+      permissions = "u=rwx,g=rx,o=r";
+      source = lib.getExe cfg.ubridge.package;
+    };
+
+    services.gns3-server.settings = lib.mkMerge [
+      {
+        Server = {
+          appliances_path = lib.mkDefault "/var/lib/gns3/appliances";
+          configs_path = lib.mkDefault "/var/lib/gns3/configs";
+          images_path = lib.mkDefault "/var/lib/gns3/images";
+          projects_path = lib.mkDefault "/var/lib/gns3/projects";
+          symbols_path = lib.mkDefault "/var/lib/gns3/symbols";
+        };
+      }
+      (lib.mkIf (cfg.ubridge.enable) {
+        Server.ubridge_path = lib.mkDefault (lib.getExe cfg.ubridge.package);
+      })
+      (lib.mkIf (cfg.auth.enable) {
+        Server = {
+          auth = lib.mkDefault (lib.boolToString cfg.auth.enable);
+          user = lib.mkDefault cfg.auth.user;
+          password = lib.mkDefault "@AUTH_PASSWORD@";
+        };
+      })
+      (lib.mkIf (cfg.vpcs.enable) {
+        VPCS.vpcs_path = lib.mkDefault (lib.getExe cfg.vpcs.package);
+      })
+      (lib.mkIf (cfg.dynamips.enable) {
+        Dynamips.dynamips_path = lib.mkDefault (lib.getExe cfg.dynamips.package);
+      })
+    ];
+
+    systemd.services.gns3-server = let
+      commandArgs = lib.cli.toGNUCommandLineShell { } {
+        config = "/etc/gns3/gns3_server.conf";
+        pid = "/run/gns3/server.pid";
+        log = cfg.log.file;
+        ssl = cfg.ssl.enable;
+        # These are implicitly not set if `null`
+        certfile = cfg.ssl.certFile;
+        certkey = cfg.ssl.keyFile;
+      };
+    in
+    {
+      description = "GNS3 Server";
+
+      after = [ "network.target" "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+
+      # configFile cannot be stored in RuntimeDirectory, because GNS3
+      # uses the `--config` base path to store supplementary configuration files at runtime.
+      #
+      preStart = ''
+        install -m660 ${configFile} /etc/gns3/gns3_server.conf
+
+        ${lib.optionalString cfg.auth.enable ''
+          ${pkgs.replace-secret}/bin/replace-secret \
+            '@AUTH_PASSWORD@' \
+            "''${CREDENTIALS_DIRECTORY}/AUTH_PASSWORD" \
+            /etc/gns3/gns3_server.conf
+        ''}
+      '';
+
+      path = lib.optional flags.enableLibvirtd pkgs.qemu;
+
+      reloadTriggers = [ configFile ];
+
+      serviceConfig = {
+        ConfigurationDirectory = "gns3";
+        ConfigurationDirectoryMode = "0750";
+        DynamicUser = true;
+        Environment = "HOME=%S/gns3";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStart = "${lib.getExe cfg.package} ${commandArgs}";
+        Group = "gns3";
+        LimitNOFILE = 16384;
+        LoadCredential = lib.mkIf cfg.auth.enable [ "AUTH_PASSWORD:${cfg.auth.passwordFile}" ];
+        LogsDirectory = "gns3";
+        LogsDirectoryMode = "0750";
+        PIDFile = "/run/gns3/server.pid";
+        Restart = "on-failure";
+        RestartSec = 5;
+        RuntimeDirectory = "gns3";
+        StateDirectory = "gns3";
+        StateDirectoryMode = "0750";
+        SupplementaryGroups = lib.optional flags.enableDocker "docker"
+          ++ lib.optional flags.enableLibvirtd "libvirtd"
+          ++ lib.optional cfg.ubridge.enable "ubridge";
+        User = "gns3";
+        WorkingDirectory = "%S/gns3";
+
+        # Hardening
+        DeviceAllow = lib.optional flags.enableLibvirtd "/dev/kvm";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        # Don't restrict ProcSubset because python3Packages.psutil requires read access to /proc/stat
+        # ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_NETLINK"
+          "AF_UNIX"
+          "AF_PACKET"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        UMask = "0077";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/harmonia.nix b/nixos/modules/services/networking/harmonia.nix
index beaa7d00b6ce8..b384ac9261376 100644
--- a/nixos/modules/services/networking/harmonia.nix
+++ b/nixos/modules/services/networking/harmonia.nix
@@ -29,6 +29,11 @@ in
 
   config = lib.mkIf cfg.enable {
     nix.settings.extra-allowed-users = [ "harmonia" ];
+    users.users.harmonia = {
+      isSystemUser = true;
+      group = "harmonia";
+    };
+    users.groups.harmonia = { };
 
     systemd.services.harmonia = {
       description = "harmonia binary cache service";
@@ -50,7 +55,7 @@ in
         ExecStart = lib.getExe cfg.package;
         User = "harmonia";
         Group = "harmonia";
-        DynamicUser = true;
+        Restart = "on-failure";
         PrivateUsers = true;
         DeviceAllow = [ "" ];
         UMask = "0066";
diff --git a/nixos/modules/services/networking/iwd.nix b/nixos/modules/services/networking/iwd.nix
index b74f5d0bec9b8..d46c1a69a6197 100644
--- a/nixos/modules/services/networking/iwd.nix
+++ b/nixos/modules/services/networking/iwd.nix
@@ -64,8 +64,10 @@ in
     };
 
     systemd.services.iwd = {
+      path = [ config.networking.resolvconf.package ];
       wantedBy = [ "multi-user.target" ];
       restartTriggers = [ configFile ];
+      serviceConfig.ReadWritePaths = "-/etc/resolv.conf";
     };
   };
 
diff --git a/nixos/modules/services/networking/jigasi.nix b/nixos/modules/services/networking/jigasi.nix
new file mode 100644
index 0000000000000..e701689031b14
--- /dev/null
+++ b/nixos/modules/services/networking/jigasi.nix
@@ -0,0 +1,237 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jigasi;
+  homeDirName = "jigasi-home";
+  stateDir = "/tmp";
+  sipCommunicatorPropertiesFile = "${stateDir}/${homeDirName}/sip-communicator.properties";
+  sipCommunicatorPropertiesFileUnsubstituted = "${pkgs.jigasi}/etc/jitsi/jigasi/sip-communicator.properties";
+in
+{
+  options.services.jigasi = with types; {
+    enable = mkEnableOption "Jitsi Gateway to SIP - component of Jitsi Meet";
+
+    xmppHost = mkOption {
+      type = str;
+      example = "localhost";
+      description = ''
+        Hostname of the XMPP server to connect to.
+      '';
+    };
+
+    xmppDomain = mkOption {
+      type = nullOr str;
+      example = "meet.example.org";
+      description = ''
+        Domain name of the XMPP server to which to connect as a component.
+
+        If null, <option>xmppHost</option> is used.
+      '';
+    };
+
+    componentPasswordFile = mkOption {
+      type = str;
+      example = "/run/keys/jigasi-component";
+      description = ''
+        Path to file containing component secret.
+      '';
+    };
+
+    userName = mkOption {
+      type = str;
+      default = "callcontrol";
+      description = ''
+        User part of the JID for XMPP user connection.
+      '';
+    };
+
+    userDomain = mkOption {
+      type = str;
+      example = "internal.meet.example.org";
+      description = ''
+        Domain part of the JID for XMPP user connection.
+      '';
+    };
+
+    userPasswordFile = mkOption {
+      type = str;
+      example = "/run/keys/jigasi-user";
+      description = ''
+        Path to file containing password for XMPP user connection.
+      '';
+    };
+
+    bridgeMuc = mkOption {
+      type = str;
+      example = "jigasibrewery@internal.meet.example.org";
+      description = ''
+        JID of the internal MUC used to communicate with Videobridges.
+      '';
+    };
+
+    defaultJvbRoomName = mkOption {
+      type = str;
+      default = "";
+      example = "siptest";
+      description = ''
+        Name of the default JVB room that will be joined if no special header is included in SIP invite.
+      '';
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = ''
+        File containing environment variables to be passed to the jigasi service,
+        in which secret tokens can be specified securely by defining values for
+        <literal>JIGASI_SIPUSER</literal>,
+        <literal>JIGASI_SIPPWD</literal>,
+        <literal>JIGASI_SIPSERVER</literal> and
+        <literal>JIGASI_SIPPORT</literal>.
+      '';
+    };
+
+    config = mkOption {
+      type = attrsOf str;
+      default = { };
+      example = literalExpression ''
+        {
+          "org.jitsi.jigasi.auth.URL" = "XMPP:jitsi-meet.example.com";
+        }
+      '';
+      description = ''
+        Contents of the <filename>sip-communicator.properties</filename> configuration file for jigasi.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.jicofo.config = {
+      "org.jitsi.jicofo.jigasi.BREWERY" = "${cfg.bridgeMuc}";
+    };
+
+    services.jigasi.config = mapAttrs (_: v: mkDefault v) {
+      "org.jitsi.jigasi.BRIDGE_MUC" = cfg.bridgeMuc;
+    };
+
+    users.groups.jitsi-meet = {};
+
+    systemd.services.jigasi = let
+      jigasiProps = {
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_LOCATION" = "${stateDir}";
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_NAME" = "${homeDirName}";
+        "-Djava.util.logging.config.file" = "${pkgs.jigasi}/etc/jitsi/jigasi/logging.properties";
+      };
+    in
+    {
+      description = "Jitsi Gateway to SIP";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        [ -f "${sipCommunicatorPropertiesFile}" ] && rm -f "${sipCommunicatorPropertiesFile}"
+        mkdir -p "$(dirname ${sipCommunicatorPropertiesFile})"
+        temp="${sipCommunicatorPropertiesFile}.unsubstituted"
+
+        export DOMAIN_BASE="${cfg.xmppDomain}"
+        export JIGASI_XMPP_PASSWORD=$(cat "${cfg.userPasswordFile}")
+        export JIGASI_DEFAULT_JVB_ROOM_NAME="${cfg.defaultJvbRoomName}"
+
+        # encode the credentials to base64
+        export JIGASI_SIPPWD=$(echo -n "$JIGASI_SIPPWD" | base64 -w 0)
+        export JIGASI_XMPP_PASSWORD_BASE64=$(cat "${cfg.userPasswordFile}" | base64 -w 0)
+
+        cp "${sipCommunicatorPropertiesFileUnsubstituted}" "$temp"
+        chmod 644 "$temp"
+        cat <<EOF >>"$temp"
+        net.java.sip.communicator.impl.protocol.sip.acc1403273890647.SERVER_PORT=$JIGASI_SIPPORT
+        net.java.sip.communicator.impl.protocol.sip.acc1403273890647.PREFERRED_TRANSPORT=udp
+        EOF
+        chmod 444 "$temp"
+
+        # Replace <<$VAR_NAME>> from example config to $VAR_NAME for environment substitution
+        sed -i -E \
+          's/<<([^>]+)>>/\$\1/g' \
+          "$temp"
+
+        sed -i \
+          's|\(net\.java\.sip\.communicator\.impl\.protocol\.jabber\.acc-xmpp-1\.PASSWORD=\).*|\1\$JIGASI_XMPP_PASSWORD_BASE64|g' \
+          "$temp"
+
+        sed -i \
+          's|\(#\)\(org.jitsi.jigasi.DEFAULT_JVB_ROOM_NAME=\).*|\2\$JIGASI_DEFAULT_JVB_ROOM_NAME|g' \
+          "$temp"
+
+        ${pkgs.envsubst}/bin/envsubst \
+          -o "${sipCommunicatorPropertiesFile}" \
+          -i "$temp"
+
+        # Set the brewery room name
+        sed -i \
+          's|\(net\.java\.sip\.communicator\.impl\.protocol\.jabber\.acc-xmpp-1\.BREWERY=\).*|\1${cfg.bridgeMuc}|g' \
+          "${sipCommunicatorPropertiesFile}"
+        sed -i \
+          's|\(org\.jitsi\.jigasi\.ALLOWED_JID=\).*|\1${cfg.bridgeMuc}|g' \
+          "${sipCommunicatorPropertiesFile}"
+
+
+        # Disable certificate verification for self-signed certificates
+        sed -i \
+          's|\(# \)\(net.java.sip.communicator.service.gui.ALWAYS_TRUST_MODE_ENABLED=true\)|\2|g' \
+          "${sipCommunicatorPropertiesFile}"
+      '';
+
+      restartTriggers = [
+        config.environment.etc."jitsi/jigasi/sip-communicator.properties".source
+      ];
+      environment.JAVA_SYS_PROPS = concatStringsSep " " (mapAttrsToList (k: v: "${k}=${toString v}") jigasiProps);
+
+      script = ''
+        ${pkgs.jigasi}/bin/jigasi \
+          --host="${cfg.xmppHost}" \
+          --domain="${if cfg.xmppDomain == null then cfg.xmppHost else cfg.xmppDomain}" \
+          --secret="$(cat ${cfg.componentPasswordFile})" \
+          --user_name="${cfg.userName}" \
+          --user_domain="${cfg.userDomain}" \
+          --user_password="$(cat ${cfg.userPasswordFile})" \
+          --configdir="${stateDir}" \
+          --configdirname="${homeDirName}"
+      '';
+
+      serviceConfig = {
+        Type = "exec";
+
+        DynamicUser = true;
+        User = "jigasi";
+        Group = "jitsi-meet";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        StateDirectory = baseNameOf stateDir;
+        EnvironmentFile = cfg.environmentFile;
+      };
+    };
+
+    environment.etc."jitsi/jigasi/sip-communicator.properties".source =
+      mkDefault "${sipCommunicatorPropertiesFile}";
+    environment.etc."jitsi/jigasi/logging.properties".source =
+      mkDefault "${stateDir}/logging.properties-journal";
+  };
+
+  meta.maintainers = lib.teams.jitsi.members;
+}
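
The environmentFile option above is how SIP credentials stay out of the world-readable Nix store; a minimal usage sketch (host names and the key path are placeholders, and the enable option is implied by the `mkIf cfg.enable` block):

    services.jigasi = {
      enable = true;
      bridgeMuc = "jigasibrewery@internal.meet.example.org";
      defaultJvbRoomName = "siptest";
      # supplies JIGASI_SIPUSER, JIGASI_SIPPWD, JIGASI_SIPSERVER and JIGASI_SIPPORT
      environmentFile = "/run/keys/jigasi-sip.env";
    };
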
diff --git a/nixos/modules/services/networking/kea.nix b/nixos/modules/services/networking/kea.nix
index 2f922a026a3a9..5ca705976c413 100644
--- a/nixos/modules/services/networking/kea.nix
+++ b/nixos/modules/services/networking/kea.nix
@@ -254,6 +254,8 @@ in
       DynamicUser = true;
       User = "kea";
       ConfigurationDirectory = "kea";
+      RuntimeDirectory = "kea";
+      RuntimeDirectoryPreserve = true;
       StateDirectory = "kea";
       UMask = "0077";
     };
@@ -288,8 +290,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea-ctrl-agent";
-        KEA_LOCKFILE_DIR = "/run/kea-ctrl-agent";
+        KEA_PIDFILE_DIR = "/run/kea";
+        KEA_LOCKFILE_DIR = "/run/kea";
       };
 
       restartTriggers = [
@@ -300,7 +302,6 @@ in
         ExecStart = "${package}/bin/kea-ctrl-agent -c /etc/kea/ctrl-agent.conf ${lib.escapeShellArgs cfg.ctrl-agent.extraArgs}";
         KillMode = "process";
         Restart = "on-failure";
-        RuntimeDirectory = "kea-ctrl-agent";
       } // commonServiceConfig;
     };
   })
@@ -329,8 +330,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea-dhcp4";
-        KEA_LOCKFILE_DIR = "/run/kea-dhcp4";
+        KEA_PIDFILE_DIR = "/run/kea";
+        KEA_LOCKFILE_DIR = "/run/kea";
       };
 
       restartTriggers = [
@@ -348,7 +349,6 @@ in
           "CAP_NET_BIND_SERVICE"
           "CAP_NET_RAW"
         ];
-        RuntimeDirectory = "kea-dhcp4";
       } // commonServiceConfig;
     };
   })
@@ -377,8 +377,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea-dhcp6";
-        KEA_LOCKFILE_DIR = "/run/kea-dhcp6";
+        KEA_PIDFILE_DIR = "/run/kea";
+        KEA_LOCKFILE_DIR = "/run/kea";
       };
 
       restartTriggers = [
@@ -394,7 +394,6 @@ in
         CapabilityBoundingSet = [
           "CAP_NET_BIND_SERVICE"
         ];
-        RuntimeDirectory = "kea-dhcp6";
       } // commonServiceConfig;
     };
   })
@@ -423,8 +422,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea-dhcp-ddns";
-        KEA_LOCKFILE_DIR = "/run/kea-dhcp-ddns";
+        KEA_PIDFILE_DIR = "/run/kea";
+        KEA_LOCKFILE_DIR = "/run/kea";
       };
 
       restartTriggers = [
@@ -439,7 +438,6 @@ in
         CapabilityBoundingSet = [
           "CAP_NET_BIND_SERVICE"
         ];
-        RuntimeDirectory = "kea-dhcp-ddns";
       } // commonServiceConfig;
     };
   })
diff --git a/nixos/modules/services/networking/nebula.nix b/nixos/modules/services/networking/nebula.nix
index b9ebbfbd9a297..e13876172dac6 100644
--- a/nixos/modules/services/networking/nebula.nix
+++ b/nixos/modules/services/networking/nebula.nix
@@ -196,7 +196,7 @@ in
             before = [ "sshd.service" ];
             wantedBy = [ "multi-user.target" ];
             serviceConfig = {
-              Type = "simple";
+              Type = "notify";
               Restart = "always";
               ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
               UMask = "0027";
diff --git a/nixos/modules/services/networking/quicktun.nix b/nixos/modules/services/networking/quicktun.nix
index 7aed972adc882..2d44659f20804 100644
--- a/nixos/modules/services/networking/quicktun.nix
+++ b/nixos/modules/services/networking/quicktun.nix
@@ -1,94 +1,153 @@
-{ config, pkgs, lib, ... }:
+{ options, config, pkgs, lib, ... }:
 
 let
+  inherit (lib) mkOption mdDoc types mkIf;
 
+  opt = options.services.quicktun;
   cfg = config.services.quicktun;
-
 in
-
-with lib;
-
 {
   options = {
-
     services.quicktun = mkOption {
       default = { };
-      description = lib.mdDoc "QuickTun tunnels";
-      type = types.attrsOf (types.submodule {
+      description = mdDoc ''
+        QuickTun tunnels.
+
+        See <http://wiki.ucis.nl/QuickTun> for more information about available options.
+      '';
+      type = types.attrsOf (types.submodule ({ name, ... }: let
+        qtcfg = cfg.${name};
+      in {
         options = {
           tunMode = mkOption {
-            type = types.int;
-            default = 0;
-            example = 1;
-            description = lib.mdDoc "";
+            type = with types; coercedTo bool (b: if b then 1 else 0) (ints.between 0 1);
+            default = false;
+            example = true;
+            description = mdDoc "Whether to operate in tun (IP) or tap (Ethernet) mode.";
           };
 
           remoteAddress = mkOption {
             type = types.str;
+            default = "0.0.0.0";
             example = "tunnel.example.com";
-            description = lib.mdDoc "";
+            description = mdDoc ''
+              IP address or hostname of the remote end (use `0.0.0.0` for a floating/dynamic remote endpoint).
+            '';
           };
 
           localAddress = mkOption {
-            type = types.str;
+            type = with types; nullOr str;
+            default = null;
             example = "0.0.0.0";
-            description = lib.mdDoc "";
+            description = mdDoc "IP address or hostname of the local end.";
           };
 
           localPort = mkOption {
-            type = types.int;
+            type = types.port;
             default = 2998;
-            description = lib.mdDoc "";
+            description = mdDoc "Local UDP port.";
           };
 
           remotePort = mkOption {
-            type = types.int;
-            default = 2998;
-            description = lib.mdDoc "";
+            type = types.port;
+            default = qtcfg.localPort;
+            defaultText = lib.literalExpression "config.services.quicktun.<name>.localPort";
+            description = mdDoc " remote UDP port";
           };
 
           remoteFloat = mkOption {
-            type = types.int;
-            default = 0;
-            description = lib.mdDoc "";
+            type = with types; coercedTo bool (b: if b then 1 else 0) (ints.between 0 1);
+            default = false;
+            example = true;
+            description = mdDoc ''
+              Whether to allow the remote address and port to change when properly encrypted packets are received.
+            '';
           };
 
           protocol = mkOption {
-            type = types.str;
+            type = types.enum [ "raw" "nacl0" "nacltai" "salty" ];
             default = "nacltai";
-            description = lib.mdDoc "";
+            description = mdDoc "Which protocol to use.";
           };
 
           privateKey = mkOption {
-            type = types.str;
-            description = lib.mdDoc "";
+            type = with types; nullOr str;
+            default = null;
+            description = mdDoc ''
+              Local secret key in hexadecimal form.
+
+              ::: {.warning}
+              This option is deprecated. Please use {var}`services.quicktun.<name>.privateKeyFile` instead.
+              :::
+
+              ::: {.note}
+              Not needed when {var}`services.quicktun.<name>.protocol` is set to `raw`.
+              :::
+            '';
+          };
+
+          privateKeyFile = mkOption {
+            type = with types; nullOr path;
+            # This is a hack to deprecate `privateKey` without using `mkChangedModuleOption`
+            default = if qtcfg.privateKey == null then null else pkgs.writeText "quicktun-key-${name}" qtcfg.privateKey;
+            defaultText = "null";
+            description = mdDoc ''
+              Path to file containing local secret key in binary or hexadecimal form.
+
+              ::: {.note}
+              Not needed when {var}`services.quicktun.<name>.protocol` is set to `raw`.
+              :::
+            '';
           };
 
           publicKey = mkOption {
-            type = types.str;
-            description = lib.mdDoc "";
+            type = with types; nullOr str;
+            default = null;
+            description = mdDoc ''
+              Remote public key in hexadecimal form.
+
+              ::: {.note}
+              Not needed when {var}`services.quicktun.<name>.protocol` is set to `raw`.
+              :::
+            '';
           };
 
           timeWindow = mkOption {
-            type = types.int;
+            type = types.ints.unsigned;
             default = 5;
-            description = lib.mdDoc "";
+            description = mdDoc ''
+              Allowed time window for the first received packet, in seconds (a positive number allows packets from history).
+            '';
           };
 
           upScript = mkOption {
-            type = types.lines;
-            default = "";
-            description = lib.mdDoc "";
+            type = with types; nullOr lines;
+            default = null;
+            description = mdDoc ''
+              Run specified command or script after the tunnel device has been opened.
+            '';
           };
         };
-      });
+      }));
     };
-
   };
 
-  config = mkIf (cfg != []) {
-    systemd.services = foldr (a: b: a // b) {} (
-      mapAttrsToList (name: qtcfg: {
+  config = {
+    warnings = lib.pipe cfg [
+      (lib.mapAttrsToList (name: value: if value.privateKey != null then name else null))
+      (builtins.filter (n: n != null))
+      (map (n: "  - services.quicktun.${n}.privateKey"))
+      (services: lib.optional (services != [ ]) ''
+        `services.quicktun.<name>.privateKey` is deprecated.
+        Please use `services.quicktun.<name>.privateKeyFile` instead.
+
+        Offending options:
+        ${lib.concatStringsSep "\n" services}
+      '')
+    ];
+
+    systemd.services = lib.mkMerge (
+      lib.mapAttrsToList (name: qtcfg: {
         "quicktun-${name}" = {
           wantedBy = [ "multi-user.target" ];
           after = [ "network.target" ];
@@ -96,14 +155,14 @@ with lib;
             INTERFACE = name;
             TUN_MODE = toString qtcfg.tunMode;
             REMOTE_ADDRESS = qtcfg.remoteAddress;
-            LOCAL_ADDRESS = qtcfg.localAddress;
+            LOCAL_ADDRESS = mkIf (qtcfg.localAddress != null) (qtcfg.localAddress);
             LOCAL_PORT = toString qtcfg.localPort;
             REMOTE_PORT = toString qtcfg.remotePort;
             REMOTE_FLOAT = toString qtcfg.remoteFloat;
-            PRIVATE_KEY = qtcfg.privateKey;
-            PUBLIC_KEY = qtcfg.publicKey;
+            PRIVATE_KEY_FILE = mkIf (qtcfg.privateKeyFile != null) qtcfg.privateKeyFile;
+            PUBLIC_KEY = mkIf (qtcfg.publicKey != null) qtcfg.publicKey;
             TIME_WINDOW = toString qtcfg.timeWindow;
-            TUN_UP_SCRIPT = pkgs.writeScript "quicktun-${name}-up.sh" qtcfg.upScript;
+            TUN_UP_SCRIPT = mkIf (qtcfg.upScript != null) (pkgs.writeScript "quicktun-${name}-up.sh" qtcfg.upScript);
             SUID = "nobody";
           };
           serviceConfig = {
@@ -114,5 +173,4 @@ with lib;
       }) cfg
     );
   };
-
 }
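
With the rewrite above, `privateKey` is deprecated in favour of `privateKeyFile`, and several options became nullable or boolean-coercible; a tunnel sketch under those assumptions (addresses, key path and the hex public key are placeholders):

    services.quicktun.tun0 = {
      protocol = "nacltai";
      remoteAddress = "tunnel.example.com";
      tunMode = true;                                  # booleans are coerced to 0/1
      privateKeyFile = "/run/keys/quicktun-tun0.key";  # replaces the deprecated privateKey
      publicKey = "0123abcd";                          # placeholder remote public key in hex
      upScript = ''
        ip addr add 10.0.0.1/30 dev tun0
        ip link set tun0 up
      '';
    };
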
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index f54ce59174387..39793922ab516 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -674,7 +674,11 @@ in
           (lport: "sshd -G -T -C lport=${toString lport} -f ${sshconf} > /dev/null")
           cfg.ports}
         ${concatMapStringsSep "\n"
-          (la: "sshd -G -T -C ${escapeShellArg "laddr=${la.addr},lport=${toString la.port}"} -f ${sshconf} > /dev/null")
+          (la:
+            concatMapStringsSep "\n"
+              (port: "sshd -G -T -C ${escapeShellArg "laddr=${la.addr},lport=${toString port}"} -f ${sshconf} > /dev/null")
+              (if la.port != null then [ la.port ] else cfg.ports)
+          )
           cfg.listenAddresses}
         touch $out
       '')
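
The amended check above validates a `listenAddresses` entry that omits `port` against every entry of `services.openssh.ports` rather than interpolating a null port; a configuration sketch (addresses are placeholders):

    services.openssh = {
      enable = true;
      ports = [ 22 2222 ];
      listenAddresses = [
        { addr = "192.0.2.1"; }              # checked against both ports above
        { addr = "192.0.2.2"; port = 2222; } # checked against the explicit port only
      ];
    };
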
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index 99d4d9eeffcc6..e0425792431e6 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -559,6 +559,15 @@ in {
         '';
       };
 
+      databaseDir = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          The directory containing the database and logs.
+        '';
+        default = cfg.configDir;
+        defaultText = literalExpression "config.${opt.configDir}";
+      };
+
       extraFlags = mkOption {
         type = types.listOf types.str;
         default = [];
@@ -660,7 +669,7 @@ in {
               -no-browser \
               -gui-address=${if isUnixGui then "unix://" else ""}${cfg.guiAddress} \
               -config=${cfg.configDir} \
-              -data=${cfg.dataDir} \
+              -data=${cfg.databaseDir} \
               ${escapeShellArgs cfg.extraFlags}
           '';
           MemoryDenyWriteExecute = true;
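
The new `databaseDir` option lets the (potentially large) database live apart from the configuration directory; a sketch with placeholder paths:

    services.syncthing = {
      enable = true;
      configDir = "/var/lib/syncthing/.config/syncthing";
      databaseDir = "/var/cache/syncthing";   # defaults to configDir when unset
    };
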
diff --git a/nixos/modules/services/networking/tailscale.nix b/nixos/modules/services/networking/tailscale.nix
index 3822df81063d9..1070e4e252967 100644
--- a/nixos/modules/services/networking/tailscale.nix
+++ b/nixos/modules/services/networking/tailscale.nix
@@ -100,8 +100,8 @@ in {
     };
 
     systemd.services.tailscaled-autoconnect = mkIf (cfg.authKeyFile != null) {
-      after = ["tailscale.service"];
-      wants = ["tailscale.service"];
+      after = ["tailscaled.service"];
+      wants = ["tailscaled.service"];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         Type = "oneshot";
diff --git a/nixos/modules/services/networking/teamspeak3.nix b/nixos/modules/services/networking/teamspeak3.nix
index f09ef1a959ed4..ff41539a6d9b7 100644
--- a/nixos/modules/services/networking/teamspeak3.nix
+++ b/nixos/modules/services/networking/teamspeak3.nix
@@ -50,7 +50,7 @@ in
       };
 
       defaultVoicePort = mkOption {
-        type = types.int;
+        type = types.port;
         default = 9987;
         description = lib.mdDoc ''
           Default UDP port for clients to connect to virtual servers - used for first virtual server, subsequent ones will open on incrementing port numbers by default.
@@ -67,7 +67,7 @@ in
       };
 
       fileTransferPort = mkOption {
-        type = types.int;
+        type = types.port;
         default = 30033;
         description = lib.mdDoc ''
           TCP port opened for file transfers.
@@ -84,10 +84,26 @@ in
       };
 
       queryPort = mkOption {
-        type = types.int;
+        type = types.port;
         default = 10011;
         description = lib.mdDoc ''
-          TCP port opened for ServerQuery connections.
+          TCP port opened for ServerQuery connections using the raw telnet protocol.
+        '';
+      };
+
+      querySshPort = mkOption {
+        type = types.port;
+        default = 10022;
+        description = lib.mdDoc ''
+          TCP port opened for ServerQuery connections using the SSH protocol.
+        '';
+      };
+
+      queryHttpPort = mkOption {
+        type = types.port;
+        default = 10080;
+        description = lib.mdDoc ''
+          TCP port opened for ServerQuery connections using the HTTP protocol.
         '';
       };
 
@@ -128,7 +144,9 @@ in
     ];
 
     networking.firewall = mkIf cfg.openFirewall {
-      allowedTCPPorts = [ cfg.fileTransferPort ] ++ optionals (cfg.openFirewallServerQuery) [ cfg.queryPort (cfg.queryPort + 11) ];
+      allowedTCPPorts = [ cfg.fileTransferPort ] ++ (map (port:
+        mkIf cfg.openFirewallServerQuery port
+      ) [cfg.queryPort cfg.querySshPort cfg.queryHttpPort]);
       # subsequent vServers will use the incremented voice port, let's just open the next 10
       allowedUDPPortRanges = [ { from = cfg.defaultVoicePort; to = cfg.defaultVoicePort + 10; } ];
     };
@@ -141,13 +159,19 @@ in
       serviceConfig = {
         ExecStart = ''
           ${ts3}/bin/ts3server \
-            dbsqlpath=${ts3}/lib/teamspeak/sql/ logpath=${cfg.logPath} \
-            ${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
+            dbsqlpath=${ts3}/lib/teamspeak/sql/ \
+            logpath=${cfg.logPath} \
+            license_accepted=1 \
             default_voice_port=${toString cfg.defaultVoicePort} \
-            ${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
             filetransfer_port=${toString cfg.fileTransferPort} \
+            query_port=${toString cfg.queryPort} \
+            query_ssh_port=${toString cfg.querySshPort} \
+            query_http_port=${toString cfg.queryHttpPort} \
+            ${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
+            ${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
             ${optionalString (cfg.queryIP != null) "query_ip=${cfg.queryIP}"} \
-            query_port=${toString cfg.queryPort} license_accepted=1
+            ${optionalString (cfg.queryIP != null) "query_ssh_ip=${cfg.queryIP}"} \
+            ${optionalString (cfg.queryIP != null) "query_http_ip=${cfg.queryIP}"} \
         '';
         WorkingDirectory = cfg.dataDir;
         User = user;
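
A sketch of the new ServerQuery options together with the existing firewall toggles (the ports shown are the option defaults):

    services.teamspeak3 = {
      enable = true;
      openFirewall = true;
      openFirewallServerQuery = true;  # opens queryPort, querySshPort and queryHttpPort
      querySshPort = 10022;
      queryHttpPort = 10080;
    };
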
diff --git a/nixos/modules/services/networking/tinyproxy.nix b/nixos/modules/services/networking/tinyproxy.nix
index 42d45c460c2e7..8ff12b52f10ca 100644
--- a/nixos/modules/services/networking/tinyproxy.nix
+++ b/nixos/modules/services/networking/tinyproxy.nix
@@ -85,7 +85,7 @@ in
         User = "tinyproxy";
         Group = "tinyproxy";
         Type = "simple";
-        ExecStart = "${getExe pkgs.tinyproxy} -d -c ${configFile}";
+        ExecStart = "${getExe cfg.package} -d -c ${configFile}";
         ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
         KillSignal = "SIGINT";
         TimeoutStopSec = "30s";
diff --git a/nixos/modules/services/networking/vdirsyncer.nix b/nixos/modules/services/networking/vdirsyncer.nix
index 6a069943434da..165dc70f0876a 100644
--- a/nixos/modules/services/networking/vdirsyncer.nix
+++ b/nixos/modules/services/networking/vdirsyncer.nix
@@ -20,9 +20,11 @@ let
     else
       pkgs.writeText "vdirsyncer-${name}.conf" (toIniJson (
         {
-          general = cfg'.config.general // (lib.optionalAttrs (cfg'.config.statusPath == null) {
-            status_path = "/var/lib/vdirsyncer/${name}";
-          });
+          general = cfg'.config.general // {
+            status_path = if cfg'.config.statusPath == null
+                          then "/var/lib/vdirsyncer/${name}"
+                          else cfg'.config.statusPath;
+          };
         } // (
           mapAttrs' (name: nameValuePair "pair ${name}") cfg'.config.pairs
         ) // (
diff --git a/nixos/modules/services/networking/wpa_supplicant.nix b/nixos/modules/services/networking/wpa_supplicant.nix
index 90d9c68433cf4..4586550ed75e7 100644
--- a/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixos/modules/services/networking/wpa_supplicant.nix
@@ -107,6 +107,10 @@ let
       stopIfChanged = false;
 
       path = [ package ];
+      # If `userControl.enable` is set, the supplicant automatically changes the permissions
+      # and owning group of the runtime dir; setting `UMask` ensures the generated
+      # config file isn't readable by anyone but root; see nixpkgs#267693
+      serviceConfig.UMask = "066";
       serviceConfig.RuntimeDirectory = "wpa_supplicant";
       serviceConfig.RuntimeDirectoryMode = "700";
       serviceConfig.EnvironmentFile = mkIf (cfg.environmentFile != null)
diff --git a/nixos/modules/services/networking/yggdrasil.nix b/nixos/modules/services/networking/yggdrasil.nix
index 514753687d699..9173e7eb3457b 100644
--- a/nixos/modules/services/networking/yggdrasil.nix
+++ b/nixos/modules/services/networking/yggdrasil.nix
@@ -137,16 +137,24 @@ in
         message = "networking.enableIPv6 must be true for yggdrasil to work";
       }];
 
-      system.activationScripts.yggdrasil = mkIf cfg.persistentKeys ''
-        if [ ! -e ${keysPath} ]
-        then
-          mkdir --mode=700 -p ${builtins.dirOf keysPath}
-          ${binYggdrasil} -genconf -json \
-            | ${pkgs.jq}/bin/jq \
-                'to_entries|map(select(.key|endswith("Key")))|from_entries' \
-            > ${keysPath}
-        fi
-      '';
+      # This needs to be a separate service. The yggdrasil service fails if
+      # this is put into its preStart.
+      systemd.services.yggdrasil-persistent-keys = lib.mkIf cfg.persistentKeys {
+        wantedBy = [ "multi-user.target" ];
+        before = [ "yggdrasil.service" ];
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+        script = ''
+          if [ ! -e ${keysPath} ]
+          then
+            mkdir --mode=700 -p ${builtins.dirOf keysPath}
+            ${binYggdrasil} -genconf -json \
+              | ${pkgs.jq}/bin/jq \
+                  'to_entries|map(select(.key|endswith("Key")))|from_entries' \
+              > ${keysPath}
+          fi
+        '';
+      };
 
       systemd.services.yggdrasil = {
         description = "Yggdrasil Network Service";
diff --git a/nixos/modules/services/networking/zerotierone.nix b/nixos/modules/services/networking/zerotierone.nix
index 994e01d4980ea..60615d553041b 100644
--- a/nixos/modules/services/networking/zerotierone.nix
+++ b/nixos/modules/services/networking/zerotierone.nix
@@ -4,6 +4,8 @@ with lib;
 
 let
   cfg = config.services.zerotierone;
+  localConfFile = pkgs.writeText "zt-local.conf" (builtins.toJSON cfg.localConf);
+  localConfFilePath = "/var/lib/zerotier-one/local.conf";
 in
 {
   options.services.zerotierone.enable = mkEnableOption (lib.mdDoc "ZeroTierOne");
@@ -29,6 +31,19 @@ in
 
   options.services.zerotierone.package = mkPackageOption pkgs "zerotierone" { };
 
+  options.services.zerotierone.localConf = mkOption {
+    default = null;
+    description = mdDoc ''
+      Optional configuration to be written to ZeroTier's JSON-based local.conf.
+      If set, the generated file is symlinked to `/var/lib/zerotier-one/local.conf` when the service starts.
+      To understand the configuration format, refer to https://docs.zerotier.com/config/#local-configuration-options.
+    '';
+    example = {
+      settings.allowTcpFallbackRelay = false;
+    };
+    type = types.nullOr types.attrs;
+  };
+
   config = mkIf cfg.enable {
     systemd.services.zerotierone = {
       description = "ZeroTierOne";
@@ -45,7 +60,17 @@ in
         chown -R root:root /var/lib/zerotier-one
       '' + (concatMapStrings (netId: ''
         touch "/var/lib/zerotier-one/networks.d/${netId}.conf"
-      '') cfg.joinNetworks);
+      '') cfg.joinNetworks) + optionalString (cfg.localConf != null) ''
+        if [ -L "${localConfFilePath}" ]
+        then
+          rm ${localConfFilePath}
+        elif [ -f "${localConfFilePath}" ]
+        then
+          mv ${localConfFilePath} ${localConfFilePath}.bak
+        fi
+        ln -s ${localConfFile} ${localConfFilePath}
+      '';
+
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/zerotier-one -p${toString cfg.port}";
         Restart = "always";
diff --git a/nixos/modules/services/search/hound.nix b/nixos/modules/services/search/hound.nix
index 539a322b431f6..d238b26a226b5 100644
--- a/nixos/modules/services/search/hound.nix
+++ b/nixos/modules/services/search/hound.nix
@@ -3,6 +3,12 @@ with lib;
 let
   cfg = config.services.hound;
 in {
+  imports = [
+    (lib.mkRemovedOptionModule [ "services" "hound" "extraGroups" ] "Use users.users.hound.extraGroups instead")
+  ];
+
+  meta.maintainers = with maintainers; [ SuperSandro2000 ];
+
   options = {
     services.hound = {
       enable = mkOption {
@@ -13,6 +19,8 @@ in {
         '';
       };
 
+      package = mkPackageOptionMD pkgs "hound" { };
+
       user = mkOption {
         default = "hound";
         type = types.str;
@@ -29,27 +37,15 @@ in {
         '';
       };
 
-      extraGroups = mkOption {
-        type = types.listOf types.str;
-        default = [ ];
-        example = [ "dialout" ];
-        description = lib.mdDoc ''
-          List of extra groups that the "hound" user should be a part of.
-        '';
-      };
-
       home = mkOption {
         default = "/var/lib/hound";
         type = types.path;
         description = lib.mdDoc ''
-          The path to use as hound's $HOME. If the default user
-          "hound" is configured then this is the home of the "hound"
-          user.
+          The path to use as hound's $HOME.
+          If the default user "hound" is configured then this is the home of the "hound" user.
         '';
       };
 
-      package = mkPackageOption pkgs "hound" { };
-
       config = mkOption {
         type = types.str;
         description = lib.mdDoc ''
@@ -57,63 +53,62 @@ in {
           should be an absolute path to a writable location on disk.
         '';
         example = literalExpression ''
-          '''
-            {
-              "max-concurrent-indexers" : 2,
-              "dbpath" : "''${services.hound.home}/data",
-              "repos" : {
-                  "nixpkgs": {
-                    "url" : "https://www.github.com/NixOS/nixpkgs.git"
-                  }
-              }
+          {
+            "max-concurrent-indexers" : 2,
+            "repos" : {
+                "nixpkgs": {
+                  "url" : "https://www.github.com/NixOS/nixpkgs.git"
+                }
             }
-          '''
+          }
         '';
       };
 
       listen = mkOption {
         type = types.str;
         default = "0.0.0.0:6080";
-        example = "127.0.0.1:6080 or just :6080";
+        example = ":6080";
         description = lib.mdDoc ''
-          Listen on this IP:port / :port
+          Listen on this [IP]:port
         '';
       };
     };
   };
 
   config = mkIf cfg.enable {
-    users.groups = optionalAttrs (cfg.group == "hound") {
-      hound.gid = config.ids.gids.hound;
+    users.groups = lib.mkIf (cfg.group == "hound") {
+      hound = { };
     };
 
-    users.users = optionalAttrs (cfg.user == "hound") {
+    users.users = lib.mkIf (cfg.user == "hound") {
       hound = {
-        description = "hound code search";
+        description = "Hound code search";
         createHome = true;
-        home = cfg.home;
-        group = cfg.group;
-        extraGroups = cfg.extraGroups;
-        uid = config.ids.uids.hound;
+        isSystemUser = true;
+        inherit (cfg) home group;
       };
     };
 
-    systemd.services.hound = {
+    systemd.services.hound = let
+      configFile = pkgs.writeTextFile {
+        name = "hound.json";
+        text = cfg.config;
+        checkPhase = ''
+          # check if the supplied text is valid json
+          ${lib.getExe pkgs.jq} . $target > /dev/null
+        '';
+      };
+    in {
       description = "Hound Code Search";
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
-
       serviceConfig = {
         User = cfg.user;
         Group = cfg.group;
         WorkingDirectory = cfg.home;
         ExecStartPre = "${pkgs.git}/bin/git config --global --replace-all http.sslCAinfo /etc/ssl/certs/ca-certificates.crt";
-        ExecStart = "${cfg.package}/bin/houndd" +
-                    " -addr ${cfg.listen}" +
-                    " -conf ${pkgs.writeText "hound.json" cfg.config}";
-
+        ExecStart = "${cfg.package}/bin/houndd -addr ${cfg.listen} -conf ${configFile}";
       };
     };
   };
-
 }
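
Since `config` is still a string (now validated with jq at build time), `builtins.toJSON` is a convenient way to produce it; a sketch:

    services.hound = {
      enable = true;
      listen = ":6080";
      config = builtins.toJSON {
        "max-concurrent-indexers" = 2;
        dbpath = "/var/lib/hound/data";
        repos.nixpkgs.url = "https://www.github.com/NixOS/nixpkgs.git";
      };
    };
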
diff --git a/nixos/modules/services/security/clamav.nix b/nixos/modules/services/security/clamav.nix
index 72a195d3a04ed..d3164373ec01f 100644
--- a/nixos/modules/services/security/clamav.nix
+++ b/nixos/modules/services/security/clamav.nix
@@ -3,7 +3,6 @@ with lib;
 let
   clamavUser = "clamav";
   stateDir = "/var/lib/clamav";
-  runDir = "/run/clamav";
   clamavGroup = clamavUser;
   cfg = config.services.clamav;
   pkg = pkgs.clamav;
@@ -99,6 +98,29 @@ in
           '';
         };
       };
+
+      scanner = {
+        enable = mkEnableOption (lib.mdDoc "ClamAV scanner");
+
+        interval = mkOption {
+          type = types.str;
+          default = "*-*-* 04:00:00";
+          description = lib.mdDoc ''
+            How often clamdscan is invoked. See systemd.time(7) for more
+            information about the format.
+            By default this runs using at most 10 cores, so be sure to schedule it at a time of low traffic.
+          '';
+        };
+
+        scanDirectories = mkOption {
+          type = with types; listOf str;
+          default = [ "/home" "/var/lib" "/tmp" "/etc" "/var/tmp" ];
+          description = lib.mdDoc ''
+            List of directories to scan.
+            The default covers the common locations on NixOS that may contain user data. Feel free to contribute a PR to extend the default if something is missing.
+          '';
+        };
+      };
     };
   };
 
@@ -117,9 +139,8 @@ in
 
     services.clamav.daemon.settings = {
       DatabaseDirectory = stateDir;
-      LocalSocket = "${runDir}/clamd.ctl";
-      PidFile = "${runDir}/clamd.pid";
-      TemporaryDirectory = "/tmp";
+      LocalSocket = "/run/clamav/clamd.ctl";
+      PidFile = "/run/clamav/clamd.pid";
       User = "clamav";
       Foreground = true;
     };
@@ -182,7 +203,6 @@ in
         ExecStart = "${pkg}/bin/freshclam";
         SuccessExitStatus = "1"; # if databases are up to date
         StateDirectory = "clamav";
-        RuntimeDirectory = "clamav";
         User = clamavUser;
         Group = clamavGroup;
         PrivateTmp = "yes";
@@ -204,7 +224,6 @@ in
       serviceConfig = {
         Type = "oneshot";
         StateDirectory = "clamav";
-        RuntimeDirectory = "clamav";
         User = clamavUser;
         Group = clamavGroup;
         PrivateTmp = "yes";
@@ -230,12 +249,31 @@ in
         Type = "oneshot";
         ExecStart = "${pkgs.fangfrisch}/bin/fangfrisch --conf ${fangfrischConfigFile} refresh";
         StateDirectory = "clamav";
-        RuntimeDirectory = "clamav";
         User = clamavUser;
         Group = clamavGroup;
         PrivateTmp = "yes";
         PrivateDevices = "yes";
       };
     };
+
+    systemd.timers.clamdscan = mkIf cfg.scanner.enable {
+      description = "Timer for ClamAV virus scanner";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = cfg.scanner.interval;
+        Unit = "clamdscan.service";
+      };
+    };
+
+    systemd.services.clamdscan = mkIf cfg.scanner.enable {
+      description = "ClamAV virus scanner";
+      after = optionals cfg.updater.enable [ "clamav-freshclam.service" ];
+      wants = optionals cfg.updater.enable [ "clamav-freshclam.service" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkg}/bin/clamdscan --multiscan --fdpass --infected --allmatch ${lib.concatStringsSep " " cfg.scanner.scanDirectories}";
+      };
+    };
   };
 }
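
A sketch of the new periodic scanner (clamdscan talks to the running daemon, so the daemon should be enabled; interval and directories are illustrative):

    services.clamav = {
      daemon.enable = true;
      updater.enable = true;     # the scan unit is ordered after freshclam when enabled
      scanner = {
        enable = true;
        interval = "Sat *-*-* 03:00:00";        # weekly instead of the daily default
        scanDirectories = [ "/home" "/srv" ];
      };
    };
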
diff --git a/nixos/modules/services/security/munge.nix b/nixos/modules/services/security/munge.nix
index 4d6fe33f697b8..9d306c205f946 100644
--- a/nixos/modules/services/security/munge.nix
+++ b/nixos/modules/services/security/munge.nix
@@ -45,19 +45,25 @@ in
 
     systemd.services.munged = {
       wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" ];
+      wants = [
+        "network-online.target"
+        "time-sync.target"
+      ];
+      after = [
+        "network-online.target"
+        "time-sync.target"
+      ];
 
       path = [ pkgs.munge pkgs.coreutils ];
 
       serviceConfig = {
         ExecStartPre = "+${pkgs.coreutils}/bin/chmod 0400 ${cfg.password}";
-        ExecStart = "${pkgs.munge}/bin/munged --syslog --key-file ${cfg.password}";
-        PIDFile = "/run/munge/munged.pid";
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStart = "${pkgs.munge}/bin/munged --foreground --key-file ${cfg.password}";
         User = "munge";
         Group = "munge";
         StateDirectory = "munge";
         StateDirectoryMode = "0711";
+        Restart = "on-failure";
         RuntimeDirectory = "munge";
       };
 
diff --git a/nixos/modules/services/security/shibboleth-sp.nix b/nixos/modules/services/security/shibboleth-sp.nix
index e7897c3324cf6..975de1efa2f2a 100644
--- a/nixos/modules/services/security/shibboleth-sp.nix
+++ b/nixos/modules/services/security/shibboleth-sp.nix
@@ -1,44 +1,43 @@
-{pkgs, config, lib, ...}:
+{ config, lib, pkgs, ... }:
 
-with lib;
 let
   cfg = config.services.shibboleth-sp;
 in {
   options = {
     services.shibboleth-sp = {
-      enable = mkOption {
-        type = types.bool;
+      enable = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc "Whether to enable the shibboleth service";
       };
 
-      configFile = mkOption {
-        type = types.path;
-        example = literalExpression ''"''${pkgs.shibboleth-sp}/etc/shibboleth/shibboleth2.xml"'';
+      configFile = lib.mkOption {
+        type = lib.types.path;
+        example = lib.literalExpression ''"''${pkgs.shibboleth-sp}/etc/shibboleth/shibboleth2.xml"'';
         description = lib.mdDoc "Path to shibboleth config file";
       };
 
-      fastcgi.enable = mkOption {
-        type = types.bool;
+      fastcgi.enable = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc "Whether to include the shibauthorizer and shibresponder FastCGI processes";
       };
 
-      fastcgi.shibAuthorizerPort = mkOption {
-        type = types.int;
+      fastcgi.shibAuthorizerPort = lib.mkOption {
+        type = lib.types.int;
         default = 9100;
         description = lib.mdDoc "Port for shibauthorizer FastCGI process to bind to";
       };
 
-      fastcgi.shibResponderPort = mkOption {
-        type = types.int;
+      fastcgi.shibResponderPort = lib.mkOption {
+        type = lib.types.int;
         default = 9101;
         description = lib.mdDoc "Port for shibauthorizer FastCGI process to bind to";
       };
     };
   };
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     systemd.services.shibboleth-sp = {
       description = "Provides SSO and federation for web applications";
       after       = lib.optionals cfg.fastcgi.enable [ "shibresponder.service" "shibauthorizer.service" ];
@@ -48,7 +47,7 @@ in {
       };
     };
 
-    systemd.services.shibresponder = mkIf cfg.fastcgi.enable {
+    systemd.services.shibresponder = lib.mkIf cfg.fastcgi.enable {
       description = "Provides SSO through Shibboleth via FastCGI";
       after       = [ "network.target" ];
       wantedBy    = [ "multi-user.target" ];
@@ -59,7 +58,7 @@ in {
       };
     };
 
-    systemd.services.shibauthorizer = mkIf cfg.fastcgi.enable {
+    systemd.services.shibauthorizer = lib.mkIf cfg.fastcgi.enable {
       description = "Provides SSO through Shibboleth via FastCGI";
       after       = [ "network.target" ];
       wantedBy    = [ "multi-user.target" ];
@@ -71,5 +70,5 @@ in {
     };
   };
 
-  meta.maintainers = with lib.maintainers; [ jammerful ];
+  meta.maintainers = with lib.maintainers; [ ];
 }
diff --git a/nixos/modules/services/security/tor.nix b/nixos/modules/services/security/tor.nix
index 4ff941251c99b..dea20dec1ab47 100644
--- a/nixos/modules/services/security/tor.nix
+++ b/nixos/modules/services/security/tor.nix
@@ -854,7 +854,7 @@ in
           BridgeRelay = true;
           ExtORPort.port = mkDefault "auto";
           ServerTransportPlugin.transports = mkDefault ["obfs4"];
-          ServerTransportPlugin.exec = mkDefault "${pkgs.obfs4}/bin/obfs4proxy managed";
+          ServerTransportPlugin.exec = mkDefault "${lib.getExe pkgs.obfs4} managed";
         } // optionalAttrs (cfg.relay.role == "private-bridge") {
           ExtraInfoStatistics = false;
           PublishServerDescriptor = false;
diff --git a/nixos/modules/services/system/cachix-watch-store.nix b/nixos/modules/services/system/cachix-watch-store.nix
index 992a59cbc075b..8aa5f0358fa97 100644
--- a/nixos/modules/services/system/cachix-watch-store.nix
+++ b/nixos/modules/services/system/cachix-watch-store.nix
@@ -23,6 +23,14 @@ in
       '';
     };
 
+    signingKeyFile = mkOption {
+      type = types.nullOr types.path;
+      description = lib.mdDoc ''
+        Optional file containing a self-managed signing key to sign uploaded store paths.
+      '';
+      default = null;
+    };
+
     compressionLevel = mkOption {
       type = types.nullOr types.int;
       description = lib.mdDoc "The compression level for ZSTD compression (between 0 and 16)";
@@ -69,7 +77,8 @@ in
         DynamicUser = true;
         LoadCredential = [
           "cachix-token:${toString cfg.cachixTokenFile}"
-        ];
+        ]
+        ++ lib.optional (cfg.signingKeyFile != null) "signing-key:${toString cfg.signingKeyFile}";
       };
       script =
         let
@@ -80,6 +89,7 @@ in
         in
         ''
           export CACHIX_AUTH_TOKEN="$(<"$CREDENTIALS_DIRECTORY/cachix-token")"
+          ${lib.optionalString (cfg.signingKeyFile != null) ''export CACHIX_SIGNING_KEY="$(<"$CREDENTIALS_DIRECTORY/signing-key")"''}
           ${lib.escapeShellArgs command}
         '';
     };
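
A sketch of the new self-managed signing key support (the cache name option and the key paths are assumptions, not shown in the hunk above):

    services.cachix-watch-store = {
      enable = true;
      cacheName = "my-cache";                            # assumed option name
      cachixTokenFile = "/run/keys/cachix-auth-token";
      signingKeyFile = "/run/keys/cachix-signing-key";   # exported as CACHIX_SIGNING_KEY
    };
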
diff --git a/nixos/modules/services/system/dbus.nix b/nixos/modules/services/system/dbus.nix
index 8d5b25e617625..b47ebc92f93a8 100644
--- a/nixos/modules/services/system/dbus.nix
+++ b/nixos/modules/services/system/dbus.nix
@@ -184,6 +184,11 @@ in
         aliases = [
           "dbus.service"
         ];
+        unitConfig = {
+          # We get errors when reloading the dbus-broker service
+          # if /tmp got remounted after this service started
+          RequiresMountsFor = [ "/tmp" ];
+        };
         # Don't restart dbus. Bad things tend to happen if we do.
         reloadIfChanged = true;
         restartTriggers = [
diff --git a/nixos/modules/services/system/zram-generator.nix b/nixos/modules/services/system/zram-generator.nix
index 10b9992375cc1..429531e5743d8 100644
--- a/nixos/modules/services/system/zram-generator.nix
+++ b/nixos/modules/services/system/zram-generator.nix
@@ -27,7 +27,7 @@ in
 
   config = lib.mkIf cfg.enable {
     system.requiredKernelConfig = with config.lib.kernelConfig; [
-      (isModule "ZRAM")
+      (isEnabled "ZRAM")
     ];
 
     systemd.packages = [ cfg.package ];
diff --git a/nixos/modules/services/torrent/transmission.nix b/nixos/modules/services/torrent/transmission.nix
index 0cd24fb03a7bd..5dd02eb331633 100644
--- a/nixos/modules/services/torrent/transmission.nix
+++ b/nixos/modules/services/torrent/transmission.nix
@@ -251,6 +251,20 @@ in
           For instance, SSH sessions may time out more easily.
         '';
       };
+
+      webHome = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "pkgs.flood-for-transmission";
+        description = lib.mdDoc ''
+          If not `null`, sets the value of the `TRANSMISSION_WEB_HOME`
+          environment variable used by the service. Useful for overriding
+          the web interface files, without overriding the transmission
+          package and thus requiring rebuilding it locally. Use this if
+          you want to use an alternative web interface, such as
+          `pkgs.flood-for-transmission`.
+        '';
+      };
     };
   };
 
@@ -280,6 +294,7 @@ in
       requires = optional apparmor.enable "apparmor.service";
       wantedBy = [ "multi-user.target" ];
       environment.CURL_CA_BUNDLE = etc."ssl/certs/ca-certificates.crt".source;
+      environment.TRANSMISSION_WEB_HOME = lib.mkIf (cfg.webHome != null) cfg.webHome;
 
       serviceConfig = {
         # Use "+" because credentialsFile may not be accessible to User= or Group=.
@@ -314,6 +329,9 @@ in
         BindPaths =
           [ "${cfg.home}/${settingsDir}"
             cfg.settings.download-dir
+            # Transmission may need to read in the host's /run (e.g. /run/systemd/resolve)
+            # or write in its private /run (e.g. /run/host).
+            "/run"
           ] ++
           optional cfg.settings.incomplete-dir-enabled
             cfg.settings.incomplete-dir ++
@@ -324,7 +342,6 @@ in
           # an AppArmor profile is provided to get a confinement based upon paths and rights.
           builtins.storeDir
           "/etc"
-          "/run"
           ] ++
           optional (cfg.settings.script-torrent-done-enabled &&
                     cfg.settings.script-torrent-done-filename != null)
@@ -333,10 +350,10 @@ in
             cfg.settings.watch-dir;
         StateDirectory = [
           "transmission"
-          "transmission/.config/transmission-daemon"
-          "transmission/.incomplete"
-          "transmission/Downloads"
-          "transmission/watch-dir"
+          "transmission/${settingsDir}"
+          "transmission/${incompleteDir}"
+          "transmission/${downloadsDir}"
+          "transmission/${watchDir}"
         ];
         StateDirectoryMode = mkDefault 750;
         # The following options are only for optimizing:
@@ -349,10 +366,10 @@ in
         MemoryDenyWriteExecute = true;
         NoNewPrivileges = true;
         PrivateDevices = true;
-        PrivateMounts = true;
+        PrivateMounts = mkDefault true;
         PrivateNetwork = mkDefault false;
         PrivateTmp = true;
-        PrivateUsers = true;
+        PrivateUsers = mkDefault true;
         ProtectClock = true;
         ProtectControlGroups = true;
         # ProtectHome=true would not allow BindPaths= to work across /home,
@@ -434,7 +451,7 @@ in
       # at least up to the values hardcoded here:
       (mkIf cfg.settings.utp-enabled {
         "net.core.rmem_max" = mkDefault 4194304; # 4MB
-        "net.core.wmem_max" = mkDefault "1048576"; # 1MB
+        "net.core.wmem_max" = mkDefault 1048576; # 1MB
       })
       (mkIf cfg.performanceNetParameters {
         # Increase the number of available source (local) TCP and UDP ports to 49151.
@@ -490,6 +507,10 @@ in
         # https://gitlab.com/apparmor/apparmor/-/wikis/AppArmorStacking#seccomp-and-no_new_privs
         px ${cfg.settings.script-torrent-done-filename} -> &@{dirs},
       ''}
+
+      ${optionalString (cfg.webHome != null) ''
+        r ${cfg.webHome}/**,
+      ''}
     '';
   };
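
A sketch of the new `webHome` option, using the alternative UI the description itself mentions:

    services.transmission = {
      enable = true;
      # serve the Flood UI without overriding the transmission package
      webHome = pkgs.flood-for-transmission;
    };
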
 
diff --git a/nixos/modules/services/video/frigate.nix b/nixos/modules/services/video/frigate.nix
index 146e968780c38..b7945282ba09b 100644
--- a/nixos/modules/services/video/frigate.nix
+++ b/nixos/modules/services/video/frigate.nix
@@ -353,6 +353,7 @@ in
       ];
       serviceConfig = {
         ExecStart = "${cfg.package.python.interpreter} -m frigate";
+        Restart = "on-failure";
 
         User = "frigate";
         Group = "frigate";
diff --git a/nixos/modules/services/web-apps/code-server.nix b/nixos/modules/services/web-apps/code-server.nix
new file mode 100644
index 0000000000000..11601f6c30449
--- /dev/null
+++ b/nixos/modules/services/web-apps/code-server.nix
@@ -0,0 +1,259 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.code-server;
+  defaultUser = "code-server";
+  defaultGroup = defaultUser;
+in {
+  options = {
+    services.code-server = {
+      enable = lib.mkEnableOption (lib.mdDoc "code-server");
+
+      package = lib.mkPackageOptionMD pkgs "code-server" {
+        example = ''
+          pkgs.vscode-with-extensions.override {
+            vscode = pkgs.code-server;
+            vscodeExtensions = with pkgs.vscode-extensions; [
+              bbenoist.nix
+              dracula-theme.theme-dracula
+            ];
+          }
+        '';
+      };
+
+      extraPackages = lib.mkOption {
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional packages to add to the code-server {env}`PATH`.
+        '';
+        example = lib.literalExpression "[ pkgs.go ]";
+        type = lib.types.listOf lib.types.package;
+      };
+
+      extraEnvironment = lib.mkOption {
+        type = lib.types.attrsOf lib.types.str;
+        description = lib.mdDoc ''
+          Additional environment variables to pass to code-server.
+        '';
+        default = { };
+        example = { PKG_CONFIG_PATH = "/run/current-system/sw/lib/pkgconfig"; };
+      };
+
+      extraArguments = lib.mkOption {
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional arguments to pass to code-server.
+        '';
+        example = lib.literalExpression ''[ "--log=info" ]'';
+        type = lib.types.listOf lib.types.str;
+      };
+
+      host = lib.mkOption {
+        default = "localhost";
+        description = lib.mdDoc ''
+          The host name or IP address the server should listen to.
+        '';
+        type = lib.types.str;
+      };
+
+      port = lib.mkOption {
+        default = 4444;
+        description = lib.mdDoc ''
+          The port the server should listen to.
+        '';
+        type = lib.types.port;
+      };
+
+      auth = lib.mkOption {
+        default = "password";
+        description = lib.mdDoc ''
+          The type of authentication to use.
+        '';
+        type = lib.types.enum [ "none" "password" ];
+      };
+
+      hashedPassword = lib.mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Hashed password for password authentication. Create it with: `echo -n 'thisismypassword' | npx argon2-cli -e`.
+        '';
+        type = lib.types.str;
+      };
+
+      user = lib.mkOption {
+        default = defaultUser;
+        example = "yourUser";
+        description = lib.mdDoc ''
+          The user to run code-server as.
+          By default, a user named `${defaultUser}` will be created.
+        '';
+        type = lib.types.str;
+      };
+
+      group = lib.mkOption {
+        default = defaultGroup;
+        example = "yourGroup";
+        description = lib.mdDoc ''
+          The group to run code-server under.
+          By default, a group named `${defaultGroup}` will be created.
+        '';
+        type = lib.types.str;
+      };
+
+      extraGroups = lib.mkOption {
+        default = [ ];
+        description = lib.mdDoc ''
+          A list of additional groups for the `${defaultUser}` user.
+        '';
+        example = [ "docker" ];
+        type = lib.types.listOf lib.types.str;
+      };
+
+      socket = lib.mkOption {
+        default = null;
+        example = "/run/code-server/socket";
+        description = lib.mdDoc ''
+          Path to a socket (bind-addr will be ignored).
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      socketMode = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          File mode of the socket.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      userDataDir = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          Path to the user data directory.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      extensionsDir = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          Path to the extensions directory.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      proxyDomain = lib.mkOption {
+        default = null;
+        example = "code-server.lan";
+        description = lib.mdDoc ''
+          Domain used for proxying ports.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      disableTelemetry = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable telemetry.
+        '';
+        type = lib.types.bool;
+      };
+
+      disableUpdateCheck = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable update check.
+          Without this flag, code-server checks for the latest GitHub release every 6 hours and
+          then notifies you once a week when a new release is available.
+        '';
+        type = lib.types.bool;
+      };
+
+      disableFileDownloads = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable file downloads from Code.
+        '';
+        type = lib.types.bool;
+      };
+
+      disableWorkspaceTrust = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable Workspace Trust feature.
+        '';
+        type = lib.types.bool;
+      };
+
+      disableGettingStartedOverride = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable the coder/coder override in the Help: Getting Started page.
+        '';
+        type = lib.types.bool;
+      };
+
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.code-server = {
+      description = "Code server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      path = cfg.extraPackages;
+      environment = {
+        HASHED_PASSWORD = cfg.hashedPassword;
+      } // cfg.extraEnvironment;
+      serviceConfig = {
+        ExecStart = ''
+          ${lib.getExe cfg.package} \
+            --auth=${cfg.auth} \
+            --bind-addr=${cfg.host}:${toString cfg.port} \
+          '' + lib.optionalString (cfg.socket != null) ''
+            --socket=${cfg.socket} \
+          '' + lib.optionalString (cfg.userDataDir != null) ''
+            --user-data-dir=${cfg.userDataDir} \
+          '' + lib.optionalString (cfg.extensionsDir != null) ''
+            --extensions-dir=${cfg.extensionsDir} \
+          '' + lib.optionalString (cfg.disableTelemetry == true) ''
+            --disable-telemetry \
+          '' + lib.optionalString (cfg.disableUpdateCheck == true) ''
+            --disable-update-check \
+          '' + lib.optionalString (cfg.disableFileDownloads == true) ''
+            --disable-file-downloads \
+          '' + lib.optionalString (cfg.disableWorkspaceTrust == true) ''
+            --disable-workspace-trust \
+          '' + lib.optionalString (cfg.disableGettingStartedOverride == true) ''
+            --disable-getting-started-override \
+          '' + lib.escapeShellArgs cfg.extraArguments;
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        RuntimeDirectory = cfg.user;
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "on-failure";
+      };
+    };
+
+    users.users."${cfg.user}" = lib.mkMerge [
+      (lib.mkIf (cfg.user == defaultUser) {
+        isNormalUser = true;
+        description = "code-server user";
+        inherit (cfg) group;
+      })
+      {
+        packages = cfg.extraPackages;
+        inherit (cfg) extraGroups;
+      }
+    ];
+
+    users.groups."${defaultGroup}" = lib.mkIf (cfg.group == defaultGroup) { };
+  };
+
+  meta.maintainers = [ lib.maintainers.stackshadow ];
+}
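
A minimal sketch of the new module (the hashed password is a truncated placeholder produced by the argon2 command from the option description):

    services.code-server = {
      enable = true;
      host = "0.0.0.0";
      port = 4444;
      auth = "password";
      hashedPassword = "$argon2i$v=19$m=4096,t=3,p=1$...";   # placeholder
      extraPackages = [ pkgs.go ];
      extraGroups = [ "docker" ];
    };
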
diff --git a/nixos/modules/services/web-apps/freshrss.nix b/nixos/modules/services/web-apps/freshrss.nix
index 9683730bbe1f8..c8399143c37ba 100644
--- a/nixos/modules/services/web-apps/freshrss.nix
+++ b/nixos/modules/services/web-apps/freshrss.nix
@@ -294,7 +294,6 @@ in
       systemd.services.freshrss-updater = {
         description = "FreshRSS feed updater";
         after = [ "freshrss-config.service" ];
-        wantedBy = [ "multi-user.target" ];
         startAt = "*:0/5";
         environment = {
           DATA_PATH = cfg.dataDir;
diff --git a/nixos/modules/services/web-apps/invidious.nix b/nixos/modules/services/web-apps/invidious.nix
index cfba3c8a29708..359aaabfe673a 100644
--- a/nixos/modules/services/web-apps/invidious.nix
+++ b/nixos/modules/services/web-apps/invidious.nix
@@ -10,82 +10,115 @@ let
   generatedHmacKeyFile = "/var/lib/invidious/hmac_key";
   generateHmac = cfg.hmacKeyFile == null;
 
-  serviceConfig = {
-    systemd.services.invidious = {
-      description = "Invidious (An alternative YouTube front-end)";
-      wants = [ "network-online.target" ];
-      after = [ "network-online.target" ];
-      wantedBy = [ "multi-user.target" ];
-
-      preStart = lib.optionalString generateHmac ''
-        if [[ ! -e "${generatedHmacKeyFile}" ]]; then
-          ${pkgs.pwgen}/bin/pwgen 20 1 > "${generatedHmacKeyFile}"
-          chmod 0600 "${generatedHmacKeyFile}"
-        fi
-      '';
-
-      script = ''
-        configParts=()
-      ''
-      # autogenerated hmac_key
-      + lib.optionalString generateHmac ''
-        configParts+=("$(${pkgs.jq}/bin/jq -R '{"hmac_key":.}' <"${generatedHmacKeyFile}")")
-      ''
-      # generated settings file
-      + ''
-        configParts+=("$(< ${lib.escapeShellArg settingsFile})")
-      ''
-      # optional database password file
-      + lib.optionalString (cfg.database.host != null) ''
-        configParts+=("$(${pkgs.jq}/bin/jq -R '{"db":{"password":.}}' ${lib.escapeShellArg cfg.database.passwordFile})")
-      ''
-      # optional extra settings file
-      + lib.optionalString (cfg.extraSettingsFile != null) ''
-        configParts+=("$(< ${lib.escapeShellArg cfg.extraSettingsFile})")
-      ''
-      # explicitly specified hmac key file
-      + lib.optionalString (cfg.hmacKeyFile != null) ''
-        configParts+=("$(< ${lib.escapeShellArg cfg.hmacKeyFile})")
-      ''
-      # merge all parts into a single configuration with later elements overriding previous elements
-      + ''
-        export INVIDIOUS_CONFIG="$(${pkgs.jq}/bin/jq -s 'reduce .[] as $item ({}; . * $item)' <<<"''${configParts[*]}")"
-        exec ${cfg.package}/bin/invidious
-      '';
-
-      serviceConfig = {
-        RestartSec = "2s";
-        DynamicUser = true;
-        StateDirectory = "invidious";
-        StateDirectoryMode = "0750";
-
-        CapabilityBoundingSet = "";
-        PrivateDevices = true;
-        PrivateUsers = true;
-        ProtectHome = true;
-        ProtectKernelLogs = true;
-        ProtectProc = "invisible";
-        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
-        RestrictNamespaces = true;
-        SystemCallArchitectures = "native";
-        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
-
-        # Because of various issues Invidious must be restarted often, at least once a day, ideally
-        # every hour.
-        # This option enables the automatic restarting of the Invidious instance.
-        Restart = lib.mkDefault "always";
-        RuntimeMaxSec = lib.mkDefault "1h";
-      };
+  commonInvidiousServiceConfig = {
+    description = "Invidious (An alternative YouTube front-end)";
+    wants = [ "network-online.target" ];
+    after = [ "network-online.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+    requires = lib.optional cfg.database.createLocally "postgresql.service";
+    wantedBy = [ "multi-user.target" ];
+
+    serviceConfig = {
+      RestartSec = "2s";
+      DynamicUser = true;
+      User = lib.mkIf (cfg.database.createLocally || cfg.serviceScale > 1) "invidious";
+      StateDirectory = "invidious";
+      StateDirectoryMode = "0750";
+
+      CapabilityBoundingSet = "";
+      PrivateDevices = true;
+      PrivateUsers = true;
+      ProtectHome = true;
+      ProtectKernelLogs = true;
+      ProtectProc = "invisible";
+      RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+      RestrictNamespaces = true;
+      SystemCallArchitectures = "native";
+      SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+
+      # Because of various issues Invidious must be restarted often, at least once a day, ideally
+      # every hour.
+      # This option enables the automatic restarting of the Invidious instance.
+      # To ensure multiple instances of Invidious are not restarted at the exact same time, a
+      # randomized extra offset of up to 5 minutes is added.
+      Restart = lib.mkDefault "always";
+      RuntimeMaxSec = lib.mkDefault "1h";
+      RuntimeRandomizedExtraSec = lib.mkDefault "5min";
     };
+  };
+  mkInvidiousService = scaleIndex:
+    lib.foldl' lib.recursiveUpdate commonInvidiousServiceConfig [
+      # only generate the hmac file in the first service
+      (lib.optionalAttrs (scaleIndex == 0) {
+        preStart = lib.optionalString generateHmac ''
+          if [[ ! -e "${generatedHmacKeyFile}" ]]; then
+            ${pkgs.pwgen}/bin/pwgen 20 1 > "${generatedHmacKeyFile}"
+            chmod 0600 "${generatedHmacKeyFile}"
+          fi
+        '';
+      })
+      # configure the secondary services to run after the first service
+      (lib.optionalAttrs (scaleIndex > 0) {
+        after = commonInvidiousServiceConfig.after ++ [ "invidious.service" ];
+        wants = commonInvidiousServiceConfig.wants ++ [ "invidious.service" ];
+      })
+      {
+        script = ''
+          configParts=()
+        ''
+        # autogenerated hmac_key
+        + lib.optionalString generateHmac ''
+          configParts+=("$(${pkgs.jq}/bin/jq -R '{"hmac_key":.}' <"${generatedHmacKeyFile}")")
+        ''
+        # generated settings file
+        + ''
+          configParts+=("$(< ${lib.escapeShellArg settingsFile})")
+        ''
+        # optional database password file
+        + lib.optionalString (cfg.database.host != null) ''
+          configParts+=("$(${pkgs.jq}/bin/jq -R '{"db":{"password":.}}' ${lib.escapeShellArg cfg.database.passwordFile})")
+        ''
+        # optional extra settings file
+        + lib.optionalString (cfg.extraSettingsFile != null) ''
+          configParts+=("$(< ${lib.escapeShellArg cfg.extraSettingsFile})")
+        ''
+        # explicitly specified hmac key file
+        + lib.optionalString (cfg.hmacKeyFile != null) ''
+          configParts+=("$(< ${lib.escapeShellArg cfg.hmacKeyFile})")
+        ''
+        # configure threads for secondary instances
+        + lib.optionalString (scaleIndex > 0) ''
+          configParts+=('{"channel_threads":0, "feed_threads":0}')
+        ''
+        # configure different ports for the instances
+        + ''
+          configParts+=('{"port":${toString (cfg.port + scaleIndex)}}')
+        ''
+        # merge all parts into a single configuration with later elements overriding previous elements
+        + ''
+          export INVIDIOUS_CONFIG="$(${pkgs.jq}/bin/jq -s 'reduce .[] as $item ({}; . * $item)' <<<"''${configParts[*]}")"
+          exec ${cfg.package}/bin/invidious
+        '';
+      }
+    ];
 
-    services.invidious.settings = {
-      inherit (cfg) port;
+  serviceConfig = {
+    systemd.services = builtins.listToAttrs (builtins.genList
+      (scaleIndex: {
+        name = "invidious" + lib.optionalString (scaleIndex > 0) "-${builtins.toString scaleIndex}";
+        value = mkInvidiousService scaleIndex;
+      })
+      cfg.serviceScale);
 
+    services.invidious.settings = {
       # Automatically initialises and migrates the database if necessary
       check_tables = true;
 
       db = {
-        user = lib.mkDefault "kemal";
+        user = lib.mkDefault (
+          if (lib.versionAtLeast config.system.stateVersion "24.05")
+          then "invidious"
+          else "kemal"
+        );
         dbname = lib.mkDefault "invidious";
         port = cfg.database.port;
         # Blank for unix sockets, see
@@ -94,67 +127,74 @@ let
         # Not needed because peer authentication is enabled
         password = lib.mkIf (cfg.database.host == null) "";
       };
+
+      host_binding = cfg.address;
     } // (lib.optionalAttrs (cfg.domain != null) {
       inherit (cfg) domain;
     });
 
-    assertions = [{
-      assertion = cfg.database.host != null -> cfg.database.passwordFile != null;
-      message = "If database host isn't null, database password needs to be set";
-    }];
+    assertions = [
+      {
+        assertion = cfg.database.host != null -> cfg.database.passwordFile != null;
+        message = "If database host isn't null, database password needs to be set";
+      }
+      {
+        assertion = cfg.serviceScale >= 1;
+        message = "Service can't be scaled below one instance";
+      }
+    ];
   };
 
   # Settings necessary for running with an automatically managed local database
   localDatabaseConfig = lib.mkIf cfg.database.createLocally {
+    assertions = [
+      {
+        assertion = cfg.settings.db.user == cfg.settings.db.dbname;
+        message = ''
+          For local automatic database provisioning (services.invidious.database.createLocally == true)
+          to work, the username used to connect to PostgreSQL must match the database name, that is,
+          services.invidious.settings.db.user must match services.invidious.settings.db.dbname.
+          This is the default since NixOS 24.05. For older systems, it is normally safe to manually set
+          the user to "invidious", as the new user will be created with permissions
+          for the existing database. `REASSIGN OWNED BY kemal TO invidious;` may also be needed; it can be
+          run as `sudo -u postgres env psql --user=postgres --dbname=invidious -c 'REASSIGN OWNED BY kemal TO invidious;'`.
+        '';
+      }
+    ];
     # Default to using the local database if we create it
     services.invidious.database.host = lib.mkDefault null;
 
-
-    # TODO(raitobezarius to maintainers of invidious): I strongly advise to clean up the kemal specific
-    # thing for 24.05 and use `ensureDBOwnership`.
-    # See https://github.com/NixOS/nixpkgs/issues/216989
-    systemd.services.postgresql.postStart = lib.mkAfter ''
-      $PSQL -tAc 'ALTER DATABASE "${cfg.settings.db.dbname}" OWNER TO "${cfg.settings.db.user}";'
-    '';
     services.postgresql = {
       enable = true;
-      ensureUsers = lib.singleton { name = cfg.settings.db.user; ensureDBOwnership = false; };
+      ensureUsers = lib.singleton { name = cfg.settings.db.user; ensureDBOwnership = true; };
       ensureDatabases = lib.singleton cfg.settings.db.dbname;
-      # This is only needed because the unix user invidious isn't the same as
-      # the database user. This tells postgres to map one to the other.
-      identMap = ''
-        invidious invidious ${cfg.settings.db.user}
-      '';
-      # And this specifically enables peer authentication for only this
-      # database, which allows passwordless authentication over the postgres
-      # unix socket for the user map given above.
-      authentication = ''
-        local ${cfg.settings.db.dbname} ${cfg.settings.db.user} peer map=invidious
-      '';
     };
+  };
+
+  ytproxyConfig = lib.mkIf cfg.http3-ytproxy.enable {
+    systemd.services.http3-ytproxy = {
+      description = "HTTP3 ytproxy for Invidious";
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
 
-    systemd.services.invidious-db-clean = {
-      description = "Invidious database cleanup";
-      documentation = [ "https://docs.invidious.io/Database-Information-and-Maintenance.md" ];
-      startAt = lib.mkDefault "weekly";
-      path = [ config.services.postgresql.package ];
-      after = [ "postgresql.service" ];
       script = ''
-        psql ${cfg.settings.db.dbname} ${cfg.settings.db.user} -c "DELETE FROM nonces * WHERE expire < current_timestamp"
-        psql ${cfg.settings.db.dbname} ${cfg.settings.db.user} -c "TRUNCATE TABLE videos"
+        mkdir -p socket
+        exec ${lib.getExe cfg.http3-ytproxy.package}
       '';
+
       serviceConfig = {
+        RestartSec = "2s";
         DynamicUser = true;
-        User = "invidious";
+        User = lib.mkIf cfg.nginx.enable config.services.nginx.user;
+        RuntimeDirectory = "http3-ytproxy";
+        WorkingDirectory = "/run/http3-ytproxy";
       };
     };
 
-    systemd.services.invidious = {
-      requires = [ "postgresql.service" ];
-      after = [ "postgresql.service" ];
-
-      serviceConfig = {
-        User = "invidious";
+    services.nginx.virtualHosts.${cfg.domain} = lib.mkIf cfg.nginx.enable {
+      locations."~ (^/videoplayback|^/vi/|^/ggpht/|^/sb/)" = {
+        proxyPass = "http://unix:/run/http3-ytproxy/socket/http-proxy.sock";
       };
     };
   };
@@ -165,14 +205,28 @@ let
       external_port = 80;
     };
 
-    services.nginx = {
+    services.nginx = let
+      ip = if cfg.address == "0.0.0.0" then "127.0.0.1" else cfg.address;
+    in
+    {
       enable = true;
       virtualHosts.${cfg.domain} = {
-        locations."/".proxyPass = "http://127.0.0.1:${toString cfg.port}";
+        locations."/".proxyPass =
+          if cfg.serviceScale == 1 then
+            "http://${ip}:${toString cfg.port}"
+          else "http://upstream-invidious";
 
         enableACME = lib.mkDefault true;
         forceSSL = lib.mkDefault true;
       };
+      upstreams = lib.mkIf (cfg.serviceScale > 1) {
+        "upstream-invidious".servers = builtins.listToAttrs (builtins.genList
+          (scaleIndex: {
+            name = "${ip}:${toString (cfg.port + scaleIndex)}";
+            value = { };
+          })
+          cfg.serviceScale);
+      };
     };
 
     assertions = [{
@@ -220,6 +274,20 @@ in
       '';
     };
 
+    serviceScale = lib.mkOption {
+      type = types.int;
+      default = 1;
+      description = lib.mdDoc ''
+        How many Invidious instances to run.
+
+        See https://docs.invidious.io/improve-public-instance/#2-multiple-invidious-processes for more details
+        on how this is intended to work. All instances beyond the first one have the options `channel_threads`
+        and `feed_threads` set to 0 to avoid conflicts with multiple instances refreshing subscriptions. Instances
+        will be configured to bind to consecutive ports starting with {option}`services.invidious.port` for the
+        first instance.
+      '';
+    };
+
     # This needs to be outside of settings to avoid infinite recursion
     # (determining if nginx should be enabled and therefore the settings
     # modified).
@@ -233,6 +301,16 @@ in
       '';
     };
 
+    address = lib.mkOption {
+      type = types.str;
+      # default from https://github.com/iv-org/invidious/blob/master/config/config.example.yml
+      default = if cfg.nginx.enable then "127.0.0.1" else "0.0.0.0";
+      defaultText = lib.literalExpression ''if config.services.invidious.nginx.enable then "127.0.0.1" else "0.0.0.0"'';
+      description = lib.mdDoc ''
+        The IP address Invidious should bind to.
+      '';
+    };
+
     port = lib.mkOption {
       type = types.port;
       # Default from https://docs.invidious.io/Configuration.md
@@ -298,11 +376,28 @@ in
        which can also be used to disable ACME and TLS.
       '';
     };
+
+    http3-ytproxy = {
+      enable = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable http3-ytproxy for faster loading of images and video playback.
+
+          If {option}`services.invidious.nginx.enable` is used, nginx will be configured automatically. If not, you
+          need to configure a reverse proxy yourself according to
+          https://docs.invidious.io/improve-public-instance/#3-speed-up-video-playback-with-http3-ytproxy.
+        '';
+      };
+
+      package = lib.mkPackageOptionMD pkgs "http3-ytproxy" { };
+    };
   };
 
   config = lib.mkIf cfg.enable (lib.mkMerge [
     serviceConfig
     localDatabaseConfig
     nginxConfig
+    ytproxyConfig
   ]);
 }
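
A minimal sketch of how the new Invidious options fit together, assuming a hypothetical domain; all option names are taken from the module above:

    services.invidious = {
      enable = true;
      domain = "invidious.example.org";   # hypothetical
      nginx.enable = true;                # proxies to the instance, or to an upstream when scaled
      serviceScale = 3;                   # invidious.service, invidious-1.service, invidious-2.service
      database.createLocally = true;      # local PostgreSQL with matching "invidious" database user (default since 24.05)
      http3-ytproxy.enable = true;        # nginx forwards /videoplayback, /vi/, /ggpht/, /sb/ to its socket
    };
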
diff --git a/nixos/modules/services/web-apps/jitsi-meet.nix b/nixos/modules/services/web-apps/jitsi-meet.nix
index 0c0eb66e65b7c..c4505534d635e 100644
--- a/nixos/modules/services/web-apps/jitsi-meet.nix
+++ b/nixos/modules/services/web-apps/jitsi-meet.nix
@@ -35,6 +35,7 @@ let
       domain = cfg.hostName;
       muc = "conference.${cfg.hostName}";
       focus = "focus.${cfg.hostName}";
+      jigasi = "jigasi.${cfg.hostName}";
     };
     bosh = "//${cfg.hostName}/http-bind";
     websocket = "wss://${cfg.hostName}/xmpp-websocket";
@@ -145,6 +146,16 @@ in
       '';
     };
 
+    jigasi.enable = mkOption {
+      type = bool;
+      default = false;
+      description = ''
+        Whether to enable a Jigasi instance and configure it to connect to Prosody.
+
+        Additional configuration is possible with <option>services.jigasi</option>.
+      '';
+    };
+
     nginx.enable = mkOption {
       type = bool;
       default = true;
@@ -224,7 +235,7 @@ in
           roomDefaultPublicJids = true;
           extraConfig = ''
             storage = "memory"
-            admins = { "focus@auth.${cfg.hostName}", "jvb@auth.${cfg.hostName}" }
+            admins = { "focus@auth.${cfg.hostName}", "jvb@auth.${cfg.hostName}", "jigasi@auth.${cfg.hostName}" }
           '';
           #-- muc_room_cache_size = 1000
         }
@@ -263,6 +274,9 @@ in
           Component "focus.${cfg.hostName}" "client_proxy"
             target_address = "focus@auth.${cfg.hostName}"
 
+          Component "jigasi.${cfg.hostName}" "client_proxy"
+            target_address = "jigasi@auth.${cfg.hostName}"
+
           Component "speakerstats.${cfg.hostName}" "speakerstats_component"
             muc_component = "conference.${cfg.hostName}"
 
@@ -356,7 +370,10 @@ in
         ${config.services.prosody.package}/bin/prosodyctl mod_roster_command subscribe focus.${cfg.hostName} focus@auth.${cfg.hostName}
         ${config.services.prosody.package}/bin/prosodyctl register jibri auth.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jibri-auth-secret)"
         ${config.services.prosody.package}/bin/prosodyctl register recorder recorder.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"
+      '' + optionalString cfg.jigasi.enable ''
+        ${config.services.prosody.package}/bin/prosodyctl register jigasi auth.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jigasi-user-secret)"
       '';
+
       serviceConfig = {
         EnvironmentFile = [ "/var/lib/jitsi-meet/secrets-env" ];
         SupplementaryGroups = [ "jitsi-meet" ];
@@ -371,13 +388,13 @@ in
 
     systemd.services.jitsi-meet-init-secrets = {
       wantedBy = [ "multi-user.target" ];
-      before = [ "jicofo.service" "jitsi-videobridge2.service" ] ++ (optional cfg.prosody.enable "prosody.service");
+      before = [ "jicofo.service" "jitsi-videobridge2.service" ] ++ (optional cfg.prosody.enable "prosody.service") ++ (optional cfg.jigasi.enable "jigasi.service");
       serviceConfig = {
         Type = "oneshot";
       };
 
       script = let
-        secrets = [ "jicofo-component-secret" "jicofo-user-secret" "jibri-auth-secret" "jibri-recorder-secret" ] ++ (optional (cfg.videobridge.passwordFile == null) "videobridge-secret");
+        secrets = [ "jicofo-component-secret" "jicofo-user-secret" "jibri-auth-secret" "jibri-recorder-secret" ] ++ (optionals cfg.jigasi.enable [ "jigasi-user-secret" "jigasi-component-secret" ]) ++ (optional (cfg.videobridge.passwordFile == null) "videobridge-secret");
       in
       ''
         cd /var/lib/jitsi-meet
@@ -391,6 +408,7 @@ in
 
         # for easy access in prosody
         echo "JICOFO_COMPONENT_SECRET=$(cat jicofo-component-secret)" > secrets-env
+        echo "JIGASI_COMPONENT_SECRET=$(cat jigasi-component-secret)" >> secrets-env
         chown root:jitsi-meet secrets-env
         chmod 640 secrets-env
       ''
@@ -592,6 +610,20 @@ in
         stripFromRoomDomain = "conference.";
       };
     };
+
+    services.jigasi = mkIf cfg.jigasi.enable {
+      enable = true;
+      xmppHost = "localhost";
+      xmppDomain = cfg.hostName;
+      userDomain = "auth.${cfg.hostName}";
+      userName = "jigasi";
+      userPasswordFile = "/var/lib/jitsi-meet/jigasi-user-secret";
+      componentPasswordFile = "/var/lib/jitsi-meet/jigasi-component-secret";
+      bridgeMuc = "jigasibrewery@internal.${cfg.hostName}";
+      config = {
+        "org.jitsi.jigasi.ALWAYS_TRUST_MODE_ENABLED" = "true";
+      };
+    };
   };
 
   meta.doc = ./jitsi-meet.md;
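
A minimal sketch of enabling the new Jigasi integration through the module, assuming a hypothetical host name:

    services.jitsi-meet = {
      enable = true;
      hostName = "meet.example.org";   # hypothetical
      jigasi.enable = true;            # registers jigasi@auth.<hostName> and provisions its secrets
    };
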
diff --git a/nixos/modules/services/web-apps/keycloak.nix b/nixos/modules/services/web-apps/keycloak.nix
index 5d44bdee64a73..6d2948913b194 100644
--- a/nixos/modules/services/web-apps/keycloak.nix
+++ b/nixos/modules/services/web-apps/keycloak.nix
@@ -25,7 +25,6 @@ let
     maintainers
     catAttrs
     collect
-    splitString
     hasPrefix
     ;
 
@@ -329,7 +328,8 @@ in
             };
 
             hostname = mkOption {
-              type = str;
+              type = nullOr str;
+              default = null;
               example = "keycloak.example.com";
               description = lib.mdDoc ''
                 The hostname part of the public URL used as base for
@@ -451,7 +451,7 @@ in
 
       keycloakConfig = lib.generators.toKeyValue {
         mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" {
-          mkValueString = v: with builtins;
+          mkValueString = v:
             if isInt v then toString v
             else if isString v then v
             else if true == v then "true"
@@ -480,6 +480,14 @@ in
             assertion = createLocalPostgreSQL -> config.services.postgresql.settings.standard_conforming_strings or true;
             message = "Setting up a local PostgreSQL db for Keycloak requires `standard_conforming_strings` turned on to work reliably";
           }
+          {
+            assertion = cfg.settings.hostname != null || cfg.settings.hostname-url or null != null;
+            message = "Setting the Keycloak hostname is required, see `services.keycloak.settings.hostname`";
+          }
+          {
+            assertion = !(cfg.settings.hostname != null && cfg.settings.hostname-url or null != null);
+            message = "`services.keycloak.settings.hostname` and `services.keycloak.settings.hostname-url` are mutually exclusive";
+          }
         ];
 
         environment.systemPackages = [ keycloakBuild ];
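
With `hostname` now nullable, the assertions above require exactly one of `hostname` or `hostname-url` to be set. A minimal sketch, assuming a hypothetical domain:

    services.keycloak.settings.hostname = "keycloak.example.com";
    # or, for setups that need a full URL (mutually exclusive with hostname):
    # services.keycloak.settings.hostname-url = "https://keycloak.example.com";
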
diff --git a/nixos/modules/services/web-apps/mastodon.nix b/nixos/modules/services/web-apps/mastodon.nix
index 7b00ce35eb1a7..538e728fcc72f 100644
--- a/nixos/modules/services/web-apps/mastodon.nix
+++ b/nixos/modules/services/web-apps/mastodon.nix
@@ -136,7 +136,7 @@ let
         # System Call Filtering
         SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
       } // cfgService;
-      path = with pkgs; [ file imagemagick ffmpeg ];
+      path = with pkgs; [ ffmpeg-headless file imagemagick ];
     })
   ) cfg.sidekiqProcesses;
 
@@ -711,31 +711,28 @@ in {
     systemd.services.mastodon-init-db = lib.mkIf cfg.automaticMigrations {
       script = lib.optionalString (!databaseActuallyCreateLocally) ''
         umask 077
-
-        export PGPASSFILE
-        PGPASSFILE=$(mktemp)
-        cat > $PGPASSFILE <<EOF
-        ${cfg.database.host}:${toString cfg.database.port}:${cfg.database.name}:${cfg.database.user}:$(cat ${cfg.database.passwordFile})
-        EOF
-
+        export PGPASSWORD="$(cat '${cfg.database.passwordFile}')"
       '' + ''
-        if [ `psql ${cfg.database.name} -c \
+        if [ `psql -c \
                 "select count(*) from pg_class c \
                 join pg_namespace s on s.oid = c.relnamespace \
                 where s.nspname not in ('pg_catalog', 'pg_toast', 'information_schema') \
                 and s.nspname not like 'pg_temp%';" | sed -n 3p` -eq 0 ]; then
+          echo "Seeding database"
           SAFETY_ASSURED=1 rails db:schema:load
           rails db:seed
         else
+          echo "Migrating database (this might be a noop)"
           rails db:migrate
         fi
       '' +  lib.optionalString (!databaseActuallyCreateLocally) ''
-        rm $PGPASSFILE
-        unset PGPASSFILE
+        unset PGPASSWORD
       '';
       path = [ cfg.package pkgs.postgresql ];
       environment = env // lib.optionalAttrs (!databaseActuallyCreateLocally) {
         PGHOST = cfg.database.host;
+        PGPORT = toString cfg.database.port;
+        PGDATABASE = cfg.database.name;
         PGUSER = cfg.database.user;
       };
       serviceConfig = {
@@ -776,7 +773,7 @@ in {
         # System Call Filtering
         SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
       } // cfgService;
-      path = with pkgs; [ file imagemagick ffmpeg ];
+      path = with pkgs; [ ffmpeg-headless file imagemagick ];
     };
 
     systemd.services.mastodon-media-auto-remove = lib.mkIf cfg.mediaAutoRemove.enable {
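
The migration job now passes connection details through the standard libpq environment variables instead of a temporary password file. A sketch of the external-database configuration it expects, using the module's `database.*` options referenced above; the host and secret path are hypothetical:

    services.mastodon.database = {
      createLocally = false;
      host = "db.example.org";                          # hypothetical
      port = 5432;
      name = "mastodon";
      user = "mastodon";
      passwordFile = "/run/keys/mastodon-db-password";  # read into PGPASSWORD by mastodon-init-db
    };
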
diff --git a/nixos/modules/services/web-apps/mobilizon.nix b/nixos/modules/services/web-apps/mobilizon.nix
index 0a530bff92325..bdb08f6131496 100644
--- a/nixos/modules/services/web-apps/mobilizon.nix
+++ b/nixos/modules/services/web-apps/mobilizon.nix
@@ -384,7 +384,7 @@ in
           ensureDBOwnership = false;
         }
       ];
-      extraPlugins = with postgresql.pkgs; [ postgis ];
+      extraPlugins = ps: with ps; [ postgis ];
     };
 
     # Nginx config taken from support/nginx/mobilizon-release.conf
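
`extraPlugins` now takes a function over the plugin set of the configured PostgreSQL package rather than a plain list; a minimal sketch of the new form:

    services.postgresql.extraPlugins = ps: with ps; [ postgis ];
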
diff --git a/nixos/modules/services/web-apps/nextcloud.md b/nixos/modules/services/web-apps/nextcloud.md
index ecc7f380592ae..ce8f96a6a3896 100644
--- a/nixos/modules/services/web-apps/nextcloud.md
+++ b/nixos/modules/services/web-apps/nextcloud.md
@@ -5,7 +5,7 @@ self-hostable cloud platform. The server setup can be automated using
 [services.nextcloud](#opt-services.nextcloud.enable). A
 desktop client is packaged at `pkgs.nextcloud-client`.
 
-The current default by NixOS is `nextcloud27` which is also the latest
+The current default by NixOS is `nextcloud28` which is also the latest
 major version available.
 
 ## Basic usage {#module-services-nextcloud-basic-usage}
@@ -51,7 +51,7 @@ to ensure that changes can be applied by changing the module's options.
 In case the application serves multiple domains (those are checked with
 [`$_SERVER['HTTP_HOST']`](https://www.php.net/manual/en/reserved.variables.server.php))
 they need to be added to
-[`services.nextcloud.config.extraTrustedDomains`](#opt-services.nextcloud.config.extraTrustedDomains).
+[`services.nextcloud.extraOptions.trusted_domains`](#opt-services.nextcloud.extraOptions.trusted_domains).
 
 Auto updates for Nextcloud apps can be enabled using
 [`services.nextcloud.autoUpdateApps`](#opt-services.nextcloud.autoUpdateApps.enable).
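
A minimal sketch of the renamed option, assuming a hypothetical extra domain; the configured host name is added to the list by the module automatically:

    services.nextcloud.extraOptions.trusted_domains = [ "cloud.example.net" ];
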
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 6c50ea3c81ef7..39f4e8f11620c 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -9,6 +9,7 @@ let
   jsonFormat = pkgs.formats.json {};
 
   defaultPHPSettings = {
+    output_buffering = "0";
     short_open_tag = "Off";
     expose_php = "Off";
     error_reporting = "E_ALL & ~E_DEPRECATED & ~E_STRICT";
@@ -23,11 +24,49 @@ let
     catch_workers_output = "yes";
   };
 
+  appStores = {
+    # default apps bundled with pkgs.nextcloudXX, e.g. files, contacts
+    apps = {
+      enabled = true;
+      writable = false;
+    };
+    # apps installed via cfg.extraApps
+    nix-apps = {
+      enabled = cfg.extraApps != { };
+      linkTarget = pkgs.linkFarm "nix-apps"
+        (mapAttrsToList (name: path: { inherit name path; }) cfg.extraApps);
+      writable = false;
+    };
+    # apps installed via the app store.
+    store-apps = {
+      enabled = cfg.appstoreEnable == null || cfg.appstoreEnable;
+      linkTarget = "${cfg.home}/store-apps";
+      writable = true;
+    };
+  };
+
+  webroot = pkgs.runCommand
+    "${cfg.package.name or "nextcloud"}-with-apps"
+    { }
+    ''
+      mkdir $out
+      ln -sfv "${cfg.package}"/* "$out"
+      ${concatStrings
+        (mapAttrsToList (name: store: optionalString (store.enabled && store?linkTarget) ''
+          if [ -e "$out"/${name} ]; then
+            echo "Didn't expect ${name} already in $out!"
+            exit 1
+          fi
+          ln -sfTv ${store.linkTarget} "$out"/${name}
+        '') appStores)}
+    '';
+
   inherit (cfg) datadir;
 
   phpPackage = cfg.phpPackage.buildEnv {
     extensions = { enabled, all }:
       (with all; enabled
+        ++ [ bz2 intl sodium ] # recommended
         ++ optional cfg.enableImagemagick imagick
         # Optionally enabled depending on caching settings
         ++ optional cfg.caching.apcu apcu
@@ -44,7 +83,7 @@ let
 
   occ = pkgs.writeScriptBin "nextcloud-occ" ''
     #! ${pkgs.runtimeShell}
-    cd ${cfg.package}
+    cd ${webroot}
     sudo=exec
     if [[ "$USER" != nextcloud ]]; then
       sudo='exec /run/wrappers/bin/sudo -u nextcloud --preserve-env=NEXTCLOUD_CONFIG_DIR --preserve-env=OC_PASS'
@@ -61,7 +100,9 @@ let
   pgsqlLocal = cfg.database.createLocally && cfg.config.dbtype == "pgsql";
 
   # https://github.com/nextcloud/documentation/pull/11179
-  ocmProviderIsNotAStaticDirAnymore = versionAtLeast cfg.package.version "27.1.2";
+  ocmProviderIsNotAStaticDirAnymore = versionAtLeast cfg.package.version "27.1.2"
+    || (versionOlder cfg.package.version "27.0.0"
+      && versionAtLeast cfg.package.version "26.0.8");
 
 in {
 
@@ -91,6 +132,25 @@ in {
     (mkRemovedOptionModule [ "services" "nextcloud" "disableImagemagick" ] ''
       Use services.nextcloud.enableImagemagick instead.
     '')
+    (mkRemovedOptionModule [ "services" "nextcloud" "config" "dbport" ] ''
+      Add port to services.nextcloud.config.dbhost instead.
+    '')
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "logLevel" ] [ "services" "nextcloud" "extraOptions" "loglevel" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "logType" ] [ "services" "nextcloud" "extraOptions" "log_type" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "config" "defaultPhoneRegion" ] [ "services" "nextcloud" "extraOptions" "default_phone_region" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "config" "overwriteProtocol" ] [ "services" "nextcloud" "extraOptions" "overwriteprotocol" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "skeletonDirectory" ] [ "services" "nextcloud" "extraOptions" "skeletondirectory" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "globalProfiles" ] [ "services" "nextcloud" "extraOptions" "profile.enabled" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "config" "extraTrustedDomains" ] [ "services" "nextcloud" "extraOptions" "trusted_domains" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "config" "trustedProxies" ] [ "services" "nextcloud" "extraOptions" "trusted_proxies" ])
   ];
 
   options.services.nextcloud = {
@@ -154,32 +214,6 @@ in {
         Set this to false to disable the installation of apps from the global appstore. App management is always enabled regardless of this setting.
       '';
     };
-    logLevel = mkOption {
-      type = types.ints.between 0 4;
-      default = 2;
-      description = lib.mdDoc ''
-        Log level value between 0 (DEBUG) and 4 (FATAL).
-
-        - 0 (debug): Log all activity.
-
-        - 1 (info): Log activity such as user logins and file activities, plus warnings, errors, and fatal errors.
-
-        - 2 (warn): Log successful operations, as well as warnings of potential problems, errors and fatal errors.
-
-        - 3 (error): Log failed operations and fatal errors.
-
-        - 4 (fatal): Log only fatal errors that cause the server to stop.
-      '';
-    };
-    logType = mkOption {
-      type = types.enum [ "errorlog" "file" "syslog" "systemd" ];
-      default = "syslog";
-      description = lib.mdDoc ''
-        Logging backend to use.
-        systemd requires the php-systemd package to be added to services.nextcloud.phpExtraExtensions.
-        See the [nextcloud documentation](https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/logging_configuration.html) for details.
-      '';
-    };
     https = mkOption {
       type = types.bool;
       default = false;
@@ -188,7 +222,7 @@ in {
     package = mkOption {
       type = types.package;
       description = lib.mdDoc "Which package to use for the Nextcloud instance.";
-      relatedPackages = [ "nextcloud26" "nextcloud27" ];
+      relatedPackages = [ "nextcloud26" "nextcloud27" "nextcloud28" ];
     };
     phpPackage = mkPackageOption pkgs "php" {
       example = "php82";
@@ -203,16 +237,6 @@ in {
       '';
     };
 
-    skeletonDirectory = mkOption {
-      default = "";
-      type = types.str;
-      description = lib.mdDoc ''
-        The directory where the skeleton files are located. These files will be
-        copied to the data directory of new users. Leave empty to not copy any
-        skeleton files.
-      '';
-    };
-
     webfinger = mkOption {
       type = types.bool;
       default = false;
@@ -238,7 +262,7 @@ in {
     };
 
     phpOptions = mkOption {
-      type = types.attrsOf types.str;
+      type = with types; attrsOf (oneOf [ str int ]);
       defaultText = literalExpression (generators.toPretty { } defaultPHPSettings);
       description = lib.mdDoc ''
         Options for PHP's php.ini file for nextcloud.
@@ -312,7 +336,6 @@ in {
 
     };
 
-
     config = {
       dbtype = mkOption {
         type = types.enum [ "sqlite" "pgsql" "mysql" ];
@@ -343,18 +366,14 @@ in {
           else if mysqlLocal then "localhost:/run/mysqld/mysqld.sock"
           else "localhost";
         defaultText = "localhost";
+        example = "localhost:5000";
         description = lib.mdDoc ''
-          Database host or socket path.
+          Database host (+port) or socket path.
           If [](#opt-services.nextcloud.database.createLocally) is true and
           [](#opt-services.nextcloud.config.dbtype) is either `pgsql` or `mysql`,
           defaults to the correct Unix socket instead.
         '';
       };
-      dbport = mkOption {
-        type = with types; nullOr (either int str);
-        default = null;
-        description = lib.mdDoc "Database port.";
-      };
       dbtableprefix = mkOption {
         type = types.nullOr types.str;
         default = null;
@@ -377,53 +396,6 @@ in {
           setup of Nextcloud by the systemd service `nextcloud-setup.service`.
         '';
       };
-
-      extraTrustedDomains = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = lib.mdDoc ''
-          Trusted domains from which the Nextcloud installation will be
-          accessible.  You don't need to add
-          `services.nextcloud.hostname` here.
-        '';
-      };
-
-      trustedProxies = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = lib.mdDoc ''
-          Trusted proxies to provide if the Nextcloud installation is being
-          proxied to secure against, e.g. spoofing.
-        '';
-      };
-
-      overwriteProtocol = mkOption {
-        type = types.nullOr (types.enum [ "http" "https" ]);
-        default = null;
-        example = "https";
-
-        description = lib.mdDoc ''
-          Force Nextcloud to always use HTTP or HTTPS i.e. for link generation.
-          Nextcloud uses the currently used protocol by default, but when
-          behind a reverse-proxy, it may use `http` for everything although
-          Nextcloud may be served via HTTPS.
-        '';
-      };
-
-      defaultPhoneRegion = mkOption {
-        default = null;
-        type = types.nullOr types.str;
-        example = "DE";
-        description = lib.mdDoc ''
-          An [ISO 3166-1](https://www.iso.org/iso-3166-country-codes.html)
-          country code which replaces automatic phone-number detection
-          without a country code.
-
-          As an example, with `DE` set as the default phone region,
-          the `+49` prefix can be omitted for phone numbers.
-        '';
-      };
-
       objectstore = {
         s3 = {
           enable = mkEnableOption (lib.mdDoc ''
@@ -606,30 +578,109 @@ in {
         The nextcloud-occ program preconfigured to target this Nextcloud instance.
       '';
     };
-    globalProfiles = mkEnableOption (lib.mdDoc "global profiles") // {
-      description = lib.mdDoc ''
-        Makes user-profiles globally available under `nextcloud.tld/u/user.name`.
-        Even though it's enabled by default in Nextcloud, it must be explicitly enabled
-        here because it has the side-effect that personal information is even accessible to
-        unauthenticated users by default.
-
-        By default, the following properties are set to “Show to everyone”
-        if this flag is enabled:
-        - About
-        - Full name
-        - Headline
-        - Organisation
-        - Profile picture
-        - Role
-        - Twitter
-        - Website
-
-        Only has an effect in Nextcloud 23 and later.
-      '';
-    };
 
     extraOptions = mkOption {
-      type = jsonFormat.type;
+      type = types.submodule {
+        freeformType = jsonFormat.type;
+        options = {
+
+          loglevel = mkOption {
+            type = types.ints.between 0 4;
+            default = 2;
+            description = lib.mdDoc ''
+              Log level value between 0 (DEBUG) and 4 (FATAL).
+
+              - 0 (debug): Log all activity.
+
+              - 1 (info): Log activity such as user logins and file activities, plus warnings, errors, and fatal errors.
+
+              - 2 (warn): Log successful operations, as well as warnings of potential problems, errors and fatal errors.
+
+              - 3 (error): Log failed operations and fatal errors.
+
+              - 4 (fatal): Log only fatal errors that cause the server to stop.
+            '';
+          };
+          log_type = mkOption {
+            type = types.enum [ "errorlog" "file" "syslog" "systemd" ];
+            default = "syslog";
+            description = lib.mdDoc ''
+              Logging backend to use.
+              systemd requires the php-systemd package to be added to services.nextcloud.phpExtraExtensions.
+              See the [nextcloud documentation](https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/logging_configuration.html) for details.
+            '';
+          };
+          skeletondirectory = mkOption {
+            default = "";
+            type = types.str;
+            description = lib.mdDoc ''
+              The directory where the skeleton files are located. These files will be
+              copied to the data directory of new users. Leave empty to not copy any
+              skeleton files.
+            '';
+          };
+          trusted_domains = mkOption {
+            type = types.listOf types.str;
+            default = [];
+            description = lib.mdDoc ''
+              Trusted domains from which the Nextcloud installation will be
+              accessible. You don't need to add
+              `services.nextcloud.hostname` here.
+            '';
+          };
+          trusted_proxies = mkOption {
+            type = types.listOf types.str;
+            default = [];
+            description = lib.mdDoc ''
+              Trusted proxies to provide if the Nextcloud installation is being
+              proxied, to protect against e.g. spoofing.
+            '';
+          };
+          overwriteprotocol = mkOption {
+            type = types.enum [ "" "http" "https" ];
+            default = "";
+            example = "https";
+            description = lib.mdDoc ''
+              Force Nextcloud to always use HTTP or HTTPS i.e. for link generation.
+              Nextcloud uses the currently used protocol by default, but when
+              behind a reverse-proxy, it may use `http` for everything although
+              Nextcloud may be served via HTTPS.
+            '';
+          };
+          default_phone_region = mkOption {
+            default = "";
+            type = types.str;
+            example = "DE";
+            description = lib.mdDoc ''
+              An [ISO 3166-1](https://www.iso.org/iso-3166-country-codes.html)
+              country code which replaces automatic phone-number detection
+              without a country code.
+
+              As an example, with `DE` set as the default phone region,
+              the `+49` prefix can be omitted for phone numbers.
+            '';
+          };
+          "profile.enabled" = mkEnableOption (lib.mdDoc "global profiles") // {
+            description = lib.mdDoc ''
+              Makes user-profiles globally available under `nextcloud.tld/u/user.name`.
+              Even though it's enabled by default in Nextcloud, it must be explicitly enabled
+              here because it has the side-effect that personal information is even accessible to
+              unauthenticated users by default.
+              By default, the following properties are set to “Show to everyone”
+              if this flag is enabled:
+              - About
+              - Full name
+              - Headline
+              - Organisation
+              - Profile picture
+              - Role
+              - Twitter
+              - Website
+              Only has an effect in Nextcloud 23 and later.
+            '';
+          };
+        };
+      };
       default = {};
       description = lib.mdDoc ''
         Extra options which should be appended to Nextcloud's config.php file.
@@ -677,7 +728,7 @@ in {
 
   config = mkIf cfg.enable (mkMerge [
     { warnings = let
-        latest = 27;
+        latest = 28;
         upgradeWarning = major: nixos:
           ''
             A legacy Nextcloud install (from before NixOS ${nixos}) may be installed.
@@ -698,7 +749,8 @@ in {
         '')
         ++ (optional (versionOlder cfg.package.version "25") (upgradeWarning 24 "22.11"))
         ++ (optional (versionOlder cfg.package.version "26") (upgradeWarning 25 "23.05"))
-        ++ (optional (versionOlder cfg.package.version "27") (upgradeWarning 26 "23.11"));
+        ++ (optional (versionOlder cfg.package.version "27") (upgradeWarning 26 "23.11"))
+        ++ (optional (versionOlder cfg.package.version "28") (upgradeWarning 27 "24.05"));
 
       services.nextcloud.package = with pkgs;
         mkDefault (
@@ -708,15 +760,13 @@ in {
               nextcloud defined in an overlay, please set `services.nextcloud.package` to
               `pkgs.nextcloud`.
             ''
-          else if versionOlder stateVersion "22.11" then nextcloud24
           else if versionOlder stateVersion "23.05" then nextcloud25
           else if versionOlder stateVersion "23.11" then nextcloud26
-          else nextcloud27
+          else if versionOlder stateVersion "24.05" then nextcloud27
+          else nextcloud28
         );
 
-      services.nextcloud.phpPackage =
-        if versionOlder cfg.package.version "26" then pkgs.php81
-        else pkgs.php82;
+      services.nextcloud.phpPackage = pkgs.php82;
 
       services.nextcloud.phpOptions = mkMerge [
         (mapAttrs (const mkOptionDefault) defaultPHPSettings)
@@ -764,11 +814,10 @@ in {
         # When upgrading the Nextcloud package, Nextcloud can report errors such as
         # "The files of the app [all apps in /var/lib/nextcloud/apps] were not replaced correctly"
         # Restarting phpfpm on Nextcloud package update fixes these issues (but this is a workaround).
-        phpfpm-nextcloud.restartTriggers = [ cfg.package ];
+        phpfpm-nextcloud.restartTriggers = [ webroot ];
 
         nextcloud-setup = let
           c = cfg.config;
-          writePhpArray = a: "[${concatMapStringsSep "," (val: ''"${toString val}"'') a}]";
           requiresReadSecretFunction = c.dbpassFile != null || c.objectstore.s3.enable;
           objectstoreConfig = let s3 = c.objectstore.s3; in optionalString s3.enable ''
             'objectstore' => [
@@ -798,6 +847,10 @@ in {
 
           nextcloudGreaterOrEqualThan = req: versionAtLeast cfg.package.version req;
 
+          mkAppStoreConfig = name: { enabled, writable, ... }: optionalString enabled ''
+            [ 'path' => '${webroot}/${name}', 'url' => '/${name}', 'writable' => ${boolToString writable} ],
+          '';
+
           overrideConfig = pkgs.writeText "nextcloud-config.php" ''
             <?php
             ${optionalString requiresReadSecretFunction ''
@@ -826,20 +879,12 @@ in {
             }
             $CONFIG = [
               'apps_paths' => [
-                ${optionalString (cfg.extraApps != { }) "[ 'path' => '${cfg.home}/nix-apps', 'url' => '/nix-apps', 'writable' => false ],"}
-                [ 'path' => '${cfg.home}/apps', 'url' => '/apps', 'writable' => false ],
-                [ 'path' => '${cfg.home}/store-apps', 'url' => '/store-apps', 'writable' => true ],
+                ${concatStrings (mapAttrsToList mkAppStoreConfig appStores)}
               ],
               ${optionalString (showAppStoreSetting) "'appstoreenabled' => ${renderedAppStoreSetting},"}
-              'datadirectory' => '${datadir}/data',
-              'skeletondirectory' => '${cfg.skeletonDirectory}',
               ${optionalString cfg.caching.apcu "'memcache.local' => '\\OC\\Memcache\\APCu',"}
-              'log_type' => '${cfg.logType}',
-              'loglevel' => '${builtins.toString cfg.logLevel}',
-              ${optionalString (c.overwriteProtocol != null) "'overwriteprotocol' => '${c.overwriteProtocol}',"}
               ${optionalString (c.dbname != null) "'dbname' => '${c.dbname}',"}
               ${optionalString (c.dbhost != null) "'dbhost' => '${c.dbhost}',"}
-              ${optionalString (c.dbport != null) "'dbport' => '${toString c.dbport}',"}
               ${optionalString (c.dbuser != null) "'dbuser' => '${c.dbuser}',"}
               ${optionalString (c.dbtableprefix != null) "'dbtableprefix' => '${toString c.dbtableprefix}',"}
               ${optionalString (c.dbpassFile != null) ''
@@ -849,10 +894,6 @@ in {
                 ''
               }
               'dbtype' => '${c.dbtype}',
-              'trusted_domains' => ${writePhpArray ([ cfg.hostName ] ++ c.extraTrustedDomains)},
-              'trusted_proxies' => ${writePhpArray (c.trustedProxies)},
-              ${optionalString (c.defaultPhoneRegion != null) "'default_phone_region' => '${c.defaultPhoneRegion}',"}
-              ${optionalString (nextcloudGreaterOrEqualThan "23") "'profile.enabled' => ${boolToString cfg.globalProfiles},"}
               ${objectstoreConfig}
             ];
 
@@ -888,7 +929,6 @@ in {
               # will be omitted.
               ${if c.dbname != null then "--database-name" else null} = ''"${c.dbname}"'';
               ${if c.dbhost != null then "--database-host" else null} = ''"${c.dbhost}"'';
-              ${if c.dbport != null then "--database-port" else null} = ''"${toString c.dbport}"'';
               ${if c.dbuser != null then "--database-user" else null} = ''"${c.dbuser}"'';
               "--database-pass" = "\"\$${dbpass.arg}\"";
               "--admin-user" = ''"${c.adminuser}"'';
@@ -905,7 +945,7 @@ in {
             (i: v: ''
               ${occ}/bin/nextcloud-occ config:system:set trusted_domains \
                 ${toString i} --value="${toString v}"
-            '') ([ cfg.hostName ] ++ cfg.config.extraTrustedDomains));
+            '') ([ cfg.hostName ] ++ cfg.extraOptions.trusted_domains));
 
         in {
           wantedBy = [ "multi-user.target" ];
@@ -933,17 +973,16 @@ in {
               exit 1
             fi
 
-            ln -sf ${cfg.package}/apps ${cfg.home}/
-
-            # Install extra apps
-            ln -sfT \
-              ${pkgs.linkFarm "nix-apps"
-                (mapAttrsToList (name: path: { inherit name path; }) cfg.extraApps)} \
-              ${cfg.home}/nix-apps
+            ${concatMapStrings (name: ''
+              if [ -d "${cfg.home}"/${name} ]; then
+                echo "Cleaning up ${name}; these are now bundled in the webroot store-path!"
+                rm -r "${cfg.home}"/${name}
+              fi
+            '') [ "nix-apps" "apps" ]}
 
             # create nextcloud directories.
             # if the directories exist already with wrong permissions, we fix that
-            for dir in ${datadir}/config ${datadir}/data ${cfg.home}/store-apps ${cfg.home}/nix-apps; do
+            for dir in ${datadir}/config ${datadir}/data ${cfg.home}/store-apps; do
               if [ ! -e $dir ]; then
                 install -o nextcloud -g nextcloud -d $dir
               elif [ $(stat -c "%G" $dir) != "nextcloud" ]; then
@@ -980,7 +1019,7 @@ in {
           environment.NEXTCLOUD_CONFIG_DIR = "${datadir}/config";
           serviceConfig.Type = "oneshot";
           serviceConfig.User = "nextcloud";
-          serviceConfig.ExecStart = "${phpPackage}/bin/php -f ${cfg.package}/cron.php";
+          serviceConfig.ExecStart = "${phpPackage}/bin/php -f ${webroot}/cron.php";
         };
         nextcloud-update-plugins = mkIf cfg.autoUpdateApps.enable {
           after = [ "nextcloud-setup.service" ];
@@ -1041,22 +1080,25 @@ in {
         user = "nextcloud";
       };
 
-      services.nextcloud = lib.mkIf cfg.configureRedis {
-        caching.redis = true;
-        extraOptions = {
+      services.nextcloud = {
+        caching.redis = lib.mkIf cfg.configureRedis true;
+        extraOptions = mkMerge [({
+          datadirectory = lib.mkDefault "${datadir}/data";
+          trusted_domains = [ cfg.hostName ];
+        }) (lib.mkIf cfg.configureRedis {
           "memcache.distributed" = ''\OC\Memcache\Redis'';
           "memcache.locking" = ''\OC\Memcache\Redis'';
           redis = {
             host = config.services.redis.servers.nextcloud.unixSocket;
             port = 0;
           };
-        };
+        })];
       };
 
       services.nginx.enable = mkDefault true;
 
       services.nginx.virtualHosts.${cfg.hostName} = {
-        root = cfg.package;
+        root = webroot;
         locations = {
           "= /robots.txt" = {
             priority = 100;
@@ -1073,14 +1115,6 @@ in {
               }
             '';
           };
-          "~ ^/store-apps" = {
-            priority = 201;
-            extraConfig = "root ${cfg.home};";
-          };
-          "~ ^/nix-apps" = {
-            priority = 201;
-            extraConfig = "root ${cfg.home};";
-          };
           "^~ /.well-known" = {
             priority = 210;
             extraConfig = ''
@@ -1129,10 +1163,13 @@ in {
               fastcgi_read_timeout ${builtins.toString cfg.fastcgiTimeout}s;
             '';
           };
-          "~ \\.(?:css|js|mjs|svg|gif|png|jpg|jpeg|ico|wasm|tflite|map|html|ttf|bcmap|mp4|webm)$".extraConfig = ''
+          "~ \\.(?:css|js|mjs|svg|gif|png|jpg|jpeg|ico|wasm|tflite|map|html|ttf|bcmap|mp4|webm|ogg|flac)$".extraConfig = ''
             try_files $uri /index.php$request_uri;
             expires 6M;
             access_log off;
+            location ~ \.mjs$ {
+              default_type text/javascript;
+            }
             location ~ \.wasm$ {
               default_type application/wasm;
             }
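
For existing configurations, the removed and renamed Nextcloud options map onto the new structured `extraOptions` submodule; a hedged sketch of the equivalent settings:

    services.nextcloud.extraOptions = {
      loglevel = 2;                        # was services.nextcloud.logLevel
      log_type = "syslog";                 # was services.nextcloud.logType
      default_phone_region = "DE";         # was services.nextcloud.config.defaultPhoneRegion
      overwriteprotocol = "https";         # was services.nextcloud.config.overwriteProtocol
      trusted_proxies = [ "127.0.0.1" ];   # was services.nextcloud.config.trustedProxies
      "profile.enabled" = false;           # was services.nextcloud.globalProfiles
    };
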
diff --git a/nixos/modules/services/web-apps/node-red.nix b/nixos/modules/services/web-apps/node-red.nix
index de78f05a98ca2..82f89783d778a 100644
--- a/nixos/modules/services/web-apps/node-red.nix
+++ b/nixos/modules/services/web-apps/node-red.nix
@@ -19,7 +19,7 @@ in
   options.services.node-red = {
     enable = mkEnableOption (lib.mdDoc "the Node-RED service");
 
-    package = mkPackageOption pkgs "nodePackages.node-red" { };
+    package = mkPackageOption pkgs [ "nodePackages" "node-red" ] { };
 
     openFirewall = mkOption {
       type = types.bool;
diff --git a/nixos/modules/services/web-apps/windmill.nix b/nixos/modules/services/web-apps/windmill.nix
new file mode 100644
index 0000000000000..8e940dabdc1f8
--- /dev/null
+++ b/nixos/modules/services/web-apps/windmill.nix
@@ -0,0 +1,177 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.windmill;
+in
+{
+  options.services.windmill = {
+    enable = lib.mkEnableOption (lib.mdDoc "windmill service");
+
+    serverPort = lib.mkOption {
+      type = lib.types.port;
+      default = 8001;
+      description = lib.mdDoc "Port the windmill server listens on.";
+    };
+
+    lspPort = lib.mkOption {
+      type = lib.types.port;
+      default = 3001;
+      description = lib.mdDoc "Port the windmill lsp listens on.";
+    };
+
+    database = {
+      name = lib.mkOption {
+        type = lib.types.str;
+        # the simplest database setup is to have the database named like the user.
+        default = "windmill";
+        description = lib.mdDoc "Database name.";
+      };
+
+      user = lib.mkOption {
+        type = lib.types.str;
+        # the simplest database setup is to have the database user named like the database.
+        default = "windmill";
+        description = lib.mdDoc "Database user.";
+      };
+
+      urlPath = lib.mkOption {
+        type = lib.types.path;
+        description = lib.mdDoc ''
+          Path to the file containing the database URL Windmill should connect to. This is not derived from the database user and name, as it might contain a secret.
+        '';
+        example = "config.age.secrets.DATABASE_URL_FILE.path";
+      };
+      createLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to create a local database automatically.";
+      };
+    };
+
+    baseUrl = lib.mkOption {
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        The base URL that Windmill will be served on.
+      '';
+      example = "https://windmill.example.com";
+    };
+
+    logLevel = lib.mkOption {
+      type = lib.types.enum [ "error" "warn" "info" "debug" "trace" ];
+      default = "info";
+      description = lib.mdDoc "Log level";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    services.postgresql = lib.optionalAttrs (cfg.database.createLocally) {
+      enable = lib.mkDefault true;
+
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+
+   };
+
+   systemd.services =
+    let
+      serviceConfig = {
+        DynamicUser = true;
+        # using the same user to simplify db connection
+        User = cfg.database.user;
+        ExecStart = "${pkgs.windmill}/bin/windmill";
+
+        Restart = "always";
+        LoadCredential = [
+          "DATABASE_URL_FILE:${cfg.database.urlPath}"
+        ];
+      };
+    in
+    {
+
+    # coming from https://github.com/windmill-labs/windmill/blob/main/init-db-as-superuser.sql
+# modified to not grant privileges on all tables
+    # create role windmill_user and windmill_admin only if they don't exist
+    postgresql.postStart = lib.mkIf cfg.database.createLocally (lib.mkAfter ''
+      $PSQL -tA <<"EOF"
+DO $$
+BEGIN
+    IF NOT EXISTS (
+        SELECT FROM pg_catalog.pg_roles
+        WHERE rolname = 'windmill_user'
+    ) THEN
+        CREATE ROLE windmill_user;
+        GRANT ALL PRIVILEGES ON DATABASE ${cfg.database.name} TO windmill_user;
+    ELSE
+      RAISE NOTICE 'Role "windmill_user" already exists. Skipping.';
+    END IF;
+    IF NOT EXISTS (
+        SELECT FROM pg_catalog.pg_roles
+        WHERE rolname = 'windmill_admin'
+    ) THEN
+      CREATE ROLE windmill_admin WITH BYPASSRLS;
+      GRANT windmill_user TO windmill_admin;
+    ELSE
+      RAISE NOTICE 'Role "windmill_admin" already exists. Skipping.';
+    END IF;
+    GRANT windmill_admin TO windmill;
+END
+$$;
+EOF
+    '');
+
+     windmill-server = {
+        description = "Windmill server";
+        after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = serviceConfig // { StateDirectory = "windmill";};
+
+        environment = {
+          DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
+          PORT = builtins.toString cfg.serverPort;
+          WM_BASE_URL = cfg.baseUrl;
+          RUST_LOG = cfg.logLevel;
+          MODE = "server";
+        };
+      };
+
+     windmill-worker = {
+        description = "Windmill worker";
+        after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = serviceConfig // { StateDirectory = "windmill-worker";};
+
+        environment = {
+          DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
+          WM_BASE_URL = cfg.baseUrl;
+          RUST_LOG = cfg.logLevel;
+          MODE = "worker";
+          WORKER_GROUP = "default";
+          KEEP_JOB_DIR = "false";
+        };
+      };
+
+     windmill-worker-native = {
+        description = "Windmill worker native";
+        after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = serviceConfig // { StateDirectory = "windmill-worker-native";};
+
+        environment = {
+          DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
+          WM_BASE_URL = cfg.baseUrl;
+          RUST_LOG = cfg.logLevel;
+          MODE = "worker";
+          WORKER_GROUP = "native";
+        };
+      };
+    };
+  };
+}
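
A minimal sketch of enabling the new Windmill module, with a hypothetical base URL and secret path; the database is provisioned locally by default:

    services.windmill = {
      enable = true;
      baseUrl = "https://windmill.example.com";              # hypothetical
      database.urlPath = "/run/keys/windmill-database-url";  # file containing the PostgreSQL connection URL
      # serverPort = 8001, lspPort = 3001 and logLevel = "info" are the module defaults
    };
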
diff --git a/nixos/modules/services/web-apps/wordpress.nix b/nixos/modules/services/web-apps/wordpress.nix
index 03d5634854a37..002d6683b2ed5 100644
--- a/nixos/modules/services/web-apps/wordpress.nix
+++ b/nixos/modules/services/web-apps/wordpress.nix
@@ -34,7 +34,7 @@ let
       # copy additional plugin(s), theme(s) and language(s)
       ${concatStringsSep "\n" (mapAttrsToList (name: theme: "cp -r ${theme} $out/share/wordpress/wp-content/themes/${name}") cfg.themes)}
       ${concatStringsSep "\n" (mapAttrsToList (name: plugin: "cp -r ${plugin} $out/share/wordpress/wp-content/plugins/${name}") cfg.plugins)}
-      ${concatMapStringsSep "\n" (language: "cp -r ${language}/* $out/share/wordpress/wp-content/languages/") cfg.languages}
+      ${concatMapStringsSep "\n" (language: "cp -r ${language} $out/share/wordpress/wp-content/languages/") cfg.languages}
     '';
   };
 
diff --git a/nixos/modules/services/web-servers/caddy/default.nix b/nixos/modules/services/web-servers/caddy/default.nix
index 497aa9ba956e0..95dc219d108cc 100644
--- a/nixos/modules/services/web-servers/caddy/default.nix
+++ b/nixos/modules/services/web-servers/caddy/default.nix
@@ -147,7 +147,7 @@ in
       default = configFile;
       defaultText = "A Caddyfile automatically generated by values from services.caddy.*";
       example = literalExpression ''
-        pkgs.writeTextDir "Caddyfile" '''
+        pkgs.writeText "Caddyfile" '''
           example.com
 
           root * /var/www/wordpress
@@ -164,9 +164,9 @@ in
     };
 
     adapter = mkOption {
-      default = if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null;
+      default = if ((cfg.configFile != configFile) || (builtins.baseNameOf cfg.configFile) == "Caddyfile") then "caddyfile" else null;
       defaultText = literalExpression ''
-        if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null
+        if ((cfg.configFile != configFile) || (builtins.baseNameOf cfg.configFile) == "Caddyfile") then "caddyfile" else null
       '';
       example = literalExpression "nginx";
       type = with types; nullOr str;
@@ -342,8 +342,9 @@ in
       }
     '';
 
-    # https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size
+    # https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes
     boot.kernel.sysctl."net.core.rmem_max" = mkDefault 2500000;
+    boot.kernel.sysctl."net.core.wmem_max" = mkDefault 2500000;
 
     systemd.packages = [ cfg.package ];
     systemd.services.caddy = {
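Note: a hedged usage sketch of what this Caddy change enables. Pointing `services.caddy.configFile` at any custom file now selects the `caddyfile` adapter by default, not only files literally named "Caddyfile"; the host name and paths below are placeholders.

    { pkgs, ... }:
    {
      services.caddy = {
        enable = true;
        # Custom Caddyfile; the adapter option now defaults to "caddyfile" for it.
        configFile = pkgs.writeText "Caddyfile" ''
          example.com
          root * /var/www/example
          file_server
        '';
      };
    }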
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index cf70dc3259456..1285c2bbb916d 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -352,10 +352,11 @@ let
 
         # The acme-challenge location doesn't need to be added if we are not using any automated
         # certificate provisioning and can also be omitted when we use a certificate obtained via a DNS-01 challenge
-        acmeLocation = optionalString (vhost.enableACME || (vhost.useACMEHost != null && config.security.acme.certs.${vhost.useACMEHost}.dnsProvider == null)) ''
+        acmeLocation = optionalString (vhost.enableACME || (vhost.useACMEHost != null && config.security.acme.certs.${vhost.useACMEHost}.dnsProvider == null))
           # Rule for legitimate ACME Challenge requests (like /.well-known/acme-challenge/xxxxxxxxx)
           # We use ^~ here, so that we don't check any regexes (which could
           # otherwise easily override this intended match accidentally).
+        ''
           location ^~ /.well-known/acme-challenge/ {
             ${optionalString (vhost.acmeFallbackHost != null) "try_files $uri @acme-fallback;"}
             ${optionalString (vhost.acmeRoot != null) "root ${vhost.acmeRoot};"}
@@ -375,10 +376,11 @@ let
             ${concatMapStringsSep "\n" listenString redirectListen}
 
             server_name ${vhost.serverName} ${concatStringsSep " " vhost.serverAliases};
-            ${acmeLocation}
+
             location / {
-              return 301 https://$host$request_uri;
+              return ${toString vhost.redirectCode} https://$host$request_uri;
             }
+            ${acmeLocation}
           }
         ''}
 
@@ -392,13 +394,6 @@ let
             http3 ${if vhost.http3 then "on" else "off"};
             http3_hq ${if vhost.http3_hq then "on" else "off"};
           ''}
-          ${acmeLocation}
-          ${optionalString (vhost.root != null) "root ${vhost.root};"}
-          ${optionalString (vhost.globalRedirect != null) ''
-            location / {
-              return 301 http${optionalString hasSSL "s"}://${vhost.globalRedirect}$request_uri;
-            }
-          ''}
           ${optionalString hasSSL ''
             ssl_certificate ${vhost.sslCertificate};
             ssl_certificate_key ${vhost.sslCertificateKey};
@@ -421,6 +416,14 @@ let
 
           ${mkBasicAuth vhostName vhost}
 
+          ${optionalString (vhost.root != null) "root ${vhost.root};"}
+
+          ${optionalString (vhost.globalRedirect != null) ''
+            location / {
+              return ${toString vhost.redirectCode} http${optionalString hasSSL "s"}://${vhost.globalRedirect}$request_uri;
+            }
+          ''}
+          ${acmeLocation}
           ${mkLocations vhost.locations}
 
           ${vhost.extraConfig}
@@ -449,7 +452,7 @@ let
       ${optionalString (config.tryFiles != null) "try_files ${config.tryFiles};"}
       ${optionalString (config.root != null) "root ${config.root};"}
       ${optionalString (config.alias != null) "alias ${config.alias};"}
-      ${optionalString (config.return != null) "return ${config.return};"}
+      ${optionalString (config.return != null) "return ${toString config.return};"}
       ${config.extraConfig}
       ${optionalString (config.proxyPass != null && config.recommendedProxySettings) "include ${recommendedProxyConfig};"}
       ${mkBasicAuth "sublocation" config}
@@ -649,6 +652,8 @@ in
           Nginx package to use. This defaults to the stable version. Note
           that the nginx team recommends using the mainline version, which is
           available in nixpkgs as `nginxMainline`.
+          Supported Nginx forks include `angie`, `openresty` and `tengine`.
+          For HTTP/3 support use `nginxQuic` or `angieQuic`.
         '';
       };
 
@@ -1128,14 +1133,6 @@ in
       }
 
       {
-        assertion = any (host: host.kTLS) (attrValues virtualHosts) -> versionAtLeast cfg.package.version "1.21.4";
-        message = ''
-          services.nginx.virtualHosts.<name>.kTLS requires nginx version
-          1.21.4 or above; see the documentation for services.nginx.package.
-        '';
-      }
-
-      {
         assertion = all (host: !(host.enableACME && host.useACMEHost != null)) (attrValues virtualHosts);
         message = ''
           Options services.nginx.service.virtualHosts.<name>.enableACME and
@@ -1144,18 +1141,20 @@ in
       }
 
       {
-        assertion = cfg.package.pname != "nginxQuic" -> !(cfg.enableQuicBPF);
+        assertion = cfg.package.pname != "nginxQuic" && cfg.package.pname != "angieQuic" -> !(cfg.enableQuicBPF);
         message = ''
           services.nginx.enableQuicBPF requires using nginxQuic package,
-          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`.
+          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;` or
+          `services.nginx.package = pkgs.angieQuic;`.
         '';
       }
 
       {
-        assertion = cfg.package.pname != "nginxQuic" -> all (host: !host.quic) (attrValues virtualHosts);
+        assertion = cfg.package.pname != "nginxQuic" && cfg.package.pname != "angieQuic" -> all (host: !host.quic) (attrValues virtualHosts);
         message = ''
-          services.nginx.service.virtualHosts.<name>.quic requires using nginxQuic package,
-          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`.
+          services.nginx.service.virtualHosts.<name>.quic requires using nginxQuic or angie packages,
+          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;` or
+          `services.nginx.package = pkgs.angieQuic;`.
         '';
       }
 
@@ -1341,6 +1340,8 @@ in
       nginx.gid = config.ids.gids.nginx;
     };
 
+    boot.kernelModules = optional (versionAtLeast config.boot.kernelPackages.kernel.version "4.17") "tls";
+
     # do not delete the default temp directories created upon nginx startup
     systemd.tmpfiles.rules = [
       "X /tmp/systemd-private-%b-nginx.service-*/tmp/nginx_*"
diff --git a/nixos/modules/services/web-servers/nginx/location-options.nix b/nixos/modules/services/web-servers/nginx/location-options.nix
index 2728852058ea7..2138e551fd434 100644
--- a/nixos/modules/services/web-servers/nginx/location-options.nix
+++ b/nixos/modules/services/web-servers/nginx/location-options.nix
@@ -93,7 +93,7 @@ with lib;
     };
 
     return = mkOption {
-      type = types.nullOr types.str;
+      type = with types; nullOr (oneOf [ str int ]);
       default = null;
       example = "301 http://example.com$request_uri";
       description = lib.mdDoc ''
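Note: a small sketch of what the widened `return` type permits, namely an integer status code on its own in addition to the existing string form.

    {
      services.nginx.virtualHosts."example.com".locations = {
        "/health".return = 204;                         # now accepted as an int
        "/old".return = "301 https://example.com/new";  # string form still works
      };
    }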
diff --git a/nixos/modules/services/web-servers/nginx/tailscale-auth.nix b/nixos/modules/services/web-servers/nginx/tailscale-auth.nix
new file mode 100644
index 0000000000000..a2e4d4a30be5c
--- /dev/null
+++ b/nixos/modules/services/web-servers/nginx/tailscale-auth.nix
@@ -0,0 +1,158 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nginx.tailscaleAuth;
+in
+{
+  options.services.nginx.tailscaleAuth = {
+    enable = mkEnableOption (lib.mdDoc "Enable tailscale.nginx-auth, to authenticate nginx users via tailscale.");
+
+    package = lib.mkPackageOptionMD pkgs "tailscale-nginx-auth" {};
+
+    user = mkOption {
+      type = types.str;
+      default = "tailscale-nginx-auth";
+      description = lib.mdDoc "User which runs tailscale-nginx-auth";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "tailscale-nginx-auth";
+      description = lib.mdDoc "Group which runs tailscale-nginx-auth";
+    };
+
+    expectedTailnet = mkOption {
+      default = "";
+      type = types.nullOr types.str;
+      example = "tailnet012345.ts.net";
+      description = lib.mdDoc ''
+        To prevent node sharing from allowing users to access services across
+        tailnets, declare your expected tailnet's domain here.
+      '';
+    };
+
+    socketPath = mkOption {
+      default = "/run/tailscale-nginx-auth/tailscale-nginx-auth.sock";
+      type = types.path;
+      description = lib.mdDoc ''
+        Path of the socket listening to nginx authorization requests.
+      '';
+    };
+
+    virtualHosts = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        A list of nginx virtual hosts to put behind tailscale.nginx-auth.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.tailscale.enable = true;
+    services.nginx.enable = true;
+
+    users.users.${cfg.user} = {
+      isSystemUser = true;
+      inherit (cfg) group;
+    };
+    users.groups.${cfg.group} = { };
+    users.users.${config.services.nginx.user}.extraGroups = [ cfg.group ];
+    systemd.sockets.tailscale-nginx-auth = {
+      description = "Tailscale NGINX Authentication socket";
+      partOf = [ "tailscale-nginx-auth.service" ];
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ cfg.socketPath ];
+      socketConfig = {
+        SocketMode = "0660";
+        SocketUser = cfg.user;
+        SocketGroup = cfg.group;
+      };
+    };
+
+    systemd.services.tailscale-nginx-auth = {
+      description = "Tailscale NGINX Authentication service";
+      after = [ "nginx.service" ];
+      wants = [ "nginx.service" ];
+      requires = [ "tailscale-nginx-auth.socket" ];
+
+      serviceConfig = {
+        ExecStart = "${lib.getExe cfg.package}";
+        RuntimeDirectory = "tailscale-nginx-auth";
+        User = cfg.user;
+        Group = cfg.group;
+
+        BindPaths = [ "/run/tailscale/tailscaled.sock" ];
+
+        CapabilityBoundingSet = "";
+        DeviceAllow = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictNamespaces = true;
+        RestrictAddressFamilies = [ "AF_UNIX" ];
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [
+          "@system-service"
+          "~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid"
+        ];
+      };
+    };
+
+    services.nginx.virtualHosts = genAttrs
+      cfg.virtualHosts
+      (vhost: {
+        locations."/auth" = {
+          extraConfig = ''
+            internal;
+
+            proxy_pass http://unix:${cfg.socketPath};
+            proxy_pass_request_body off;
+
+            # Upstream uses $http_host here, but we are using gixy to check nginx configurations
+            # gixy wants us to use $host: https://github.com/yandex/gixy/blob/master/docs/en/plugins/hostspoofing.md
+            proxy_set_header Host $host;
+            proxy_set_header Remote-Addr $remote_addr;
+            proxy_set_header Remote-Port $remote_port;
+            proxy_set_header Original-URI $request_uri;
+            proxy_set_header X-Scheme                $scheme;
+            proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
+          '';
+        };
+        locations."/".extraConfig = ''
+          auth_request /auth;
+          auth_request_set $auth_user $upstream_http_tailscale_user;
+          auth_request_set $auth_name $upstream_http_tailscale_name;
+          auth_request_set $auth_login $upstream_http_tailscale_login;
+          auth_request_set $auth_tailnet $upstream_http_tailscale_tailnet;
+          auth_request_set $auth_profile_picture $upstream_http_tailscale_profile_picture;
+
+          proxy_set_header X-Webauth-User "$auth_user";
+          proxy_set_header X-Webauth-Name "$auth_name";
+          proxy_set_header X-Webauth-Login "$auth_login";
+          proxy_set_header X-Webauth-Tailnet "$auth_tailnet";
+          proxy_set_header X-Webauth-Profile-Picture "$auth_profile_picture";
+
+          ${lib.optionalString (cfg.expectedTailnet != "") ''proxy_set_header Expected-Tailnet "${cfg.expectedTailnet}";''}
+        '';
+      });
+  };
+
+  meta.maintainers = with maintainers; [ phaer ];
+
+}
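Note: a minimal sketch, using only the options defined above, of putting virtual hosts behind tailscale.nginx-auth; the host and tailnet names are placeholders.

    {
      services.nginx.tailscaleAuth = {
        enable = true;
        expectedTailnet = "tailnet012345.ts.net";
        virtualHosts = [ "dashboard.example.com" ];
      };
    }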
diff --git a/nixos/modules/services/web-servers/nginx/vhost-options.nix b/nixos/modules/services/web-servers/nginx/vhost-options.nix
index 9db4c8e23025b..64a95afab9f40 100644
--- a/nixos/modules/services/web-servers/nginx/vhost-options.nix
+++ b/nixos/modules/services/web-servers/nginx/vhost-options.nix
@@ -162,10 +162,11 @@ with lib;
       type = types.bool;
       default = false;
       description = lib.mdDoc ''
-        Whether to add a separate nginx server block that permanently redirects (301)
-        all plain HTTP traffic to HTTPS. This will set defaults for
-        `listen` to listen on all interfaces on the respective default
-        ports (80, 443), where the non-SSL listens are used for the redirect vhosts.
+        Whether to add a separate nginx server block that redirects (defaults
+        to 301, configurable with `redirectCode`) all plain HTTP traffic to
+        HTTPS. This will set defaults for `listen` to listen on all interfaces
+        on the respective default ports (80, 443), where the non-SSL listens
+        are used for the redirect vhosts.
       '';
     };
 
@@ -307,8 +308,20 @@ with lib;
       default = null;
       example = "newserver.example.org";
       description = lib.mdDoc ''
-        If set, all requests for this host are redirected permanently to
-        the given hostname.
+        If set, all requests for this host are redirected (defaults to 301,
+        configurable with `redirectCode`) to the given hostname.
+      '';
+    };
+
+    redirectCode = mkOption {
+      type = types.ints.between 300 399;
+      default = 301;
+      example = 308;
+      description = lib.mdDoc ''
+        HTTP status code used by `globalRedirect` and `forceSSL`. Possible use
+        cases include temporary (302, 307) redirects, keeping the request method
+        and body (307, 308), or explicitly resetting the method to GET (303).
+        See <https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections>.
       '';
     };
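Note: a sketch of the new `redirectCode` option combined with `globalRedirect`; the host names are placeholders and ACME setup is assumed elsewhere.

    {
      services.nginx.virtualHosts."old.example.org" = {
        forceSSL = true;
        enableACME = true;
        globalRedirect = "newserver.example.org";
        redirectCode = 308;   # keep method and body instead of the default 301
      };
    }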
 
diff --git a/nixos/modules/services/x11/desktop-managers/cinnamon.nix b/nixos/modules/services/x11/desktop-managers/cinnamon.nix
index e9cadf2194682..f5a6c05865c47 100644
--- a/nixos/modules/services/x11/desktop-managers/cinnamon.nix
+++ b/nixos/modules/services/x11/desktop-managers/cinnamon.nix
@@ -79,20 +79,19 @@ in
           package = mkDefault pkgs.cinnamon.mint-cursor-themes;
         };
       };
-      services.xserver.displayManager.sessionCommands = ''
-        if test "$XDG_CURRENT_DESKTOP" = "Cinnamon"; then
-            true
-            ${concatMapStrings (p: ''
-              if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
-                export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
-              fi
-
-              if [ -d "${p}/lib/girepository-1.0" ]; then
-                export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
-                export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
-              fi
-            '') cfg.sessionPath}
-        fi
+
+      # Have to take care of GDM + Cinnamon on Wayland users
+      environment.extraInit = ''
+        ${concatMapStrings (p: ''
+          if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
+            export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
+          fi
+
+          if [ -d "${p}/lib/girepository-1.0" ]; then
+            export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
+            export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
+          fi
+        '') cfg.sessionPath}
       '';
 
       # Default services
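Note: for reference, a hedged sketch of the `sessionPath` option whose handling moves from `sessionCommands` into `environment.extraInit`; the package chosen is purely illustrative.

    { pkgs, ... }:
    {
      # Packages listed here get their gsettings schemas and typelibs exported,
      # now also for GDM + Cinnamon on Wayland sessions.
      services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gnome.gpaste ];
    }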
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index 027479b1ce094..e0227f93e2f25 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -295,7 +295,7 @@ in
         ++ lib.optional config.powerManagement.enable powerdevil
         ++ lib.optional config.services.colord.enable pkgs.colord-kde
         ++ lib.optional config.services.hardware.bolt.enable pkgs.plasma5Packages.plasma-thunderbolt
-        ++ lib.optionals config.services.samba.enable [ kdenetwork-filesharing pkgs.samba ]
+        ++ lib.optional config.services.samba.enable kdenetwork-filesharing
         ++ lib.optional config.services.xserver.wacom.enable pkgs.wacomtablet
         ++ lib.optional config.services.flatpak.enable flatpak-kcm;
 
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index 6ca7a4425f892..0576619cc8d28 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -7,7 +7,7 @@ let
   cfg = dmcfg.sddm;
   xEnv = config.systemd.services.display-manager.environment;
 
-  sddm = pkgs.libsForQt5.sddm;
+  sddm = cfg.package;
 
   iniFmt = pkgs.formats.ini { };
 
@@ -108,6 +108,8 @@ in
         '';
       };
 
+      package = mkPackageOption pkgs [ "plasma5Packages" "sddm" ] {};
+
       enableHidpi = mkOption {
         type = types.bool;
         default = true;
diff --git a/nixos/modules/services/x11/hardware/libinput.nix b/nixos/modules/services/x11/hardware/libinput.nix
index d2a5b5895e0aa..0ea21eb1dce3a 100644
--- a/nixos/modules/services/x11/hardware/libinput.nix
+++ b/nixos/modules/services/x11/hardware/libinput.nix
@@ -130,9 +130,9 @@ let cfg = config.services.xserver.libinput;
         default = true;
         description =
           lib.mdDoc ''
-            Disables horizontal scrolling. When disabled, this driver will discard any horizontal scroll
-            events from libinput. Note that this does not disable horizontal scrolling, it merely
-            discards the horizontal axis from any scroll events.
+            Enables or disables horizontal scrolling. When disabled, this driver will discard any
+            horizontal scroll events coming from libinput. Note that this does not stop libinput
+            from generating such events; it merely discards the horizontal axis from any scroll events.
           '';
       };
 
diff --git a/nixos/modules/system/activation/bootspec.nix b/nixos/modules/system/activation/bootspec.nix
index 98c234bc340d0..2ed6964b2a6a6 100644
--- a/nixos/modules/system/activation/bootspec.nix
+++ b/nixos/modules/system/activation/bootspec.nix
@@ -11,6 +11,7 @@
 let
   cfg = config.boot.bootspec;
   children = lib.mapAttrs (childName: childConfig: childConfig.configuration.system.build.toplevel) config.specialisation;
+  hasAtLeastOneInitrdSecret = lib.length (lib.attrNames config.boot.initrd.secrets) > 0;
   schemas = {
     v1 = rec {
       filename = "boot.json";
@@ -27,6 +28,7 @@ let
               label = "${config.system.nixos.distroName} ${config.system.nixos.codeName} ${config.system.nixos.label} (Linux ${config.boot.kernelPackages.kernel.modDirVersion})";
             } // lib.optionalAttrs config.boot.initrd.enable {
               initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
+            } // lib.optionalAttrs hasAtLeastOneInitrdSecret {
               initrdSecrets = "${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets";
             };
           }));
diff --git a/nixos/modules/system/boot/binfmt.nix b/nixos/modules/system/boot/binfmt.nix
index d16152ab9dec5..08e3dce708447 100644
--- a/nixos/modules/system/boot/binfmt.nix
+++ b/nixos/modules/system/boot/binfmt.nix
@@ -1,6 +1,6 @@
 { config, lib, pkgs, ... }:
 let
-  inherit (lib) mkOption mkDefault types optionalString stringAfter;
+  inherit (lib) mkOption mkDefault types optionalString;
 
   cfg = config.boot.binfmt;
 
diff --git a/nixos/modules/system/boot/grow-partition.nix b/nixos/modules/system/boot/grow-partition.nix
index 897602f9826ab..8a0fc3a03dac4 100644
--- a/nixos/modules/system/boot/grow-partition.nix
+++ b/nixos/modules/system/boot/grow-partition.nix
@@ -25,7 +25,7 @@ with lib;
     systemd.services.growpart = {
       wantedBy = [ "-.mount" ];
       after = [ "-.mount" ];
-      before = [ "systemd-growfs-root.service" ];
+      before = [ "systemd-growfs-root.service" "shutdown.target" ];
       conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       serviceConfig = {
diff --git a/nixos/modules/system/boot/initrd-ssh.nix b/nixos/modules/system/boot/initrd-ssh.nix
index a8cd2e8f05fcc..61e61f32bc5ee 100644
--- a/nixos/modules/system/boot/initrd-ssh.nix
+++ b/nixos/modules/system/boot/initrd-ssh.nix
@@ -243,8 +243,10 @@ in
 
       services.sshd = {
         description = "SSH Daemon";
-        wantedBy = ["initrd.target"];
-        after = ["network.target" "initrd-nixos-copy-secrets.service"];
+        wantedBy = [ "initrd.target" ];
+        after = [ "network.target" "initrd-nixos-copy-secrets.service" ];
+        before = [ "shutdown.target" ];
+        conflicts = [ "shutdown.target" ];
 
         # Keys from Nix store are world-readable, which sshd doesn't
         # like. If this were a real nix store and not the initrd, we
diff --git a/nixos/modules/system/boot/loader/grub/grub.nix b/nixos/modules/system/boot/loader/grub/grub.nix
index 7097e1d83dca9..0556c875241a1 100644
--- a/nixos/modules/system/boot/loader/grub/grub.nix
+++ b/nixos/modules/system/boot/loader/grub/grub.nix
@@ -36,7 +36,7 @@ let
     # Package set of targeted architecture
     if cfg.forcei686 then pkgs.pkgsi686Linux else pkgs;
 
-  realGrub = if cfg.zfsSupport then grubPkgs.grub2.override { zfsSupport = true; }
+  realGrub = if cfg.zfsSupport then grubPkgs.grub2.override { zfsSupport = true; zfs = cfg.zfsPackage; }
     else grubPkgs.grub2;
 
   grub =
@@ -614,6 +614,16 @@ in
         '';
       };
 
+      zfsPackage = mkOption {
+        type = types.package;
+        internal = true;
+        default = pkgs.zfs;
+        defaultText = literalExpression "pkgs.zfs";
+        description = lib.mdDoc ''
+          Which ZFS package to use if `config.boot.loader.grub.zfsSupport` is true.
+        '';
+      };
+
       efiSupport = mkOption {
         default = false;
         type = types.bool;
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
index e2e7ffe59dcd2..6cd46f30373b5 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
@@ -20,13 +20,13 @@ from dataclasses import dataclass
 class BootSpec:
     init: str
     initrd: str
-    initrdSecrets: str
     kernel: str
     kernelParams: List[str]
     label: str
     system: str
     toplevel: str
     specialisations: Dict[str, "BootSpec"]
+    initrdSecrets: str | None = None
 
 
 
@@ -131,9 +131,8 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
         specialisation=" (%s)" % specialisation if specialisation else "")
 
     try:
-        subprocess.check_call([bootspec.initrdSecrets, "@efiSysMountPoint@%s" % (initrd)])
-    except FileNotFoundError:
-        pass
+        if bootspec.initrdSecrets is not None:
+            subprocess.check_call([bootspec.initrdSecrets, "@efiSysMountPoint@%s" % (initrd)])
     except subprocess.CalledProcessError:
         if current:
             print("failed to create initrd secrets!", file=sys.stderr)
diff --git a/nixos/modules/system/boot/luksroot.nix b/nixos/modules/system/boot/luksroot.nix
index 8bd9e71cb3a99..221e90b6f38fb 100644
--- a/nixos/modules/system/boot/luksroot.nix
+++ b/nixos/modules/system/boot/luksroot.nix
@@ -513,7 +513,7 @@ let
   postLVM = filterAttrs (n: v: !v.preLVM) luks.devices;
 
 
-  stage1Crypttab = pkgs.writeText "initrd-crypttab" (lib.concatStringsSep "\n" (lib.mapAttrsToList (n: v: let
+  stage1Crypttab = pkgs.writeText "initrd-crypttab" (lib.concatLines (lib.mapAttrsToList (n: v: let
     opts = v.crypttabExtraOpts
       ++ optional v.allowDiscards "discard"
       ++ optionals v.bypassWorkqueues [ "no-read-workqueue" "no-write-workqueue" ]
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index 33261021480f1..f236a4c005ad6 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -597,6 +597,8 @@ let
           "DHCP"
           "DHCPServer"
           "LinkLocalAddressing"
+          "IPv6LinkLocalAddressGenerationMode"
+          "IPv6StableSecretAddress"
           "IPv4LLRoute"
           "DefaultRouteOnDevice"
           "LLMNR"
@@ -648,6 +650,7 @@ let
         (assertValueOneOf "DHCP" ["yes" "no" "ipv4" "ipv6"])
         (assertValueOneOf "DHCPServer" boolValues)
         (assertValueOneOf "LinkLocalAddressing" ["yes" "no" "ipv4" "ipv6" "fallback" "ipv4-fallback"])
+        (assertValueOneOf "IPv6LinkLocalAddressGenerationMode" ["eui64" "none" "stable-privacy" "random"])
         (assertValueOneOf "IPv4LLRoute" boolValues)
         (assertValueOneOf "DefaultRouteOnDevice" boolValues)
         (assertValueOneOf "LLMNR" (boolValues ++ ["resolve"]))
@@ -1612,7 +1615,7 @@ let
         description = lib.mdDoc ''
           Each attribute in this set specifies an option in the
           `[WireGuardPeer]` section of the unit.  See
-          {manpage}`systemd.network(5)` for details.
+          {manpage}`systemd.netdev(5)` for details.
         '';
       };
     };
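Note: a brief sketch (not part of the patch) of the newly accepted `[Network]` keys from a NixOS configuration; the interface name and mode are illustrative.

    {
      systemd.network.networks."10-lan" = {
        matchConfig.Name = "enp3s0";
        networkConfig = {
          IPv6LinkLocalAddressGenerationMode = "stable-privacy";
          # IPv6StableSecretAddress can be combined with the "stable-privacy" mode.
        };
      };
    }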
diff --git a/nixos/modules/system/boot/resolved.nix b/nixos/modules/system/boot/resolved.nix
index b898a63179624..538f71cc0b9ae 100644
--- a/nixos/modules/system/boot/resolved.nix
+++ b/nixos/modules/system/boot/resolved.nix
@@ -23,12 +23,13 @@ in
     };
 
     services.resolved.fallbackDns = mkOption {
-      default = [ ];
+      default = null;
       example = [ "8.8.8.8" "2001:4860:4860::8844" ];
-      type = types.listOf types.str;
+      type = types.nullOr (types.listOf types.str);
       description = lib.mdDoc ''
         A list of IPv4 and IPv6 addresses to use as the fallback DNS servers.
-        If this option is empty, a compiled-in list of DNS servers is used instead.
+        If this option is null, a compiled-in list of DNS servers is used instead.
+        Setting this option to an empty list overrides the compiled-in list with
+        an empty one, disabling fallback entirely.
       '';
     };
 
@@ -134,7 +135,7 @@ in
         [Resolve]
         ${optionalString (config.networking.nameservers != [])
           "DNS=${concatStringsSep " " config.networking.nameservers}"}
-        ${optionalString (cfg.fallbackDns != [])
+        ${optionalString (cfg.fallbackDns != null)
           "FallbackDNS=${concatStringsSep " " cfg.fallbackDns}"}
         ${optionalString (cfg.domains != [])
           "Domains=${concatStringsSep " " cfg.domains}"}
diff --git a/nixos/modules/system/boot/systemd/initrd-secrets.nix b/nixos/modules/system/boot/systemd/initrd-secrets.nix
index 7b59c0cbe7b84..d375238aa146e 100644
--- a/nixos/modules/system/boot/systemd/initrd-secrets.nix
+++ b/nixos/modules/system/boot/systemd/initrd-secrets.nix
@@ -11,7 +11,8 @@
       description = "Copy secrets into place";
       # Run as early as possible
       wantedBy = [ "sysinit.target" ];
-      before = [ "cryptsetup-pre.target" ];
+      before = [ "cryptsetup-pre.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
 
       # We write the secrets to /.initrd-secrets and move them because this allows
diff --git a/nixos/modules/system/boot/systemd/initrd.nix b/nixos/modules/system/boot/systemd/initrd.nix
index 0e7d59b32075b..4ae07944afc3c 100644
--- a/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixos/modules/system/boot/systemd/initrd.nix
@@ -90,8 +90,6 @@ let
 
   fileSystems = filter utils.fsNeededForBoot config.system.build.fileSystems;
 
-  needMakefs = lib.any (fs: fs.autoFormat) fileSystems;
-
   kernel-name = config.boot.kernelPackages.kernel.name or "kernel";
   modulesTree = config.system.modulesTree.override { name = kernel-name + "-modules"; };
   firmware = config.hardware.firmware;
@@ -398,8 +396,7 @@ in {
           ManagerEnvironment=${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "${n}=${lib.escapeShellArg v}") cfg.managerEnvironment)}
         '';
 
-        "/lib/modules".source = "${modulesClosure}/lib/modules";
-        "/lib/firmware".source = "${modulesClosure}/lib/firmware";
+        "/lib".source = "${modulesClosure}/lib";
 
         "/etc/modules-load.d/nixos.conf".text = concatStringsSep "\n" config.boot.initrd.kernelModules;
 
@@ -430,7 +427,7 @@ in {
         "${cfg.package}/lib/systemd/systemd-fsck"
         "${cfg.package}/lib/systemd/systemd-hibernate-resume"
         "${cfg.package}/lib/systemd/systemd-journald"
-        (lib.mkIf needMakefs "${cfg.package}/lib/systemd/systemd-makefs")
+        "${cfg.package}/lib/systemd/systemd-makefs"
         "${cfg.package}/lib/systemd/systemd-modules-load"
         "${cfg.package}/lib/systemd/systemd-remount-fs"
         "${cfg.package}/lib/systemd/systemd-shutdown"
diff --git a/nixos/modules/system/boot/systemd/oomd.nix b/nixos/modules/system/boot/systemd/oomd.nix
index fad755e278c77..000b18c01609a 100644
--- a/nixos/modules/system/boot/systemd/oomd.nix
+++ b/nixos/modules/system/boot/systemd/oomd.nix
@@ -3,14 +3,18 @@
   cfg = config.systemd.oomd;
 
 in {
+  imports = [
+    (lib.mkRenamedOptionModule [ "systemd" "oomd" "enableUserServices" ] [ "systemd" "oomd" "enableUserSlices" ])
+  ];
+
   options.systemd.oomd = {
     enable = lib.mkEnableOption (lib.mdDoc "the `systemd-oomd` OOM killer") // { default = true; };
 
     # Fedora enables the first and third option by default. See the 10-oomd-* files here:
-    # https://src.fedoraproject.org/rpms/systemd/tree/acb90c49c42276b06375a66c73673ac351025597
+    # https://src.fedoraproject.org/rpms/systemd/tree/806c95e1c70af18f81d499b24cd7acfa4c36ffd6
     enableRootSlice = lib.mkEnableOption (lib.mdDoc "oomd on the root slice (`-.slice`)");
     enableSystemSlice = lib.mkEnableOption (lib.mdDoc "oomd on the system slice (`system.slice`)");
-    enableUserServices = lib.mkEnableOption (lib.mdDoc "oomd on all user services (`user@.service`)");
+    enableUserSlices = lib.mkEnableOption (lib.mdDoc "oomd on all user slices (`user@.slice`) and all user-owned slices");
 
     extraConfig = lib.mkOption {
       type = with lib.types; attrsOf (oneOf [ str int bool ]);
@@ -44,14 +48,24 @@ in {
     users.groups.systemd-oom = { };
 
     systemd.slices."-".sliceConfig = lib.mkIf cfg.enableRootSlice {
-      ManagedOOMSwap = "kill";
+      ManagedOOMMemoryPressure = "kill";
+      ManagedOOMMemoryPressureLimit = "80%";
     };
     systemd.slices."system".sliceConfig = lib.mkIf cfg.enableSystemSlice {
-      ManagedOOMSwap = "kill";
+      ManagedOOMMemoryPressure = "kill";
+      ManagedOOMMemoryPressureLimit = "80%";
     };
-    systemd.services."user@".serviceConfig = lib.mkIf cfg.enableUserServices {
+    systemd.slices."user-".sliceConfig = lib.mkIf cfg.enableUserSlices {
       ManagedOOMMemoryPressure = "kill";
-      ManagedOOMMemoryPressureLimit = "50%";
+      ManagedOOMMemoryPressureLimit = "80%";
+    };
+    systemd.user.units."slice" = lib.mkIf cfg.enableUserSlices {
+      text = ''
+        [Slice]
+        ManagedOOMMemoryPressure=kill
+        ManagedOOMMemoryPressureLimit=80%
+      '';
+      overrideStrategy = "asDropin";
     };
   };
 }
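Note: a sketch of the renamed option; thanks to the `mkRenamedOptionModule` above, configurations still using `enableUserServices` keep evaluating, with a rename warning.

    {
      systemd.oomd = {
        enable = true;            # already the default
        enableRootSlice = true;
        enableUserSlices = true;  # previously: enableUserServices
      };
    }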
diff --git a/nixos/modules/tasks/filesystems.nix b/nixos/modules/tasks/filesystems.nix
index 91e30aa4c0af9..1378a0090c1df 100644
--- a/nixos/modules/tasks/filesystems.nix
+++ b/nixos/modules/tasks/filesystems.nix
@@ -406,7 +406,8 @@ in
             ConditionVirtualization = "!container";
             DefaultDependencies = false; # needed to prevent a cycle
           };
-          before = [ "systemd-pstore.service" ];
+          before = [ "systemd-pstore.service" "shutdown.target" ];
+          conflicts = [ "shutdown.target" ];
           wantedBy = [ "systemd-pstore.service" ];
         };
       };
diff --git a/nixos/modules/tasks/filesystems/bcachefs.nix b/nixos/modules/tasks/filesystems/bcachefs.nix
index a9134b79b4a64..e771b706becea 100644
--- a/nixos/modules/tasks/filesystems/bcachefs.nix
+++ b/nixos/modules/tasks/filesystems/bcachefs.nix
@@ -78,9 +78,10 @@ let
     value = {
       description = "Unlock bcachefs for ${fs.mountPoint}";
       requiredBy = [ mountUnit ];
-      before = [ mountUnit ];
-      bindsTo = [ deviceUnit ];
       after = [ deviceUnit ];
+      before = [ mountUnit "shutdown.target" ];
+      bindsTo = [ deviceUnit ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       serviceConfig = {
         Type = "oneshot";
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index 784040f0ce9e3..b38f228fc1606 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -130,7 +130,8 @@ let
         "systemd-ask-password-console.service"
       ] ++ optional (config.boot.initrd.clevis.useTang) "network-online.target";
       requiredBy = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
-      before = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
+      before = getPoolMounts prefix pool ++ [ "shutdown.target" "zfs-import.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig = {
         DefaultDependencies = "no";
       };
@@ -508,9 +509,15 @@ in
     };
 
     services.zfs.zed = {
-      enableMail = mkEnableOption (lib.mdDoc "ZED's ability to send emails") // {
-        default = cfgZfs.package.enableMail;
-        defaultText = literalExpression "config.${optZfs.package}.enableMail";
+      enableMail = mkOption {
+        type = types.bool;
+        default = config.services.mail.sendmailSetuidWrapper != null;
+        defaultText = literalExpression ''
+          config.services.mail.sendmailSetuidWrapper != null
+        '';
+        description = mdDoc ''
+          Whether to enable ZED's ability to send emails.
+        '';
       };
 
       settings = mkOption {
@@ -551,14 +558,6 @@ in
           message = "The kernel module and the userspace tooling versions are not matching, this is an unsupported usecase.";
         }
         {
-          assertion = cfgZED.enableMail -> cfgZfs.package.enableMail;
-          message = ''
-            To allow ZED to send emails, ZFS needs to be configured to enable
-            this. To do so, one must override the `zfs` package and set
-            `enableMail` to true.
-          '';
-        }
-        {
           assertion = config.networking.hostId != null;
           message = "ZFS requires networking.hostId to be set";
         }
@@ -668,10 +667,17 @@ in
       # TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567. To not break people's bootloader and as probably not everybody would read release notes that thoroughly add inSystem.
       boot.loader.grub = mkIf (inInitrd || inSystem) {
         zfsSupport = true;
+        zfsPackage = cfgZfs.package;
       };
 
       services.zfs.zed.settings = {
-        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault "${pkgs.mailutils}/bin/mail");
+        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault (
+          config.security.wrapperDir + "/" +
+          config.services.mail.sendmailSetuidWrapper.program
+        ));
+        # subject in header for sendmail
+        ZED_EMAIL_OPTS = mkIf cfgZED.enableMail (mkDefault "@ADDRESS@");
+
         PATH = lib.makeBinPath [
           cfgZfs.package
           pkgs.coreutils
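Note: a hedged sketch of how ZED mail could be wired up under the new default: any module that provides `services.mail.sendmailSetuidWrapper` turns `enableMail` on automatically (msmtp is used here purely as an example), and `ZED_EMAIL_ADDR` is a standard zed.rc setting rather than part of this patch.

    {
      programs.msmtp = {
        enable = true;   # provides the setuid sendmail wrapper; SMTP account details elided
      };
      services.zfs.zed.settings = {
        ZED_EMAIL_ADDR = [ "root" ];
        ZED_NOTIFY_VERBOSE = true;
      };
    }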
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index 298add13437a0..2b2d24a64cb20 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -1396,6 +1396,8 @@ in
       "net.ipv4.conf.all.forwarding" = mkDefault (any (i: i.proxyARP) interfaces);
       "net.ipv6.conf.all.disable_ipv6" = mkDefault (!cfg.enableIPv6);
       "net.ipv6.conf.default.disable_ipv6" = mkDefault (!cfg.enableIPv6);
+      # allow all users to do ICMP echo requests (ping)
+      "net.ipv4.ping_group_range" = mkDefault "0 2147483647";
       # networkmanager falls back to "/proc/sys/net/ipv6/conf/default/use_tempaddr"
       "net.ipv6.conf.default.use_tempaddr" = tempaddrValues.${cfg.tempAddresses}.sysctl;
     } // listToAttrs (forEach interfaces
@@ -1408,9 +1410,11 @@ in
 
     systemd.services.domainname = lib.mkIf (cfg.domain != null) {
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       serviceConfig.ExecStart = ''${pkgs.nettools}/bin/domainname "${cfg.domain}"'';
+      serviceConfig.Type = "oneshot";
     };
 
     environment.etc.hostid = mkIf (cfg.hostId != null) { source = hostidFile; };
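Note: because the new sysctl is set with `mkDefault`, it remains easy to override; a sketch restoring the previous root-only ping behaviour:

    {
      # "1 0" is an empty range, i.e. no group may open unprivileged ICMP sockets.
      boot.kernel.sysctl."net.ipv4.ping_group_range" = "1 0";
    }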
diff --git a/nixos/modules/tasks/trackpoint.nix b/nixos/modules/tasks/trackpoint.nix
index d197a0feb337c..317613b847927 100644
--- a/nixos/modules/tasks/trackpoint.nix
+++ b/nixos/modules/tasks/trackpoint.nix
@@ -80,10 +80,17 @@ with lib;
         ACTION=="add|change", SUBSYSTEM=="input", ATTR{name}=="${cfg.device}", ATTR{device/speed}="${toString cfg.speed}", ATTR{device/sensitivity}="${toString cfg.sensitivity}"
       '';
 
-      system.activationScripts.trackpoint =
-        ''
-          ${config.systemd.package}/bin/udevadm trigger --attr-match=name="${cfg.device}"
+      systemd.services.trackpoint = {
+        wantedBy = [ "sysinit.target" ];
+        before = [ "sysinit.target" "shutdown.target" ];
+        conflicts = [ "shutdown.target" ];
+        unitConfig.DefaultDependencies = false;
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.ExecStart = ''
+          ${config.systemd.package}/bin/udevadm trigger --attr-match=name="${cfg.device}"
         '';
+      };
     })
 
     (mkIf (cfg.emulateWheel) {
diff --git a/nixos/modules/virtualisation/azure-agent.nix b/nixos/modules/virtualisation/azure-agent.nix
index e712fac17a462..ac4cd752615da 100644
--- a/nixos/modules/virtualisation/azure-agent.nix
+++ b/nixos/modules/virtualisation/azure-agent.nix
@@ -202,6 +202,13 @@ in
 
     services.udev.packages = [ pkgs.waagent ];
 
+    # Provide waagent-shipped udev rules in initrd too.
+    boot.initrd.services.udev.packages = [ pkgs.waagent ];
+    # udev rules shell out to chmod, cut and readlink, which are all
+    # provided by pkgs.coreutils, which is in services.udev.path, but not
+    # boot.initrd.services.udev.binPackages.
+    boot.initrd.services.udev.binPackages = [ pkgs.coreutils ];
+
     networking.dhcpcd.persistent = true;
 
     services.logrotate = {
@@ -245,6 +252,27 @@ in
         pkgs.e2fsprogs
         pkgs.bash
 
+        pkgs.findutils
+        pkgs.gnugrep
+        pkgs.gnused
+        pkgs.iproute2
+        pkgs.iptables
+
+        # for hostname
+        pkgs.nettools
+
+        pkgs.openssh
+        pkgs.openssl
+        pkgs.parted
+
+        # for pidof
+        pkgs.procps
+
+        # for useradd, usermod
+        pkgs.shadow
+
+        pkgs.util-linux # for (u)mount, fdisk, sfdisk, mkswap
+
         # waagent's Microsoft.OSTCExtensions.VMAccessForLinux needs Python 3
         pkgs.python39
 
diff --git a/nixos/modules/virtualisation/incus.nix b/nixos/modules/virtualisation/incus.nix
index 47a5e462262d4..3e48f8873ed4f 100644
--- a/nixos/modules/virtualisation/incus.nix
+++ b/nixos/modules/virtualisation/incus.nix
@@ -5,7 +5,9 @@ let
   preseedFormat = pkgs.formats.yaml { };
 in
 {
-  meta.maintainers = [ lib.maintainers.adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   options = {
     virtualisation.incus = {
diff --git a/nixos/modules/virtualisation/lxc-container.nix b/nixos/modules/virtualisation/lxc-container.nix
index 4db4df02fe8c9..8d3a480e6dc8c 100644
--- a/nixos/modules/virtualisation/lxc-container.nix
+++ b/nixos/modules/virtualisation/lxc-container.nix
@@ -1,7 +1,9 @@
 { lib, config, pkgs, ... }:
 
 {
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   imports = [
     ./lxc-instance-common.nix
diff --git a/nixos/modules/virtualisation/lxc.nix b/nixos/modules/virtualisation/lxc.nix
index 5bd64a5f9a565..3febb4b4f2483 100644
--- a/nixos/modules/virtualisation/lxc.nix
+++ b/nixos/modules/virtualisation/lxc.nix
@@ -2,21 +2,19 @@
 
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
-
   cfg = config.virtualisation.lxc;
-
 in
 
 {
-  ###### interface
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   options.virtualisation.lxc = {
     enable =
-      mkOption {
-        type = types.bool;
+      lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description =
           lib.mdDoc ''
@@ -27,8 +25,8 @@ in
       };
 
     systemConfig =
-      mkOption {
-        type = types.lines;
+      lib.mkOption {
+        type = lib.types.lines;
         default = "";
         description =
           lib.mdDoc ''
@@ -38,8 +36,8 @@ in
       };
 
     defaultConfig =
-      mkOption {
-        type = types.lines;
+      lib.mkOption {
+        type = lib.types.lines;
         default = "";
         description =
           lib.mdDoc ''
@@ -49,8 +47,8 @@ in
       };
 
     usernetConfig =
-      mkOption {
-        type = types.lines;
+      lib.mkOption {
+        type = lib.types.lines;
         default = "";
         description =
           lib.mdDoc ''
@@ -62,7 +60,7 @@ in
 
   ###### implementation
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     environment.systemPackages = [ pkgs.lxc ];
     environment.etc."lxc/lxc.conf".text = cfg.systemConfig;
     environment.etc."lxc/lxc-usernet".text = cfg.usernetConfig;
diff --git a/nixos/modules/virtualisation/lxcfs.nix b/nixos/modules/virtualisation/lxcfs.nix
index fb0ba49f73044..b2eaec774a65c 100644
--- a/nixos/modules/virtualisation/lxcfs.nix
+++ b/nixos/modules/virtualisation/lxcfs.nix
@@ -2,18 +2,18 @@
 
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.virtualisation.lxc.lxcfs;
 in {
-  meta.maintainers = [ maintainers.mic92 ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   ###### interface
   options.virtualisation.lxc.lxcfs = {
     enable =
-      mkOption {
-        type = types.bool;
+      lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           This enables LXCFS, a FUSE filesystem for LXC.
@@ -27,7 +27,7 @@ in {
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     systemd.services.lxcfs = {
       description = "FUSE filesystem for LXC";
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/virtualisation/lxd-agent.nix b/nixos/modules/virtualisation/lxd-agent.nix
index 5bcc86e3bcbe9..8a2a1530eeb79 100644
--- a/nixos/modules/virtualisation/lxd-agent.nix
+++ b/nixos/modules/virtualisation/lxd-agent.nix
@@ -45,7 +45,9 @@ let
     chown -R root:root "$PREFIX"
   '';
 in {
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   options = {
     virtualisation.lxd.agent.enable = lib.mkEnableOption (lib.mdDoc "Enable LXD agent");
@@ -56,18 +58,28 @@ in {
     systemd.services.lxd-agent = {
       enable = true;
       wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.kmod pkgs.util-linux ];
+      before = [ "shutdown.target" ] ++ lib.optionals config.services.cloud-init.enable [
+        "cloud-init.target" "cloud-init.service" "cloud-init-local.service"
+      ];
+      conflicts = [ "shutdown.target" ];
+      path = [
+        pkgs.kmod
+        pkgs.util-linux
+
+        # allow `incus exec` to find system binaries
+        "/run/current-system/sw"
+      ];
 
       preStart = preStartScript;
 
       # avoid killing nixos-rebuild switch when executed through lxc exec
+      restartIfChanged = false;
       stopIfChanged = false;
 
       unitConfig = {
         Description = "LXD - agent";
         Documentation = "https://documentation.ubuntu.com/lxd/en/latest";
         ConditionPathExists = "/dev/virtio-ports/org.linuxcontainers.lxd";
-        Before = lib.optionals config.services.cloud-init.enable [ "cloud-init.target" "cloud-init.service" "cloud-init-local.service" ];
         DefaultDependencies = "no";
         StartLimitInterval = "60";
         StartLimitBurst = "10";
diff --git a/nixos/modules/virtualisation/lxd-virtual-machine.nix b/nixos/modules/virtualisation/lxd-virtual-machine.nix
index ba729465ec2f8..92434cb9babf4 100644
--- a/nixos/modules/virtualisation/lxd-virtual-machine.nix
+++ b/nixos/modules/virtualisation/lxd-virtual-machine.nix
@@ -6,6 +6,10 @@ let
     then "ttyS0"
     else "ttyAMA0"; # aarch64
 in {
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
+
   imports = [
     ./lxc-instance-common.nix
 
diff --git a/nixos/modules/virtualisation/lxd.nix b/nixos/modules/virtualisation/lxd.nix
index 6f628c4a6e328..c4c856d9be30d 100644
--- a/nixos/modules/virtualisation/lxd.nix
+++ b/nixos/modules/virtualisation/lxd.nix
@@ -6,12 +6,14 @@ let
   cfg = config.virtualisation.lxd;
   preseedFormat = pkgs.formats.yaml {};
 in {
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
+
   imports = [
     (lib.mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
   ];
 
-  ###### interface
-
   options = {
     virtualisation.lxd = {
       enable = lib.mkOption {
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index d92fd48a6103c..3d7f3ccb62f84 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -1256,6 +1256,8 @@ in
         unitConfig.RequiresMountsFor = "/sysroot/nix/.ro-store";
       }];
       services.rw-store = {
+        before = [ "shutdown.target" ];
+        conflicts = [ "shutdown.target" ];
         unitConfig = {
           DefaultDependencies = false;
           RequiresMountsFor = "/sysroot/nix/.rw-store";
diff --git a/nixos/modules/virtualisation/vmware-host.nix b/nixos/modules/virtualisation/vmware-host.nix
index 1eaa896fe0965..094114623a424 100644
--- a/nixos/modules/virtualisation/vmware-host.nix
+++ b/nixos/modules/virtualisation/vmware-host.nix
@@ -85,34 +85,43 @@ in
       };
     };
 
-    ###### wrappers activation script
+    # Services
 
-    system.activationScripts.vmwareWrappers =
-      lib.stringAfter [ "specialfs" "users" ]
-        ''
-          mkdir -p "${parentWrapperDir}"
-          chmod 755 "${parentWrapperDir}"
-          # We want to place the tmpdirs for the wrappers to the parent dir.
-          wrapperDir=$(mktemp --directory --tmpdir="${parentWrapperDir}" wrappers.XXXXXXXXXX)
-          chmod a+rx "$wrapperDir"
-          ${lib.concatStringsSep "\n" (vmwareWrappers)}
-          if [ -L ${wrapperDir} ]; then
-            # Atomically replace the symlink
-            # See https://axialcorps.com/2013/07/03/atomically-replacing-files-and-directories/
-            old=$(readlink -f ${wrapperDir})
-            if [ -e "${wrapperDir}-tmp" ]; then
-              rm --force --recursive "${wrapperDir}-tmp"
-            fi
-            ln --symbolic --force --no-dereference "$wrapperDir" "${wrapperDir}-tmp"
-            mv --no-target-directory "${wrapperDir}-tmp" "${wrapperDir}"
-            rm --force --recursive "$old"
-          else
-            # For initial setup
-            ln --symbolic "$wrapperDir" "${wrapperDir}"
+    systemd.services."vmware-wrappers" = {
+      description = "Create VMVare Wrappers";
+      wantedBy = [ "multi-user.target" ];
+      before = [
+        "vmware-authdlauncher.service"
+        "vmware-networks-configuration.service"
+        "vmware-networks.service"
+        "vmware-usbarbitrator.service"
+      ];
+      after = [ "systemd-sysusers.service" ];
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      script = ''
+        mkdir -p "${parentWrapperDir}"
+        chmod 755 "${parentWrapperDir}"
+        # We want to place the tmpdirs for the wrappers to the parent dir.
+        wrapperDir=$(mktemp --directory --tmpdir="${parentWrapperDir}" wrappers.XXXXXXXXXX)
+        chmod a+rx "$wrapperDir"
+        ${lib.concatStringsSep "\n" (vmwareWrappers)}
+        if [ -L ${wrapperDir} ]; then
+          # Atomically replace the symlink
+          # See https://axialcorps.com/2013/07/03/atomically-replacing-files-and-directories/
+          old=$(readlink -f ${wrapperDir})
+          if [ -e "${wrapperDir}-tmp" ]; then
+            rm --force --recursive "${wrapperDir}-tmp"
           fi
-        '';
-
-    # Services
+          ln --symbolic --force --no-dereference "$wrapperDir" "${wrapperDir}-tmp"
+          mv --no-target-directory "${wrapperDir}-tmp" "${wrapperDir}"
+          rm --force --recursive "$old"
+        else
+          # For initial setup
+          ln --symbolic "$wrapperDir" "${wrapperDir}"
+        fi
+      '';
+    };
 
     systemd.services."vmware-authdlauncher" = {
       description = "VMware Authentication Daemon";
diff --git a/nixos/modules/virtualisation/waydroid.nix b/nixos/modules/virtualisation/waydroid.nix
index b0e85b685083b..1f466c780cf22 100644
--- a/nixos/modules/virtualisation/waydroid.nix
+++ b/nixos/modules/virtualisation/waydroid.nix
@@ -32,7 +32,7 @@ in
     system.requiredKernelConfig = [
       (kCfg.isEnabled "ANDROID_BINDER_IPC")
       (kCfg.isEnabled "ANDROID_BINDERFS")
-      (kCfg.isEnabled "ASHMEM") # FIXME Needs memfd support instead on Linux 5.18 and waydroid 1.2.1
+      (kCfg.isEnabled "MEMFD_CREATE")
     ];
 
     /* NOTE: we always enable this flag even if CONFIG_PSI_DEFAULT_DISABLED is not on
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index 9b4b92be6f3ac..a2e141b5bcafd 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -5,7 +5,7 @@
 { nixpkgs ? { outPath = (import ../lib).cleanSource ./..; revCount = 56789; shortRev = "gfedcba"; }
 , stableBranch ? false
 , supportedSystems ? [ "aarch64-linux" "x86_64-linux" ]
-, limitedSupportedSystems ? [ "i686-linux" ]
+, limitedSupportedSystems ? [ ]
 }:
 
 let
@@ -90,6 +90,7 @@ in rec {
         (onSystems ["x86_64-linux"] "nixos.tests.installer.btrfsSubvols")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.luksroot")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.lvm")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.separateBootZfs")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.separateBootFat")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.separateBoot")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.simpleLabels")
@@ -167,6 +168,7 @@ in rec {
         (onFullSupported "nixos.tests.xfce")
         (onFullSupported "nixpkgs.emacs")
         (onFullSupported "nixpkgs.jdk")
+        (onSystems ["x86_64-linux"] "nixpkgs.mesa_i686") # i686 sanity check + useful
         ["nixpkgs.tarball"]
 
         # Ensure that nixpkgs-check-by-name is available in all release channels and nixos-unstable,
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index e0572e3bed9cd..02e3e91e2e3db 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -120,6 +120,7 @@ in {
   amazon-ssm-agent = handleTest ./amazon-ssm-agent.nix {};
   amd-sev = runTest ./amd-sev.nix;
   anbox = runTest ./anbox.nix;
+  angie-api = handleTest ./angie-api.nix {};
   anki-sync-server = handleTest ./anki-sync-server.nix {};
   anuko-time-tracker = handleTest ./anuko-time-tracker.nix {};
   apcupsd = handleTest ./apcupsd.nix {};
@@ -197,6 +198,7 @@ in {
   cntr = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cntr.nix {};
   cockpit = handleTest ./cockpit.nix {};
   cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
+  code-server = handleTest ./code-server.nix {};
   coder = handleTest ./coder.nix {};
   collectd = handleTest ./collectd.nix {};
   connman = handleTest ./connman.nix {};
@@ -255,6 +257,7 @@ in {
   dolibarr = handleTest ./dolibarr.nix {};
   domination = handleTest ./domination.nix {};
   dovecot = handleTest ./dovecot.nix {};
+  drawterm = discoverTests (import ./drawterm.nix);
   drbd = handleTest ./drbd.nix {};
   dublin-traceroute = handleTest ./dublin-traceroute.nix {};
   earlyoom = handleTestOn ["x86_64-linux"] ./earlyoom.nix {};
@@ -315,6 +318,7 @@ in {
   freetube = discoverTests (import ./freetube.nix);
   freshrss-sqlite = handleTest ./freshrss-sqlite.nix {};
   freshrss-pgsql = handleTest ./freshrss-pgsql.nix {};
+  freshrss-http-auth = handleTest ./freshrss-http-auth.nix {};
   frigate = handleTest ./frigate.nix {};
   frp = handleTest ./frp.nix {};
   frr = handleTest ./frr.nix {};
@@ -340,6 +344,7 @@ in {
   gnome-extensions = handleTest ./gnome-extensions.nix {};
   gnome-flashback = handleTest ./gnome-flashback.nix {};
   gnome-xorg = handleTest ./gnome-xorg.nix {};
+  gns3-server = handleTest ./gns3-server.nix {};
   gnupg = handleTest ./gnupg.nix {};
   go-neb = handleTest ./go-neb.nix {};
   gobgpd = handleTest ./gobgpd.nix {};
@@ -406,7 +411,7 @@ in {
   incus = pkgs.recurseIntoAttrs (handleTest ./incus { inherit handleTestOn; });
   influxdb = handleTest ./influxdb.nix {};
   influxdb2 = handleTest ./influxdb2.nix {};
-  initrd-network-openvpn = handleTest ./initrd-network-openvpn {};
+  initrd-network-openvpn = handleTestOn [ "x86_64-linux" "i686-linux" ] ./initrd-network-openvpn {};
   initrd-network-ssh = handleTest ./initrd-network-ssh {};
   initrd-luks-empty-passphrase = handleTest ./initrd-luks-empty-passphrase.nix {};
   initrdNetwork = handleTest ./initrd-network.nix {};
@@ -541,6 +546,7 @@ in {
   munin = handleTest ./munin.nix {};
   mutableUsers = handleTest ./mutable-users.nix {};
   mxisd = handleTest ./mxisd.nix {};
+  mympd = handleTest ./mympd.nix {};
   mysql = handleTest ./mysql/mysql.nix {};
   mysql-autobackup = handleTest ./mysql/mysql-autobackup.nix {};
   mysql-backup = handleTest ./mysql/mysql-backup.nix {};
@@ -583,6 +589,7 @@ in {
   nginx-njs = handleTest ./nginx-njs.nix {};
   nginx-proxyprotocol = handleTest ./nginx-proxyprotocol {};
   nginx-pubhtml = handleTest ./nginx-pubhtml.nix {};
+  nginx-redirectcode = handleTest ./nginx-redirectcode.nix {};
   nginx-sso = handleTest ./nginx-sso.nix {};
   nginx-status-page = handleTest ./nginx-status-page.nix {};
   nginx-tmpdir = handleTest ./nginx-tmpdir.nix {};
@@ -605,6 +612,7 @@ in {
   noto-fonts = handleTest ./noto-fonts.nix {};
   noto-fonts-cjk-qt-default-weight = handleTest ./noto-fonts-cjk-qt-default-weight.nix {};
   novacomd = handleTestOn ["x86_64-linux"] ./novacomd.nix {};
+  npmrc = handleTest ./npmrc.nix {};
   nscd = handleTest ./nscd.nix {};
   nsd = handleTest ./nsd.nix {};
   ntfy-sh = handleTest ./ntfy-sh.nix {};
@@ -716,6 +724,7 @@ in {
   qgis-ltr = handleTest ./qgis.nix { qgisPackage = pkgs.qgis-ltr; };
   qownnotes = handleTest ./qownnotes.nix {};
   quake3 = handleTest ./quake3.nix {};
+  quicktun = handleTest ./quicktun.nix {};
   quorum = handleTest ./quorum.nix {};
   rabbitmq = handleTest ./rabbitmq.nix {};
   radarr = handleTest ./radarr.nix {};
@@ -734,6 +743,7 @@ in {
   rosenpass = handleTest ./rosenpass.nix {};
   rshim = handleTest ./rshim.nix {};
   rspamd = handleTest ./rspamd.nix {};
+  rspamd-trainer = handleTest ./rspamd-trainer.nix {};
   rss2email = handleTest ./rss2email.nix {};
   rstudio-server = handleTest ./rstudio-server.nix {};
   rsyncd = handleTest ./rsyncd.nix {};
@@ -777,6 +787,7 @@ in {
   spark = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./spark {};
   sqlite3-to-mysql = handleTest ./sqlite3-to-mysql.nix {};
   sslh = handleTest ./sslh.nix {};
+  ssh-agent-auth = handleTest ./ssh-agent-auth.nix {};
   ssh-audit = handleTest ./ssh-audit.nix {};
   sssd = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd.nix {};
   sssd-ldap = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd-ldap.nix {};
@@ -787,6 +798,7 @@ in {
   step-ca = handleTestOn ["x86_64-linux"] ./step-ca.nix {};
   stratis = handleTest ./stratis {};
   strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
+  stub-ld = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./stub-ld.nix {};
   stunnel = handleTest ./stunnel.nix {};
   sudo = handleTest ./sudo.nix {};
   sudo-rs = handleTest ./sudo-rs.nix {};
@@ -826,7 +838,7 @@ in {
   systemd-initrd-vconsole = handleTest ./systemd-initrd-vconsole.nix {};
   systemd-initrd-networkd = handleTest ./systemd-initrd-networkd.nix {};
   systemd-initrd-networkd-ssh = handleTest ./systemd-initrd-networkd-ssh.nix {};
-  systemd-initrd-networkd-openvpn = handleTest ./initrd-network-openvpn { systemdStage1 = true; };
+  systemd-initrd-networkd-openvpn = handleTestOn [ "x86_64-linux" "i686-linux" ] ./initrd-network-openvpn { systemdStage1 = true; };
   systemd-initrd-vlan = handleTest ./systemd-initrd-vlan.nix {};
   systemd-journal = handleTest ./systemd-journal.nix {};
   systemd-machinectl = handleTest ./systemd-machinectl.nix {};
diff --git a/nixos/tests/anbox.nix b/nixos/tests/anbox.nix
index dfd6c13d93181..a00116536db7e 100644
--- a/nixos/tests/anbox.nix
+++ b/nixos/tests/anbox.nix
@@ -15,7 +15,7 @@
     test-support.displayManager.auto.user = "alice";
 
     virtualisation.anbox.enable = true;
-    boot.kernelPackages = pkgs.linuxPackages_5_15;
+    boot.kernelPackages = pkgs.linuxKernel.packages.linux_5_15;
     virtualisation.memorySize = 2500;
   };
 
diff --git a/nixos/tests/angie-api.nix b/nixos/tests/angie-api.nix
new file mode 100644
index 0000000000000..4c8d6b54247b1
--- /dev/null
+++ b/nixos/tests/angie-api.nix
@@ -0,0 +1,148 @@
+import ./make-test-python.nix ({lib, pkgs, ...}:
+let
+  hosts = ''
+    192.168.2.101 example.com
+    192.168.2.101 api.example.com
+    192.168.2.101 backend.example.com
+  '';
+
+in
+{
+  name = "angie-api";
+  meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
+
+  nodes = {
+    server = { pkgs, ... }: {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.101"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 80 ];
+      };
+
+      services.nginx = {
+        enable = true;
+        package = pkgs.angie;
+
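+        # Each upstream declares a shared-memory "zone" so the status API queried below can report its peers.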
+        upstreams = {
+          "backend-http" = {
+            servers = { "backend.example.com:8080" = { fail_timeout = "0"; }; };
+            extraConfig = ''
+              zone upstream 256k;
+            '';
+          };
+          "backend-socket" = {
+            servers = { "unix:/run/example.sock" = { fail_timeout = "0"; }; };
+            extraConfig = ''
+              zone upstream 256k;
+            '';
+          };
+        };
+
+        virtualHosts."api.example.com" = {
+          locations."/console/" = {
+            extraConfig = ''
+              api /status/;
+
+              allow 192.168.2.201;
+              deny all;
+            '';
+          };
+        };
+
+        virtualHosts."example.com" = {
+          locations."/test/" = {
+            root = lib.mkForce (pkgs.runCommandLocal "testdir" {} ''
+              mkdir -p "$out/test"
+              cat > "$out/test/index.html" <<EOF
+              <html><body>Hello World!</body></html>
+              EOF
+            '');
+            extraConfig = ''
+              status_zone test_zone;
+
+              allow 192.168.2.201;
+              deny all;
+            '';
+          };
+          locations."/test/locked/" = {
+            extraConfig = ''
+              status_zone test_zone;
+
+              deny all;
+            '';
+          };
+          locations."/test/error/" = {
+            extraConfig = ''
+              status_zone test_zone;
+
+              allow all;
+            '';
+          };
+          locations."/upstream-http/" = {
+            proxyPass = "http://backend-http";
+          };
+          locations."/upstream-socket/" = {
+            proxyPass = "http://backend-socket";
+          };
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.201"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("nginx")
+    server.wait_for_open_port(80)
+
+    # Check Angie version
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.angie.version' | grep '${pkgs.angie.version}'")
+
+    # Check access
+    client.succeed("curl --verbose --head http://api.example.com/console/ | grep 'HTTP/1.1 200'")
+    server.succeed("curl --verbose --head http://api.example.com/console/ | grep 'HTTP/1.1 403 Forbidden'")
+
+    # Check responses and requests
+    client.succeed("curl --verbose http://example.com/test/")
+    client.succeed("curl --verbose http://example.com/test/locked/")
+    client.succeed("curl --verbose http://example.com/test/locked/")
+    client.succeed("curl --verbose http://example.com/test/error/")
+    client.succeed("curl --verbose http://example.com/test/error/")
+    client.succeed("curl --verbose http://example.com/test/error/")
+    server.succeed("curl --verbose http://example.com/test/")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.location_zones.test_zone.responses.\"200\"' | grep '1'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.location_zones.test_zone.responses.\"403\"' | grep '3'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.location_zones.test_zone.responses.\"404\"' | grep '3'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.location_zones.test_zone.requests.total' | grep '7'")
+
+    # Check upstreams
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-http\".peers.\"192.168.2.101:8080\".state' | grep 'up'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-http\".peers.\"192.168.2.101:8080\".health.fails' | grep '0'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-socket\".peers.\"unix:/run/example.sock\".state' | grep 'up'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-socket\".peers.\"unix:/run/example.sock\".health.fails' | grep '0'")
+    client.succeed("curl --verbose http://example.com/upstream-http/")
+    client.succeed("curl --verbose http://example.com/upstream-socket/")
+    client.succeed("curl --verbose http://example.com/upstream-socket/")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-http\".peers.\"192.168.2.101:8080\".health.fails' | grep '1'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-socket\".peers.\"unix:/run/example.sock\".health.fails' | grep '2'")
+
+    server.shutdown()
+    client.shutdown()
+  '';
+})
diff --git a/nixos/tests/avahi.nix b/nixos/tests/avahi.nix
index c53a95903291c..d8f4d13340fbc 100644
--- a/nixos/tests/avahi.nix
+++ b/nixos/tests/avahi.nix
@@ -16,7 +16,7 @@ import ./make-test-python.nix {
     cfg = { ... }: {
       services.avahi = {
         enable = true;
-        nssmdns = true;
+        nssmdns4 = true;
         publish.addresses = true;
         publish.domain = true;
         publish.enable = true;
diff --git a/nixos/tests/bootspec.nix b/nixos/tests/bootspec.nix
index 9295500422a92..14928b2206251 100644
--- a/nixos/tests/bootspec.nix
+++ b/nixos/tests/bootspec.nix
@@ -112,10 +112,39 @@ in
 
       bootspec = json.loads(machine.succeed("jq -r '.\"org.nixos.bootspec.v1\"' /run/current-system/boot.json"))
 
-      assert all(key in bootspec for key in ('initrd', 'initrdSecrets')), "Bootspec should contain initrd or initrdSecrets field when initrd is enabled"
+      assert 'initrd' in bootspec, "Bootspec should contain initrd field when initrd is enabled"
+      assert 'initrdSecrets' not in bootspec, "Bootspec should not contain initrdSecrets when there's no initrdSecrets"
     '';
   };
 
+  # Check that initrd secrets create corresponding entries in bootspec.
+  initrd-secrets = makeTest {
+    name = "bootspec-with-initrd-secrets";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      imports = [ standard ];
+      environment.systemPackages = [ pkgs.jq ];
+      # initrd is probably enabled by default, but we want to make it explicit here.
+      boot.initrd.enable = true;
+      boot.initrd.secrets."/some/example" = pkgs.writeText "example-secret" "test";
+    };
+
+    testScript = ''
+      import json
+
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/boot.json")
+
+      bootspec = json.loads(machine.succeed("jq -r '.\"org.nixos.bootspec.v1\"' /run/current-system/boot.json"))
+
+      assert 'initrdSecrets' in bootspec, "Bootspec should contain an 'initrdSecrets' field given there's an initrd secret"
+    '';
+  };
+
+
   # Check that specialisations create corresponding entries in bootspec.
   specialisation = makeTest {
     name = "bootspec-with-specialisation";
diff --git a/nixos/tests/btrbk-section-order.nix b/nixos/tests/btrbk-section-order.nix
index 20f1afcf80ec7..6082de947f66f 100644
--- a/nixos/tests/btrbk-section-order.nix
+++ b/nixos/tests/btrbk-section-order.nix
@@ -29,10 +29,12 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
   };
 
   testScript = ''
+    import difflib
     machine.wait_for_unit("basic.target")
-    got = machine.succeed("cat /etc/btrbk/local.conf")
+    got = machine.succeed("cat /etc/btrbk/local.conf").strip()
     expect = """
     backend btrfs-progs-sudo
+    stream_compress no
     timestamp_format long
     target ssh://global-target/
      ssh_user root
@@ -46,6 +48,9 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
        ssh_user root
     """.strip()
     print(got)
+    if got != expect:
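+      # Print a unified diff on mismatch so the assertion failure below is easier to debug.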
+      diff = difflib.unified_diff(expect.splitlines(keepends=True), got.splitlines(keepends=True), fromfile="expected", tofile="got")
+      print("".join(diff))
     assert got == expect
   '';
 })
diff --git a/nixos/tests/btrbk.nix b/nixos/tests/btrbk.nix
index 5261321dfa2c5..403c9595530d8 100644
--- a/nixos/tests/btrbk.nix
+++ b/nixos/tests/btrbk.nix
@@ -27,7 +27,6 @@ import ./make-test-python.nix ({ pkgs, ... }:
         # don't do it with real ssh keys.
         environment.etc."btrbk_key".text = privateKey;
         services.btrbk = {
-          extraPackages = [ pkgs.lz4 ];
           instances = {
             remote = {
               onCalendar = "minutely";
diff --git a/nixos/tests/buildbot.nix b/nixos/tests/buildbot.nix
index dbf68aba9467f..2f6926313b7cd 100644
--- a/nixos/tests/buildbot.nix
+++ b/nixos/tests/buildbot.nix
@@ -104,5 +104,5 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         bbworker.fail("nc -z bbmaster 8011")
   '';
 
-  meta.maintainers = with pkgs.lib.maintainers; [ ];
+  meta.maintainers = pkgs.lib.teams.buildbot.members;
 })
diff --git a/nixos/tests/caddy.nix b/nixos/tests/caddy.nix
index 5a0d3539394b6..41d8e57de4686 100644
--- a/nixos/tests/caddy.nix
+++ b/nixos/tests/caddy.nix
@@ -48,11 +48,19 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           };
         };
       };
+      specialisation.explicit-config-file.configuration = {
+        services.caddy.configFile = pkgs.writeText "Caddyfile" ''
+        localhost:80
+
+        respond "hello world"
+        '';
+      };
     };
   };
 
   testScript = { nodes, ... }:
     let
+      explicitConfigFile = "${nodes.webserver.system.build.toplevel}/specialisation/explicit-config-file";
       justReloadSystem = "${nodes.webserver.system.build.toplevel}/specialisation/config-reload";
       multipleConfigs = "${nodes.webserver.system.build.toplevel}/specialisation/multiple-configs";
       rfc42Config = "${nodes.webserver.system.build.toplevel}/specialisation/rfc42";
@@ -84,5 +92,12 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           )
           webserver.wait_for_open_port(80)
           webserver.succeed("curl http://localhost | grep hello")
+
+      with subtest("explicit configFile"):
+          webserver.succeed(
+              "${explicitConfigFile}/bin/switch-to-configuration test >&2"
+          )
+          webserver.wait_for_open_port(80)
+          webserver.succeed("curl http://localhost | grep hello")
     '';
 })
diff --git a/nixos/tests/ceph-single-node.nix b/nixos/tests/ceph-single-node.nix
index 4a5636fac1563..a3a4072365af8 100644
--- a/nixos/tests/ceph-single-node.nix
+++ b/nixos/tests/ceph-single-node.nix
@@ -182,16 +182,19 @@ let
     monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
     monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
 
+    # This check has been commented out due to an upstream pyo3 issue
+    # that has broken the dashboard.
+    # Reference: https://www.spinics.net/lists/ceph-users/msg77812.html
     # Enable the dashboard and recheck health
-    monA.succeed(
-        "ceph mgr module enable dashboard",
-        "ceph config set mgr mgr/dashboard/ssl false",
-        # default is 8080 but it's better to be explicit
-        "ceph config set mgr mgr/dashboard/server_port 8080",
-    )
-    monA.wait_for_open_port(8080)
-    monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
-    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    # monA.succeed(
+    #     "ceph mgr module enable dashboard",
+    #     "ceph config set mgr mgr/dashboard/ssl false",
+    #     # default is 8080 but it's better to be explicit
+    #     "ceph config set mgr mgr/dashboard/server_port 8080",
+    # )
+    # monA.wait_for_open_port(8080)
+    # monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
+    # monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
   '';
 in {
   name = "basic-single-node-ceph-cluster";
diff --git a/nixos/tests/cinnamon-wayland.nix b/nixos/tests/cinnamon-wayland.nix
index 58dddbbb0866a..824a606004cc0 100644
--- a/nixos/tests/cinnamon-wayland.nix
+++ b/nixos/tests/cinnamon-wayland.nix
@@ -12,6 +12,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       autoLogin.user = nodes.machine.users.users.alice.name;
       defaultSession = "cinnamon-wayland";
     };
+
+    # For the sessionPath subtest.
+    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gnome.gpaste ];
   };
 
   enableOCR = true;
@@ -47,6 +50,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'")
           machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'")
 
+      with subtest("Check if sessionPath option actually works"):
+          machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste")
+
       with subtest("Open Cinnamon Settings"):
           machine.succeed("${su "cinnamon-settings themes >&2 &"}")
           machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'")
diff --git a/nixos/tests/cinnamon.nix b/nixos/tests/cinnamon.nix
index 7637b55a2b124..eab907d0b712c 100644
--- a/nixos/tests/cinnamon.nix
+++ b/nixos/tests/cinnamon.nix
@@ -7,6 +7,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     imports = [ ./common/user-account.nix ];
     services.xserver.enable = true;
     services.xserver.desktopManager.cinnamon.enable = true;
+
+    # For the sessionPath subtest.
+    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gnome.gpaste ];
   };
 
   enableOCR = true;
@@ -49,6 +52,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'")
           machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'")
 
+      with subtest("Check if sessionPath option actually works"):
+          machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste")
+
       with subtest("Open Cinnamon Settings"):
           machine.succeed("${su "cinnamon-settings themes >&2 &"}")
           machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'")
diff --git a/nixos/tests/code-server.nix b/nixos/tests/code-server.nix
new file mode 100644
index 0000000000000..7d523dfc617e3
--- /dev/null
+++ b/nixos/tests/code-server.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+{
+  name = "code-server";
+
+  nodes = {
+    machine = {pkgs, ...}: {
+      services.code-server = {
+        enable = true;
+        auth = "none";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("code-server.service")
+    machine.wait_for_open_port(4444)
+    machine.succeed("curl -k --fail http://localhost:4444", timeout=10)
+  '';
+
+  meta.maintainers = [ lib.maintainers.drupol ];
+})
diff --git a/nixos/tests/containers-custom-pkgs.nix b/nixos/tests/containers-custom-pkgs.nix
index e8740ac631345..57184787c85f6 100644
--- a/nixos/tests/containers-custom-pkgs.nix
+++ b/nixos/tests/containers-custom-pkgs.nix
@@ -9,7 +9,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
 in {
   name = "containers-custom-pkgs";
   meta = {
-    maintainers = with lib.maintainers; [ adisbladis erikarvstedt ];
+    maintainers = with lib.maintainers; [ erikarvstedt ];
   };
 
   nodes.machine = { config, ... }: {
diff --git a/nixos/tests/containers-imperative.nix b/nixos/tests/containers-imperative.nix
index 18bec1db78e88..fff00e4f73a85 100644
--- a/nixos/tests/containers-imperative.nix
+++ b/nixos/tests/containers-imperative.nix
@@ -13,6 +13,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       nix.settings.sandbox = false;
       nix.settings.substituters = []; # don't try to access cache.nixos.org
 
+      virtualisation.memorySize = 2048;
       virtualisation.writableStore = true;
       # Make sure we always have all the required dependencies for creating a
       # container available within the VM, because we don't have network access.
diff --git a/nixos/tests/dhparams.nix b/nixos/tests/dhparams.nix
index 021042fafdb10..8d7082c114001 100644
--- a/nixos/tests/dhparams.nix
+++ b/nixos/tests/dhparams.nix
@@ -18,6 +18,8 @@ import ./make-test-python.nix {
         systemd.services.foo = {
           description = "Check systemd Ordering";
           wantedBy = [ "multi-user.target" ];
+          before = [ "shutdown.target" ];
+          conflicts = [ "shutdown.target" ];
           unitConfig = {
             # This is to make sure that the dhparams generation of foo occurs
             # before this service so we need this service to start as early as
diff --git a/nixos/tests/docker-tools.nix b/nixos/tests/docker-tools.nix
index fcdfa586fd55d..90af817e75ed3 100644
--- a/nixos/tests/docker-tools.nix
+++ b/nixos/tests/docker-tools.nix
@@ -11,7 +11,7 @@ let
       # Rootfs diffs for layers 1 and 2 are identical (and empty)
       layer1 = pkgs.dockerTools.buildImage {  name = "empty";  };
       layer2 = layer1.overrideAttrs (_: { fromImage = layer1; });
-      repeatedRootfsDiffs = pkgs.runCommandNoCC "image-with-links.tar" {
+      repeatedRootfsDiffs = pkgs.runCommand "image-with-links.tar" {
         nativeBuildInputs = [pkgs.jq];
       } ''
         mkdir contents
diff --git a/nixos/tests/drawterm.nix b/nixos/tests/drawterm.nix
new file mode 100644
index 0000000000000..1d444bb55433b
--- /dev/null
+++ b/nixos/tests/drawterm.nix
@@ -0,0 +1,58 @@
+{ system, pkgs }:
+let
+  tests = {
+    xorg = {
+      node = { pkgs, ... }: {
+        imports = [ ./common/user-account.nix ./common/x11.nix ];
+        services.xserver.enable = true;
+        services.xserver.displayManager.sessionCommands = ''
+          ${pkgs.drawterm}/bin/drawterm -g 1024x768 &
+        '';
+        test-support.displayManager.auto.user = "alice";
+      };
+      systems = [ "x86_64-linux" "aarch64-linux" ];
+    };
+    wayland = {
+      node = { pkgs, ... }: {
+        imports = [ ./common/wayland-cage.nix ];
+        services.cage.program = "${pkgs.drawterm-wayland}/bin/drawterm";
+      };
+      systems = [ "x86_64-linux" ];
+    };
+  };
+
+  mkTest = name: machine:
+    import ./make-test-python.nix ({ pkgs, ... }: {
+      inherit name;
+
+      nodes = { "${name}" = machine; };
+
+      meta = with pkgs.lib.maintainers; {
+        maintainers = [ moody ];
+      };
+
+      enableOCR = true;
+
+      testScript = ''
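+        # The test driver re-polls this condition; drawterm_running.wait() below blocks until "pgrep drawterm" succeeds.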
+        @polling_condition
+        def drawterm_running():
+            machine.succeed("pgrep drawterm")
+
+        start_all()
+
+        machine.wait_for_unit("graphical.target")
+        drawterm_running.wait() # type: ignore[union-attr]
+        machine.wait_for_text("cpu")
+        machine.send_chars("cpu\n")
+        machine.wait_for_text("auth")
+        machine.send_chars("cpu\n")
+        machine.wait_for_text("ending")
+        machine.screenshot("out.png")
+      '';
+
+    });
+  mkTestOn = systems: name: machine:
+    if pkgs.lib.elem system systems then mkTest name machine
+    else { ... }: { };
+in
+builtins.mapAttrs (k: v: mkTestOn v.systems k v.node { inherit system; }) tests
diff --git a/nixos/tests/frr.nix b/nixos/tests/frr.nix
index 598d7a7d28675..0d1a6a694a82c 100644
--- a/nixos/tests/frr.nix
+++ b/nixos/tests/frr.nix
@@ -29,7 +29,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
       name = "frr";
 
       meta = with pkgs.lib.maintainers; {
-        maintainers = [ hexa ];
+        maintainers = [ ];
       };
 
       nodes = {
diff --git a/nixos/tests/ft2-clone.nix b/nixos/tests/ft2-clone.nix
index a8395d4ebaa62..5476b38c00bd2 100644
--- a/nixos/tests/ft2-clone.nix
+++ b/nixos/tests/ft2-clone.nix
@@ -4,12 +4,11 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     maintainers = [ fgaz ];
   };
 
-  nodes.machine = { config, pkgs, ... }: {
+  nodes.machine = { pkgs, ... }: {
     imports = [
       ./common/x11.nix
     ];
 
-    services.xserver.enable = true;
     sound.enable = true;
     environment.systemPackages = [ pkgs.ft2-clone ];
   };
@@ -30,4 +29,3 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       machine.screenshot("screen")
     '';
 })
-
diff --git a/nixos/tests/gitlab.nix b/nixos/tests/gitlab.nix
index 88cd774f815a5..8d31264253119 100644
--- a/nixos/tests/gitlab.nix
+++ b/nixos/tests/gitlab.nix
@@ -34,7 +34,7 @@ in {
     gitlab = { ... }: {
       imports = [ common/user-account.nix ];
 
-      virtualisation.memorySize = if pkgs.stdenv.is64bit then 4096 else 2047;
+      virtualisation.memorySize = 6144;
       virtualisation.cores = 4;
       virtualisation.useNixStoreImage = true;
       virtualisation.writableStore = false;
diff --git a/nixos/tests/gns3-server.nix b/nixos/tests/gns3-server.nix
new file mode 100644
index 0000000000000..e37d751f5f640
--- /dev/null
+++ b/nixos/tests/gns3-server.nix
@@ -0,0 +1,55 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "gns3-server";
+  meta.maintainers = [ lib.maintainers.anthonyroussel ];
+
+  nodes.machine =
+    { ... }:
+    let
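+      # Throwaway self-signed certificate, used for the ssl.* options and trusted via security.pki below.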
+      tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+        openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -days 365 \
+          -subj '/CN=localhost'
+        install -D -t $out key.pem cert.pem
+      '';
+    in {
+      services.gns3-server = {
+        enable = true;
+        auth = {
+          enable = true;
+          user = "user";
+          passwordFile = pkgs.writeText "gns3-auth-password-file" "password";
+        };
+        ssl = {
+          enable = true;
+          certFile = "${tls-cert}/cert.pem";
+          keyFile = "${tls-cert}/key.pem";
+        };
+        dynamips.enable = true;
+        ubridge.enable = true;
+        vpcs.enable = true;
+      };
+
+      security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
+    };
+
+  testScript = let
+    createProject = pkgs.writeText "createProject.json" (builtins.toJSON {
+      name = "test_project";
+    });
+  in
+  ''
+    start_all()
+
+    machine.wait_for_unit("gns3-server.service")
+    machine.wait_for_open_port(3080)
+
+    with subtest("server is listening"):
+      machine.succeed("curl -sSfL -u user:password https://localhost:3080/v2/version")
+
+    with subtest("create dummy project"):
+      machine.succeed("curl -sSfL -u user:password https://localhost:3080/v2/projects -d @${createProject}")
+
+    with subtest("logging works"):
+      log_path = "/var/log/gns3/server.log"
+      machine.wait_for_file(log_path)
+  '';
+})
diff --git a/nixos/tests/google-oslogin/default.nix b/nixos/tests/google-oslogin/default.nix
index 72c87d7153bdf..cd05af6b9ed7a 100644
--- a/nixos/tests/google-oslogin/default.nix
+++ b/nixos/tests/google-oslogin/default.nix
@@ -12,7 +12,7 @@ let
 in {
   name = "google-oslogin";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ adisbladis flokli ];
+    maintainers = [ flokli ];
   };
 
   nodes = {
@@ -71,4 +71,3 @@ in {
     )
   '';
   })
-
diff --git a/nixos/tests/guix/basic.nix b/nixos/tests/guix/basic.nix
index 7f90bdeeb1e0f..9b943b8965e60 100644
--- a/nixos/tests/guix/basic.nix
+++ b/nixos/tests/guix/basic.nix
@@ -11,7 +11,10 @@ import ../make-test-python.nix ({ lib, pkgs, ... }: {
 
   nodes.machine = { config, ... }: {
     environment.etc."guix/scripts".source = ./scripts;
-    services.guix.enable = true;
+    services.guix = {
+      enable = true;
+      gc.enable = true;
+    };
   };
 
   testScript = ''
@@ -19,6 +22,7 @@ import ../make-test-python.nix ({ lib, pkgs, ... }: {
 
     machine.wait_for_unit("multi-user.target")
     machine.wait_for_unit("guix-daemon.service")
+    machine.succeed("systemctl start guix-gc.service")
 
     # Can't do much here since the environment has restricted network access.
     with subtest("Guix basic package management"):
diff --git a/nixos/tests/guix/publish.nix b/nixos/tests/guix/publish.nix
index 6dbe8f99ebd68..a15e00b0fa981 100644
--- a/nixos/tests/guix/publish.nix
+++ b/nixos/tests/guix/publish.nix
@@ -16,7 +16,7 @@ in {
       # substitute server which requires Avahi.
       services.avahi = {
         enable = true;
-        nssmdns = true;
+        nssmdns4 = true;
         publish = {
           enable = true;
           userServices = true;
diff --git a/nixos/tests/harmonia.nix b/nixos/tests/harmonia.nix
index 6cf9ad4d23358..a9beac82f8e12 100644
--- a/nixos/tests/harmonia.nix
+++ b/nixos/tests/harmonia.nix
@@ -13,6 +13,9 @@
 
       networking.firewall.allowedTCPPorts = [ 5000 ];
       system.extraDependencies = [ pkgs.emptyFile ];
+
+      # check that extra-allowed-users is effective for harmonia
+      nix.settings.allowed-users = [];
     };
 
     client01 = {
diff --git a/nixos/tests/home-assistant.nix b/nixos/tests/home-assistant.nix
index e1588088ba198..05fb2fa1e06aa 100644
--- a/nixos/tests/home-assistant.nix
+++ b/nixos/tests/home-assistant.nix
@@ -43,7 +43,7 @@ in {
 
       # test loading custom components
       customComponents = with pkgs.home-assistant-custom-components; [
-        prometheus-sensor
+        prometheus_sensor
       ];
 
       # test loading lovelace modules
@@ -182,7 +182,7 @@ in {
         hass.wait_until_succeeds("journalctl -u home-assistant.service | grep -q 'We found a custom integration prometheus_sensor which has not been tested by Home Assistant'")
 
     with subtest("Check that lovelace modules are referenced and fetchable"):
-        hass.succeed("grep -q 'mini-graph-card-bundle.js' '${configDir}/ui-lovelace.yaml'")
+        hass.succeed("grep -q 'mini-graph-card-bundle.js' '${configDir}/configuration.yaml'")
         hass.succeed("curl --fail http://localhost:8123/local/nixos-lovelace-modules/mini-graph-card-bundle.js")
 
     with subtest("Check that optional dependencies are in the PYTHONPATH"):
diff --git a/nixos/tests/incron.nix b/nixos/tests/incron.nix
index c978ff27dfad5..d016360ba0ef8 100644
--- a/nixos/tests/incron.nix
+++ b/nixos/tests/incron.nix
@@ -13,9 +13,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       '';
 
       # ensure the directory to be monitored exists before incron is started
-      system.activationScripts.incronTest = ''
-        mkdir /test
-      '';
+      systemd.tmpfiles.settings.incron-test = {
+        "/test".d = { };
+      };
     };
 
   testScript = ''
diff --git a/nixos/tests/incus/container.nix b/nixos/tests/incus/container.nix
index 49a22c08aad1c..2fa1709c7484b 100644
--- a/nixos/tests/incus/container.nix
+++ b/nixos/tests/incus/container.nix
@@ -14,7 +14,9 @@ in
 {
   name = "incus-container";
 
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   nodes.machine = { ... }: {
     virtualisation = {
@@ -54,6 +56,10 @@ in
           retry(instance_is_up)
         machine.succeed("echo true | incus exec container /run/current-system/sw/bin/bash -")
 
+    with subtest("Container mounts lxcfs overlays"):
+        machine.succeed("incus exec container mount | grep 'lxcfs on /proc/cpuinfo type fuse.lxcfs'")
+        machine.succeed("incus exec container mount | grep 'lxcfs on /proc/meminfo type fuse.lxcfs'")
+
     with subtest("Container CPU limits can be managed"):
         set_container("limits.cpu 1")
         cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
diff --git a/nixos/tests/incus/preseed.nix b/nixos/tests/incus/preseed.nix
index 47b2d0cd62284..a488d71f3c92a 100644
--- a/nixos/tests/incus/preseed.nix
+++ b/nixos/tests/incus/preseed.nix
@@ -3,7 +3,9 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
 {
   name = "incus-preseed";
 
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   nodes.machine = { lib, ... }: {
     virtualisation = {
diff --git a/nixos/tests/incus/socket-activated.nix b/nixos/tests/incus/socket-activated.nix
index 4d25b26a15f5d..fca536b7054f0 100644
--- a/nixos/tests/incus/socket-activated.nix
+++ b/nixos/tests/incus/socket-activated.nix
@@ -3,7 +3,9 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
 {
   name = "incus-socket-activated";
 
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   nodes.machine = { lib, ... }: {
     virtualisation = {
diff --git a/nixos/tests/incus/virtual-machine.nix b/nixos/tests/incus/virtual-machine.nix
index bfa116679d43b..343a25ca72970 100644
--- a/nixos/tests/incus/virtual-machine.nix
+++ b/nixos/tests/incus/virtual-machine.nix
@@ -19,7 +19,9 @@ in
 {
   name = "incus-virtual-machine";
 
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   nodes.machine = {...}: {
     virtualisation = {
@@ -51,5 +53,8 @@ in
 
     with subtest("lxd-agent is started"):
         machine.succeed("incus exec ${instance-name} systemctl is-active lxd-agent")
+
+    with subtest("lxd-agent has a valid path"):
+        machine.succeed("incus exec ${instance-name} -- bash -c 'true'")
   '';
 })
diff --git a/nixos/tests/initrd-network-openvpn/default.nix b/nixos/tests/initrd-network-openvpn/default.nix
index 769049905eb8c..69db7dd1037f7 100644
--- a/nixos/tests/initrd-network-openvpn/default.nix
+++ b/nixos/tests/initrd-network-openvpn/default.nix
@@ -59,18 +59,19 @@ import ../make-test-python.nix ({ lib, ...}:
 
             # This command does not fork to keep the VM in the state where
             # only the initramfs is loaded
-            preLVMCommands =
-            ''
-              /bin/nc -p 1234 -lke /bin/echo TESTVALUE
-            '';
+            preLVMCommands = lib.mkIf (!systemdStage1)
+              ''
+                /bin/nc -p 1234 -lke /bin/echo TESTVALUE
+              '';
 
             network = {
               enable = true;
 
               # Work around udhcpc only getting a lease on eth0
-              postCommands = ''
-                /bin/ip addr add 192.168.1.2/24 dev eth1
-              '';
+              postCommands = lib.mkIf (!systemdStage1)
+                ''
+                  /bin/ip addr add 192.168.1.2/24 dev eth1
+                '';
 
               # Example configuration for OpenVPN
               # This is the main reason for this test
diff --git a/nixos/tests/installed-tests/flatpak.nix b/nixos/tests/installed-tests/flatpak.nix
index 9524d890c4025..fa191202f52d4 100644
--- a/nixos/tests/installed-tests/flatpak.nix
+++ b/nixos/tests/installed-tests/flatpak.nix
@@ -7,6 +7,7 @@ makeInstalledTest {
   testConfig = {
     xdg.portal.enable = true;
     xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
+    xdg.portal.config.common.default = "gtk";
     services.flatpak.enable = true;
     environment.systemPackages = with pkgs; [ gnupg ostree python3 ];
     virtualisation.memorySize = 2047;
diff --git a/nixos/tests/installer-systemd-stage-1.nix b/nixos/tests/installer-systemd-stage-1.nix
index d0c01a779ef18..662017935412c 100644
--- a/nixos/tests/installer-systemd-stage-1.nix
+++ b/nixos/tests/installer-systemd-stage-1.nix
@@ -22,6 +22,7 @@
     # lvm
     separateBoot
     separateBootFat
+    separateBootZfs
     simple
     simpleLabels
     simpleProvided
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index 17b6aeca1b438..eff2ba05d2f1a 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -878,6 +878,78 @@ in {
     '';
   };
 
+  # Same as the previous, but with ZFS /boot.
+  separateBootZfs = makeInstallerTest "separateBootZfs" {
+    extraInstallerConfig = {
+      boot.supportedFilesystems = [ "zfs" ];
+    };
+
+    extraConfig = ''
+      # Using by-uuid overrides the default of by-id, and is unique
+      # to the qemu disks, as they don't produce by-id paths for
+      # some reason.
+      boot.zfs.devNodes = "/dev/disk/by-uuid/";
+      networking.hostId = "00000000";
+    '';
+
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary ext2 1M 256MB"   # /boot
+          + " mkpart primary linux-swap 256MB 1280M"
+          + " mkpart primary ext2 1280M -1s", # /
+          "udevadm settle",
+
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+
+          "mkfs.ext4 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+
+          # Use as many ZFS features as possible to verify that GRUB can handle them
+          "zpool create"
+            " -o compatibility=grub2"
+            " -O utf8only=on"
+            " -O normalization=formD"
+            " -O compression=lz4"      # Activate the lz4_compress feature
+            " -O xattr=sa"
+            " -O acltype=posixacl"
+            " bpool /dev/vda1",
+          "zfs create"
+            " -o recordsize=1M"        # Prepare activating the large_blocks feature
+            " -o mountpoint=legacy"
+            " -o relatime=on"
+            " -o quota=1G"
+            " -o filesystem_limit=100" # Activate the filesystem_limits features
+            " bpool/boot",
+
+          # Snapshotting the top-level dataset would trigger a bug in GRUB2: https://github.com/openzfs/zfs/issues/13873
+          "zfs snapshot bpool/boot@snap-1",                     # Prepare activating the livelist and bookmarks features
+          "zfs clone bpool/boot@snap-1 bpool/test",             # Activate the livelist feature
+          "zfs bookmark bpool/boot@snap-1 bpool/boot#bookmark", # Activate the bookmarks feature
+          "zpool checkpoint bpool",                             # Activate the zpool_checkpoint feature
+          "mkdir -p /mnt/boot",
+          "mount -t zfs bpool/boot /mnt/boot",
+          "touch /mnt/boot/empty",                              # Activate zilsaxattr feature
+          "dd if=/dev/urandom of=/mnt/boot/test bs=1M count=1", # Activate the large_blocks feature
+
+          # Print out all enabled and active ZFS features (and some other stuff)
+          "sync /mnt/boot",
+          "zpool get all bpool >&2",
+
+          # Abort early if GRUB2 doesn't like the disks
+          "grub-probe --target=device /mnt/boot >&2",
+      )
+    '';
+
+    # umount & export bpool before shutdown
+    # this is a fix for "cannot import 'bpool': pool was previously in use from another system."
+    postInstallCommands = ''
+      machine.succeed("umount /mnt/boot")
+      machine.succeed("zpool export bpool")
+    '';
+  };
+
   # zfs on / with swap
   zfsroot = makeInstallerTest "zfs-root" {
     extraInstallerConfig = {
@@ -897,7 +969,7 @@ in {
     createPartitions = ''
       machine.succeed(
           "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
-          + " mkpart primary 1M 100MB"  # bpool
+          + " mkpart primary 1M 100MB"  # /boot
           + " mkpart primary linux-swap 100M 1024M"
           + " mkpart primary 1024M -1s", # rpool
           "udevadm settle",
@@ -909,20 +981,12 @@ in {
           "zfs create -o mountpoint=legacy rpool/root/usr",
           "mkdir /mnt/usr",
           "mount -t zfs rpool/root/usr /mnt/usr",
-          "zpool create -o compatibility=grub2 bpool /dev/vda1",
-          "zfs create -o mountpoint=legacy bpool/boot",
+          "mkfs.vfat -n BOOT /dev/vda1",
           "mkdir /mnt/boot",
-          "mount -t zfs bpool/boot /mnt/boot",
+          "mount LABEL=BOOT /mnt/boot",
           "udevadm settle",
       )
     '';
-
-    # umount & export bpool before shutdown
-    # this is a fix for "cannot import 'bpool': pool was previously in use from another system."
-    postInstallCommands = ''
-      machine.succeed("umount /mnt/boot")
-      machine.succeed("zpool export bpool")
-    '';
   };
 
   # Create two physical LVM partitions combined into one volume group
diff --git a/nixos/tests/invidious.nix b/nixos/tests/invidious.nix
index 701e8e5e7a3fc..e31cd87f6a004 100644
--- a/nixos/tests/invidious.nix
+++ b/nixos/tests/invidious.nix
@@ -5,48 +5,72 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     maintainers = [ sbruder ];
   };
 
-  nodes.machine = { config, lib, pkgs, ... }: {
-    services.invidious = {
-      enable = true;
+  nodes = {
+    postgres-tcp = { config, pkgs, ... }: {
+      services.postgresql = {
+        enable = true;
+        initialScript = pkgs.writeText "init-postgres-with-password" ''
+          CREATE USER invidious WITH PASSWORD 'correct horse battery staple';
+          CREATE DATABASE invidious WITH OWNER invidious;
+        '';
+        enableTCPIP = true;
+        authentication = ''
+          host invidious invidious samenet scram-sha-256
+        '';
+      };
+      networking.firewall.allowedTCPPorts = [ config.services.postgresql.port ];
     };
+    machine = { config, lib, pkgs, ... }: {
+      services.invidious = {
+        enable = true;
+      };
 
-    specialisation = {
-      nginx.configuration = {
-        services.invidious = {
-          nginx.enable = true;
-          domain = "invidious.example.com";
-        };
-        services.nginx.virtualHosts."invidious.example.com" = {
-          forceSSL = false;
-          enableACME = false;
+      specialisation = {
+        nginx.configuration = {
+          services.invidious = {
+            nginx.enable = true;
+            domain = "invidious.example.com";
+          };
+          services.nginx.virtualHosts."invidious.example.com" = {
+            forceSSL = false;
+            enableACME = false;
+          };
+          networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
         };
-        networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
-      };
-      postgres-tcp.configuration = {
-        services.invidious = {
-          database = {
-            createLocally = false;
-            host = "127.0.0.1";
-            passwordFile = toString (pkgs.writeText "database-password" "correct horse battery staple");
+        nginx-scale.configuration = {
+          services.invidious = {
+            nginx.enable = true;
+            domain = "invidious.example.com";
+            serviceScale = 3;
+          };
+          services.nginx.virtualHosts."invidious.example.com" = {
+            forceSSL = false;
+            enableACME = false;
           };
+          networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
         };
-        # Normally not needed because when connecting to postgres over TCP/IP
-        # the database is most likely on another host.
-        systemd.services.invidious = {
-          after = [ "postgresql.service" ];
-          requires = [ "postgresql.service" ];
+        nginx-scale-ytproxy.configuration = {
+          services.invidious = {
+            nginx.enable = true;
+            http3-ytproxy.enable = true;
+            domain = "invidious.example.com";
+            serviceScale = 3;
+          };
+          services.nginx.virtualHosts."invidious.example.com" = {
+            forceSSL = false;
+            enableACME = false;
+          };
+          networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
         };
-        services.postgresql =
-          let
-            inherit (config.services.invidious.settings.db) dbname user;
-          in
-          {
-            enable = true;
-            initialScript = pkgs.writeText "init-postgres-with-password" ''
-              CREATE USER kemal WITH PASSWORD 'correct horse battery staple';
-              CREATE DATABASE invidious OWNER kemal;
-            '';
+        postgres-tcp.configuration = {
+          services.invidious = {
+            database = {
+              createLocally = false;
+              host = "postgres-tcp";
+              passwordFile = toString (pkgs.writeText "database-password" "correct horse battery staple");
+            };
           };
+        };
       };
     };
   };
@@ -63,6 +87,9 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     url = "http://localhost:${toString nodes.machine.config.services.invidious.port}"
     port = ${toString nodes.machine.config.services.invidious.port}
 
+    # start postgres vm now
+    postgres_tcp.start()
+
     machine.wait_for_open_port(port)
     curl_assert_status_code(f"{url}/search", 200)
 
@@ -70,9 +97,26 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     machine.wait_for_open_port(80)
     curl_assert_status_code("http://invidious.example.com/search", 200)
 
-    # Remove the state so the `initialScript` gets run
-    machine.succeed("systemctl stop postgresql")
-    machine.succeed("rm -r /var/lib/postgresql")
+    activate_specialisation("nginx-scale")
+    machine.wait_for_open_port(80)
+    # this depends on nginx round-robin behaviour for the upstream servers
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+    machine.succeed("journalctl -eu invidious.service | grep -o '200 GET /search'")
+    machine.succeed("journalctl -eu invidious-1.service | grep -o '200 GET /search'")
+    machine.succeed("journalctl -eu invidious-2.service | grep -o '200 GET /search'")
+
+    activate_specialisation("nginx-scale-ytproxy")
+    machine.wait_for_unit("http3-ytproxy.service")
+    machine.wait_for_open_port(80)
+    machine.wait_until_succeeds("ls /run/http3-ytproxy/socket/http-proxy.sock")
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+    # this should error out as no internet connectivity is available in the test
+    curl_assert_status_code("http://invidious.example.com/vi/dQw4w9WgXcQ/mqdefault.jpg", 502)
+    machine.succeed("journalctl -eu http3-ytproxy.service | grep -o 'dQw4w9WgXcQ'")
+
+    postgres_tcp.wait_for_unit("postgresql.service")
     activate_specialisation("postgres-tcp")
     machine.wait_for_open_port(port)
     curl_assert_status_code(f"{url}/search", 200)
diff --git a/nixos/tests/iscsi-root.nix b/nixos/tests/iscsi-root.nix
index eb0719edc3796..0d7c48464eecc 100644
--- a/nixos/tests/iscsi-root.nix
+++ b/nixos/tests/iscsi-root.nix
@@ -7,8 +7,8 @@ import ./make-test-python.nix (
       {
         name = "iscsi";
         meta = {
-          maintainers = pkgs.lib.teams.deshaw.members
-          ++ (with pkgs.lib.maintainers; [ ajs124 ]);
+          maintainers = lib.teams.deshaw.members
+            ++ lib.teams.helsinki-systems.members;
         };
 
         nodes = {
diff --git a/nixos/tests/kernel-generic.nix b/nixos/tests/kernel-generic.nix
index 352deb521a478..72d31246b75d3 100644
--- a/nixos/tests/kernel-generic.nix
+++ b/nixos/tests/kernel-generic.nix
@@ -31,6 +31,7 @@ let
       linux_5_15_hardened
       linux_6_1_hardened
       linux_6_5_hardened
+      linux_6_6_hardened
       linux_rt_5_4
       linux_rt_5_10
       linux_rt_5_15
diff --git a/nixos/tests/kubo/default.nix b/nixos/tests/kubo/default.nix
index 629922fc366db..d8c0c69dc1fbd 100644
--- a/nixos/tests/kubo/default.nix
+++ b/nixos/tests/kubo/default.nix
@@ -1,5 +1,7 @@
 { recurseIntoAttrs, runTest }:
 recurseIntoAttrs {
   kubo = runTest ./kubo.nix;
-  kubo-fuse = runTest ./kubo-fuse.nix;
+  # The FUSE functionality has been completely broken since Kubo v0.24.0.
+  # See https://github.com/ipfs/kubo/issues/10242
+  # kubo-fuse = runTest ./kubo-fuse.nix;
 }
diff --git a/nixos/tests/kubo/kubo.nix b/nixos/tests/kubo/kubo.nix
index 7965ad2773854..b8222c652b33c 100644
--- a/nixos/tests/kubo/kubo.nix
+++ b/nixos/tests/kubo/kubo.nix
@@ -46,6 +46,13 @@
             f"ipfs --api /unix/run/ipfs.sock cat /ipfs/{ipfs_hash.strip()} | grep fnord2"
         )
 
+    machine.stop_job("ipfs")
+
+    with subtest("Socket activation for the Gateway"):
+        machine.succeed(
+            f"curl 'http://127.0.0.1:8080/ipfs/{ipfs_hash.strip()}' | grep fnord2"
+        )
+
     with subtest("Setting dataDir works properly with the hardened systemd unit"):
         machine.succeed("test -e /mnt/ipfs/config")
         machine.succeed("test ! -e /var/lib/ipfs/")
diff --git a/nixos/tests/livebook-service.nix b/nixos/tests/livebook-service.nix
index 9397e3cb75ffa..56b4eb932f343 100644
--- a/nixos/tests/livebook-service.nix
+++ b/nixos/tests/livebook-service.nix
@@ -11,7 +11,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
         enableUserService = true;
         port = 20123;
         environmentFile = pkgs.writeText "livebook.env" ''
-          LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+          LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
         '';
         options = {
           cookie = "chocolate chip";
@@ -22,7 +22,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
 
   testScript = { nodes, ... }:
     let
-      user = nodes.machine.config.users.users.alice;
+      user = nodes.machine.users.users.alice;
       sudo = lib.concatStringsSep " " [
         "XDG_RUNTIME_DIR=/run/user/${toString user.uid}"
         "sudo"
diff --git a/nixos/tests/lvm2/systemd-stage-1.nix b/nixos/tests/lvm2/systemd-stage-1.nix
index b581f2b23507e..1c95aadfcb3f1 100644
--- a/nixos/tests/lvm2/systemd-stage-1.nix
+++ b/nixos/tests/lvm2/systemd-stage-1.nix
@@ -54,9 +54,9 @@
     '';
   }.${flavour};
 
-in import ../make-test-python.nix ({ pkgs, ... }: {
+in import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lvm2-${flavour}-systemd-stage-1";
-  meta.maintainers = with pkgs.lib.maintainers; [ das_j ];
+  meta.maintainers = lib.teams.helsinki-systems.members;
 
   nodes.machine = { pkgs, lib, ... }: {
     imports = [ extraConfig ];
diff --git a/nixos/tests/lvm2/thinpool.nix b/nixos/tests/lvm2/thinpool.nix
index 14781a8a60459..f49c8980613ce 100644
--- a/nixos/tests/lvm2/thinpool.nix
+++ b/nixos/tests/lvm2/thinpool.nix
@@ -1,7 +1,7 @@
 { kernelPackages ? null }:
 import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lvm2-thinpool";
-  meta.maintainers = with pkgs.lib.maintainers; [ ajs124 ];
+  meta.maintainers = lib.teams.helsinki-systems.members;
 
   nodes.machine = { pkgs, lib, ... }: {
     virtualisation.emptyDiskImages = [ 4096 ];
diff --git a/nixos/tests/lvm2/vdo.nix b/nixos/tests/lvm2/vdo.nix
index 5b014c2f72223..75c1fc094e97f 100644
--- a/nixos/tests/lvm2/vdo.nix
+++ b/nixos/tests/lvm2/vdo.nix
@@ -1,7 +1,7 @@
 { kernelPackages ? null }:
-import ../make-test-python.nix ({ pkgs, ... }: {
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lvm2-vdo";
-  meta.maintainers = with pkgs.lib.maintainers; [ ajs124 ];
+  meta.maintainers = lib.teams.helsinki-systems.members;
 
   nodes.machine = { pkgs, lib, ... }: {
     # Minimum required size for VDO volume: 5063921664 bytes
diff --git a/nixos/tests/lxd/container.nix b/nixos/tests/lxd/container.nix
index 0ebe73d872f2b..ef9c3f4bbee7e 100644
--- a/nixos/tests/lxd/container.nix
+++ b/nixos/tests/lxd/container.nix
@@ -18,8 +18,8 @@ let
 in {
   name = "lxd-container";
 
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ patryk27 adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = { lib, ... }: {
diff --git a/nixos/tests/lxd/nftables.nix b/nixos/tests/lxd/nftables.nix
index d98bd4952906b..e6ce4089d719d 100644
--- a/nixos/tests/lxd/nftables.nix
+++ b/nixos/tests/lxd/nftables.nix
@@ -5,11 +5,11 @@
 # iptables to nftables requires a full reboot, which is a bit hard inside NixOS
 # tests.
 
-import ../make-test-python.nix ({ pkgs, ...} : {
+import ../make-test-python.nix ({ pkgs, lib, ...} : {
   name = "lxd-nftables";
 
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ patryk27 ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = { lib, ... }: {
diff --git a/nixos/tests/lxd/preseed.nix b/nixos/tests/lxd/preseed.nix
index 7d89b9f56daa4..fb80dcf3893e4 100644
--- a/nixos/tests/lxd/preseed.nix
+++ b/nixos/tests/lxd/preseed.nix
@@ -4,7 +4,7 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
   name = "lxd-preseed";
 
   meta = {
-    maintainers = with lib.maintainers; [ adamcstephens ];
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = { lib, ... }: {
diff --git a/nixos/tests/lxd/ui.nix b/nixos/tests/lxd/ui.nix
index ff651725ba705..c442f44ab81cd 100644
--- a/nixos/tests/lxd/ui.nix
+++ b/nixos/tests/lxd/ui.nix
@@ -1,8 +1,8 @@
 import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lxd-ui";
 
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ jnsgruk ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = { lib, ... }: {
diff --git a/nixos/tests/lxd/virtual-machine.nix b/nixos/tests/lxd/virtual-machine.nix
index 93705e9350c5a..2a9dd8fcdbf61 100644
--- a/nixos/tests/lxd/virtual-machine.nix
+++ b/nixos/tests/lxd/virtual-machine.nix
@@ -18,8 +18,8 @@ let
 in {
   name = "lxd-virtual-machine";
 
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [adamcstephens];
+  meta = {
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = {lib, ...}: {
diff --git a/nixos/tests/mate.nix b/nixos/tests/mate.nix
index 78ba59c5fc20d..48582e18d520c 100644
--- a/nixos/tests/mate.nix
+++ b/nixos/tests/mate.nix
@@ -27,9 +27,12 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     hardware.pulseaudio.enable = true;
   };
 
+  enableOCR = true;
+
   testScript = { nodes, ... }:
     let
       user = nodes.machine.users.users.alice;
+      env = "DISPLAY=:0.0 DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus";
     in
     ''
       with subtest("Wait for login"):
@@ -48,11 +51,31 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           machine.wait_for_window("Bottom Panel")
           machine.wait_until_succeeds("pgrep caja")
           machine.wait_for_window("Caja")
+          machine.wait_for_text('(Applications|Places|System)')
+          machine.wait_for_text('(Computer|Home|Trash)')
+
+      with subtest("Lock the screen"):
+          machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is inactive'")
+          machine.succeed("su - ${user.name} -c '${env} mate-screensaver-command -l >&2 &'")
+          machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is active'")
+          machine.sleep(2)
+          machine.send_chars("${user.password}", delay=0.2)
+          machine.wait_for_text("${user.description}")
+          machine.screenshot("screensaver")
+          machine.send_chars("\n")
+          machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is inactive'")
+
+      with subtest("Open MATE control center"):
+          machine.succeed("su - ${user.name} -c '${env} mate-control-center >&2 &'")
+          machine.wait_for_window("Control Center")
+          machine.wait_for_text('(Groups|Administration|Hardware)')
 
       with subtest("Open MATE terminal"):
-          machine.succeed("su - ${user.name} -c 'DISPLAY=:0.0 mate-terminal >&2 &'")
+          machine.succeed("su - ${user.name} -c '${env} mate-terminal >&2 &'")
           machine.wait_for_window("Terminal")
-          machine.sleep(20)
+
+      with subtest("Check if MATE has ever coredumped"):
+          machine.fail("coredumpctl --json=short | grep -E 'mate|marco|caja'")
           machine.screenshot("screen")
     '';
 })
diff --git a/nixos/tests/mobilizon.nix b/nixos/tests/mobilizon.nix
index 398c8530dc565..2b070ca9d9609 100644
--- a/nixos/tests/mobilizon.nix
+++ b/nixos/tests/mobilizon.nix
@@ -10,7 +10,7 @@ import ./make-test-python.nix ({ lib, ... }:
     meta.maintainers = with lib.maintainers; [ minijackson erictapen ];
 
     nodes.server =
-      { pkgs, ... }:
+      { ... }:
       {
         services.mobilizon = {
           enable = true;
@@ -25,8 +25,6 @@ import ./make-test-python.nix ({ lib, ... }:
           };
         };
 
-        services.postgresql.package = pkgs.postgresql_14;
-
         security.pki.certificateFiles = [ certs.ca.cert ];
 
         services.nginx.virtualHosts."${mobilizonDomain}" = {
diff --git a/nixos/tests/mongodb.nix b/nixos/tests/mongodb.nix
index 1afc891817aff..68be6926865ec 100644
--- a/nixos/tests/mongodb.nix
+++ b/nixos/tests/mongodb.nix
@@ -27,7 +27,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
   in {
     name = "mongodb";
     meta = with pkgs.lib.maintainers; {
-      maintainers = [ bluescreen303 offline rvl phile314 ];
+      maintainers = [ bluescreen303 offline phile314 ];
     };
 
     nodes = {
diff --git a/nixos/tests/munin.nix b/nixos/tests/munin.nix
index 4ec17e0339df0..e371b2dffa6b8 100644
--- a/nixos/tests/munin.nix
+++ b/nixos/tests/munin.nix
@@ -37,8 +37,10 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     with subtest("ensure munin-node starts and listens on 4949"):
         one.wait_for_unit("munin-node.service")
         one.wait_for_open_port(4949)
+
     with subtest("ensure munin-cron output is correct"):
         one.wait_for_file("/var/lib/munin/one/one-uptime-uptime-g.rrd")
         one.wait_for_file("/var/www/munin/one/index.html")
+        one.wait_for_file("/var/www/munin/one/one/diskstat_iops_vda-day.png", timeout=60)
   '';
 })
diff --git a/nixos/tests/musescore.nix b/nixos/tests/musescore.nix
index 6aeb0558a49db..0720631ed284b 100644
--- a/nixos/tests/musescore.nix
+++ b/nixos/tests/musescore.nix
@@ -63,14 +63,11 @@ in
 
     machine.send_key("tab")
     machine.send_key("tab")
-    machine.send_key("tab")
-    machine.send_key("tab")
-    machine.send_key("right")
-    machine.send_key("right")
     machine.send_key("ret")
 
-    machine.sleep(1)
+    machine.sleep(2)
 
+    machine.send_key("tab")
     # Type the beginning of https://de.wikipedia.org/wiki/Alle_meine_Entchen
     machine.send_chars("cdef6gg5aaaa7g")
     machine.sleep(1)
diff --git a/nixos/tests/mympd.nix b/nixos/tests/mympd.nix
new file mode 100644
index 0000000000000..ac6a896966e6b
--- /dev/null
+++ b/nixos/tests/mympd.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({pkgs, lib, ... }: {
+  name = "mympd";
+
+  nodes.mympd = {
+    services.mympd = {
+      enable = true;
+      settings = {
+        http_port = 8081;
+      };
+    };
+
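+    # myMPD is a web frontend for MPD, so a local MPD instance is needed
+    # for the "Connected to MPD" check in the test script below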
+    services.mpd.enable = true;
+  };
+
+  testScript = ''
+    start_all();
+    machine.wait_for_unit("mympd.service");
+
+    # Ensure that mympd can connect to mpd
+    machine.wait_until_succeeds(
+      "journalctl -eu mympd -o cat | grep 'Connected to MPD'"
+    )
+
+    # Ensure that the web server is working
+    machine.succeed("curl http://localhost:8081 --compressed | grep -o myMPD")
+  '';
+})
diff --git a/nixos/tests/mysql/mariadb-galera.nix b/nixos/tests/mysql/mariadb-galera.nix
index c9962f49c02fd..7455abbce5fb0 100644
--- a/nixos/tests/mysql/mariadb-galera.nix
+++ b/nixos/tests/mysql/mariadb-galera.nix
@@ -17,8 +17,8 @@ let
     galeraPackage ? pkgs.mariadb-galera
   }: makeTest {
     name = "${name}-galera-mariabackup";
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ izorkin ajs124 das_j ];
+    meta = {
+      maintainers = with lib.maintainers; [ izorkin ] ++ lib.teams.helsinki-systems.members;
     };
 
     # The test creates a Galera cluster with 3 nodes and is checking if mariabackup-based SST works. The cluster is tested by creating a DB and an empty table on one node,
diff --git a/nixos/tests/mysql/mysql-backup.nix b/nixos/tests/mysql/mysql-backup.nix
index 968f56dd3c9bd..451f5c04ce467 100644
--- a/nixos/tests/mysql/mysql-backup.nix
+++ b/nixos/tests/mysql/mysql-backup.nix
@@ -15,9 +15,6 @@ let
     name ? mkTestName package
   }: makeTest {
     name = "${name}-backup";
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ rvl ];
-    };
 
     nodes = {
       master = { pkgs, ... }: {
diff --git a/nixos/tests/mysql/mysql-replication.nix b/nixos/tests/mysql/mysql-replication.nix
index 8f1695eb97e27..83da1e7b6cb88 100644
--- a/nixos/tests/mysql/mysql-replication.nix
+++ b/nixos/tests/mysql/mysql-replication.nix
@@ -18,8 +18,8 @@ let
     name ? mkTestName package,
   }: makeTest {
     name = "${name}-replication";
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ ajs124 das_j ];
+    meta = {
+      maintainers = lib.teams.helsinki-systems.members;
     };
 
     nodes = {
diff --git a/nixos/tests/mysql/mysql.nix b/nixos/tests/mysql/mysql.nix
index 3e059cad09e97..0a61f9d38fe2e 100644
--- a/nixos/tests/mysql/mysql.nix
+++ b/nixos/tests/mysql/mysql.nix
@@ -18,8 +18,8 @@ let
     hasRocksDB ? pkgs.stdenv.hostPlatform.is64bit
   }: makeTest {
     inherit name;
-    meta = with lib.maintainers; {
-      maintainers = [ ajs124 das_j ];
+    meta = {
+      maintainers = lib.teams.helsinki-systems.members;
     };
 
     nodes = {
diff --git a/nixos/tests/nextcloud/basic.nix b/nixos/tests/nextcloud/basic.nix
index ab1d8353dba0b..428fe0aa10db9 100644
--- a/nixos/tests/nextcloud/basic.nix
+++ b/nixos/tests/nextcloud/basic.nix
@@ -13,10 +13,12 @@ in {
     # The only thing the client needs to do is download a file.
     client = { ... }: {
       services.davfs2.enable = true;
-      system.activationScripts.davfs2-secrets = ''
-        echo "http://nextcloud/remote.php/dav/files/${adminuser} ${adminuser} ${adminpass}" > /tmp/davfs2-secrets
-        chmod 600 /tmp/davfs2-secrets
-      '';
+      systemd.tmpfiles.settings.nextcloud = {
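+        # tmpfiles type "f+" (re)creates the file, truncating any existing
+        # content, and writes the argument below as its contents with the given mode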
+        "/tmp/davfs2-secrets"."f+" = {
+          mode = "0600";
+          argument = "http://nextcloud/remote.php/dav/files/${adminuser} ${adminuser} ${adminpass}";
+        };
+      };
       virtualisation.fileSystems = {
         "/mnt/dav" = {
           device = "http://nextcloud/remote.php/dav/files/${adminuser}";
diff --git a/nixos/tests/nextcloud/default.nix b/nixos/tests/nextcloud/default.nix
index 19d04b28b4f99..84ac371537271 100644
--- a/nixos/tests/nextcloud/default.nix
+++ b/nixos/tests/nextcloud/default.nix
@@ -22,4 +22,4 @@ foldl
     };
   })
 { }
-  [ 26 27 ]
+  [ 26 27 28 ]
diff --git a/nixos/tests/nextcloud/with-postgresql-and-redis.nix b/nixos/tests/nextcloud/with-postgresql-and-redis.nix
index 586bf50fd939c..d95af8a89d07a 100644
--- a/nixos/tests/nextcloud/with-postgresql-and-redis.nix
+++ b/nixos/tests/nextcloud/with-postgresql-and-redis.nix
@@ -32,7 +32,6 @@ in {
           adminpassFile = toString (pkgs.writeText "admin-pass-file" ''
             ${adminpass}
           '');
-          trustedProxies = [ "::1" ];
         };
         notify_push = {
           enable = true;
@@ -42,6 +41,7 @@ in {
         extraApps = {
           inherit (pkgs."nextcloud${lib.versions.major config.services.nextcloud.package.version}Packages".apps) notify_push;
         };
+        extraOptions.trusted_proxies = [ "::1" ];
       };
 
       services.redis.servers."nextcloud".enable = true;
diff --git a/nixos/tests/nginx-http3.nix b/nixos/tests/nginx-http3.nix
index fc9f31037f989..22f7f61f10ce6 100644
--- a/nixos/tests/nginx-http3.nix
+++ b/nixos/tests/nginx-http3.nix
@@ -1,97 +1,113 @@
-import ./make-test-python.nix ({lib, pkgs, ...}:
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
 let
   hosts = ''
     192.168.2.101 acme.test
   '';
 
 in
-{
-  name = "nginx-http3";
-  meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
-
-  nodes = {
-    server = { pkgs, ... }: {
-      networking = {
-        interfaces.eth1 = {
-          ipv4.addresses = [
-            { address = "192.168.2.101"; prefixLength = 24; }
-          ];
-        };
-        extraHosts = hosts;
-        firewall.allowedTCPPorts = [ 443 ];
-        firewall.allowedUDPPorts = [ 443 ];
-      };
-
-      security.pki.certificates = [
-        (builtins.readFile ./common/acme/server/ca.cert.pem)
-      ];
-
-      services.nginx = {
-        enable = true;
-        package = pkgs.nginxQuic;
-
-        virtualHosts."acme.test" = {
-          onlySSL = true;
-          sslCertificate = ./common/acme/server/acme.test.cert.pem;
-          sslCertificateKey = ./common/acme/server/acme.test.key.pem;
-          http2 = true;
-          http3 = true;
-          http3_hq = false;
-          quic = true;
-          reuseport = true;
-          root = lib.mkForce (pkgs.runCommandLocal "testdir" {} ''
-            mkdir "$out"
-            cat > "$out/index.html" <<EOF
-            <html><body>Hello World!</body></html>
-            EOF
-            cat > "$out/example.txt" <<EOF
-            Check http3 protocol.
-            EOF
-          '');
-        };
-      };
-    };
-
-    client = { pkgs, ... }: {
-      environment.systemPackages = [ pkgs.curlHTTP3 ];
-      networking = {
-        interfaces.eth1 = {
-          ipv4.addresses = [
-            { address = "192.168.2.201"; prefixLength = 24; }
-          ];
-        };
-        extraHosts = hosts;
-      };
 
-      security.pki.certificates = [
-        (builtins.readFile ./common/acme/server/ca.cert.pem)
-      ];
-    };
-  };
+builtins.listToAttrs (
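+  # Build one test per QUIC-capable server package (see the list at the end)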
+  builtins.map
+    (nginxPackage:
+      {
+        name = pkgs.lib.getName nginxPackage;
+        value = makeTest {
+          name = "nginx-http3-${pkgs.lib.getName nginxPackage}";
+          meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
 
-  testScript = ''
-    start_all()
+          nodes = {
+            server = { lib, pkgs, ... }: {
+              networking = {
+                interfaces.eth1 = {
+                  ipv4.addresses = [
+                    { address = "192.168.2.101"; prefixLength = 24; }
+                  ];
+                };
+                extraHosts = hosts;
+                firewall.allowedTCPPorts = [ 443 ];
+                firewall.allowedUDPPorts = [ 443 ];
+              };
 
-    server.wait_for_unit("nginx")
-    server.wait_for_open_port(443)
+              security.pki.certificates = [
+                (builtins.readFile ./common/acme/server/ca.cert.pem)
+              ];
 
-    # Check http connections
-    client.succeed("curl --verbose --http3-only https://acme.test | grep 'Hello World!'")
+              services.nginx = {
+                enable = true;
+                package = nginxPackage;
 
-    # Check downloadings
-    client.succeed("curl --verbose --http3-only https://acme.test/example.txt --output /tmp/example.txt")
-    client.succeed("cat /tmp/example.txt | grep 'Check http3 protocol.'")
+                virtualHosts."acme.test" = {
+                  onlySSL = true;
+                  sslCertificate = ./common/acme/server/acme.test.cert.pem;
+                  sslCertificateKey = ./common/acme/server/acme.test.key.pem;
+                  http2 = true;
+                  http3 = true;
+                  http3_hq = false;
+                  quic = true;
+                  reuseport = true;
+                  root = lib.mkForce (pkgs.runCommandLocal "testdir" {} ''
+                    mkdir "$out"
+                    cat > "$out/index.html" <<EOF
+                    <html><body>Hello World!</body></html>
+                    EOF
+                    cat > "$out/example.txt" <<EOF
+                    Check http3 protocol.
+                    EOF
+                  '');
+                };
+              };
+            };
 
-    # Check header reading
-    client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'content-type'")
-    client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'HTTP/3 200'")
-    client.succeed("curl --verbose --http3-only --head https://acme.test/error | grep 'HTTP/3 404'")
+            client = { pkgs, ... }: {
+              environment.systemPackages = [ pkgs.curlHTTP3 ];
+              networking = {
+                interfaces.eth1 = {
+                  ipv4.addresses = [
+                    { address = "192.168.2.201"; prefixLength = 24; }
+                  ];
+                };
+                extraHosts = hosts;
+              };
 
-    # Check change User-Agent
-    client.succeed("curl --verbose --http3-only --user-agent 'Curl test 3.0' https://acme.test")
-    server.succeed("cat /var/log/nginx/access.log | grep 'Curl test 3.0'")
+              security.pki.certificates = [
+                (builtins.readFile ./common/acme/server/ca.cert.pem)
+              ];
+            };
+          };
 
-    server.shutdown()
-    client.shutdown()
-  '';
-})
+          testScript = ''
+            start_all()
+
+            server.wait_for_unit("nginx")
+            server.wait_for_open_port(443)
+
+            # Check http connections
+            client.succeed("curl --verbose --http3-only https://acme.test | grep 'Hello World!'")
+
+            # Check file downloads
+            client.succeed("curl --verbose --http3-only https://acme.test/example.txt --output /tmp/example.txt")
+            client.succeed("cat /tmp/example.txt | grep 'Check http3 protocol.'")
+
+            # Check header reading
+            client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'content-type'")
+            client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'HTTP/3 200'")
+            client.succeed("curl --verbose --http3-only --head https://acme.test/error | grep 'HTTP/3 404'")
+
+            # Check that a custom User-Agent shows up in the access log
+            client.succeed("curl --verbose --http3-only --user-agent 'Curl test 3.0' https://acme.test")
+            server.succeed("cat /var/log/nginx/access.log | grep 'Curl test 3.0'")
+
+            server.shutdown()
+            client.shutdown()
+          '';
+        };
+      }
+    )
+    [ pkgs.angieQuic pkgs.nginxQuic ]
+)
diff --git a/nixos/tests/nginx-redirectcode.nix b/nixos/tests/nginx-redirectcode.nix
new file mode 100644
index 0000000000000..f60434a21a85d
--- /dev/null
+++ b/nixos/tests/nginx-redirectcode.nix
@@ -0,0 +1,25 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "nginx-redirectcode";
+  meta.maintainers = with lib.maintainers; [ misterio77 ];
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.nginx = {
+        enable = true;
+        virtualHosts.localhost = {
+          globalRedirect = "example.com/foo";
+          # With 308 (and 307), clients must keep the request method and body when following the redirect
+          redirectCode = 308;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    # Check the status code
+    webserver.succeed("curl -si http://localhost | grep '^HTTP/[0-9.]\+ 308 Permanent Redirect'")
+  '';
+})
diff --git a/nixos/tests/nginx-variants.nix b/nixos/tests/nginx-variants.nix
index 0faa0127669dd..8c24052aacce3 100644
--- a/nixos/tests/nginx-variants.nix
+++ b/nixos/tests/nginx-variants.nix
@@ -7,17 +7,17 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
 
 builtins.listToAttrs (
   builtins.map
-    (nginxName:
+    (nginxPackage:
       {
-        name = nginxName;
+        name = pkgs.lib.getName nginxPackage;
         value = makeTest {
-          name = "nginx-variant-${nginxName}";
+          name = "nginx-variant-${pkgs.lib.getName nginxPackage}";
 
           nodes.machine = { pkgs, ... }: {
             services.nginx = {
               enable = true;
               virtualHosts.localhost.locations."/".return = "200 'foo'";
-              package = pkgs."${nginxName}";
+              package = nginxPackage;
             };
           };
 
@@ -29,5 +29,5 @@ builtins.listToAttrs (
         };
       }
     )
-    [ "nginxStable" "nginxMainline" "nginxQuic" "nginxShibboleth" "openresty" "tengine" ]
+    [ pkgs.angie pkgs.angieQuic pkgs.nginxStable pkgs.nginxMainline pkgs.nginxQuic pkgs.nginxShibboleth pkgs.openresty pkgs.tengine ]
 )
diff --git a/nixos/tests/nitter.nix b/nixos/tests/nitter.nix
index 8bc55ba8c69fc..114f1aac7c7af 100644
--- a/nixos/tests/nitter.nix
+++ b/nixos/tests/nitter.nix
@@ -1,13 +1,28 @@
 import ./make-test-python.nix ({ pkgs, ... }:
 
+let
+  # In a real deployment this should naturally not come from the nix store,
+  # but instead be seeded via agenix or as a non-nix managed file.
+  #
+  # These credentials are from the nitter wiki and are expired. We must provide
+  # credentials in the correct format, otherwise nitter fails to start. They
+  # need not be valid, as unauthorized errors are handled gracefully.
+  guestAccountFile = pkgs.writeText "guest_accounts.jsonl" ''
+    {"oauth_token":"1719213587296620928-BsXY2RIJEw7fjxoNwbBemgjJhueK0m","oauth_token_secret":"N0WB0xhL4ng6WTN44aZO82SUJjz7ssI3hHez2CUhTiYqy"}
+  '';
+in
 {
   name = "nitter";
   meta.maintainers = with pkgs.lib.maintainers; [ erdnaxe ];
 
   nodes.machine = {
-    services.nitter.enable = true;
-    # Test CAP_NET_BIND_SERVICE
-    services.nitter.server.port = 80;
+    services.nitter = {
+      enable = true;
+      # Test CAP_NET_BIND_SERVICE
+      server.port = 80;
+      # Provide dummy guest accounts
+      guestAccounts = guestAccountFile;
+    };
   };
 
   testScript = ''
diff --git a/nixos/tests/nixos-rebuild-specialisations.nix b/nixos/tests/nixos-rebuild-specialisations.nix
index 444ff7a3b9771..9192b8a8a030b 100644
--- a/nixos/tests/nixos-rebuild-specialisations.nix
+++ b/nixos/tests/nixos-rebuild-specialisations.nix
@@ -23,7 +23,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
       virtualisation = {
         cores = 2;
-        memorySize = 2048;
+        memorySize = 4096;
       };
     };
   };
diff --git a/nixos/tests/npmrc.nix b/nixos/tests/npmrc.nix
new file mode 100644
index 0000000000000..dbf24d372feb4
--- /dev/null
+++ b/nixos/tests/npmrc.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ ... }:
+let
+  machineName = "machine";
+  settingName = "prefix";
+  settingValue = "/some/path";
+in
+{
+  name = "npmrc";
+
+  nodes."${machineName}".programs.npm = {
+    enable = true;
+    npmrc = ''
+      ${settingName} = ${settingValue}
+    '';
+  };
+
+  testScript = ''
+    ${machineName}.start()
+
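+    # `npm config get` prints the resolved value followed by a newline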
+    assert ${machineName}.succeed("npm config get ${settingName}") == "${settingValue}\n"
+  '';
+})
diff --git a/nixos/tests/oci-containers.nix b/nixos/tests/oci-containers.nix
index 1afa9df36dfa4..205ce623d089c 100644
--- a/nixos/tests/oci-containers.nix
+++ b/nixos/tests/oci-containers.nix
@@ -12,7 +12,7 @@ let
     name = "oci-containers-${backend}";
 
     meta.maintainers = lib.teams.serokell.members
-                       ++ (with lib.maintainers; [ adisbladis benley mkaito ]);
+                       ++ (with lib.maintainers; [ benley mkaito ]);
 
     nodes = {
       ${backend} = { pkgs, ... }: {
diff --git a/nixos/tests/opentabletdriver.nix b/nixos/tests/opentabletdriver.nix
index b7583f6dd2648..a71a007c41100 100644
--- a/nixos/tests/opentabletdriver.nix
+++ b/nixos/tests/opentabletdriver.nix
@@ -20,9 +20,11 @@ in {
     ''
       machine.start()
       machine.wait_for_x()
+
+      machine.wait_for_unit('graphical.target')
       machine.wait_for_unit("opentabletdriver.service", "${testUser}")
 
-      machine.succeed("cat /etc/udev/rules.d/99-opentabletdriver.rules")
+      machine.succeed("cat /etc/udev/rules.d/70-opentabletdriver.rules")
       # Will fail if service is not running
       # Needs to run as the same user that started the service
       machine.succeed("su - ${testUser} -c 'otd detect'")
diff --git a/nixos/tests/pgjwt.nix b/nixos/tests/pgjwt.nix
index 4793a3e315031..8d3310b74eb3b 100644
--- a/nixos/tests/pgjwt.nix
+++ b/nixos/tests/pgjwt.nix
@@ -11,7 +11,7 @@ with pkgs; {
     {
       services.postgresql = {
         enable = true;
-        extraPlugins = [ pgjwt pgtap ];
+        extraPlugins = ps: with ps; [ pgjwt pgtap ];
       };
     };
   };
diff --git a/nixos/tests/postgis.nix b/nixos/tests/postgis.nix
index d0685abc510c9..09c738b938ba8 100644
--- a/nixos/tests/postgis.nix
+++ b/nixos/tests/postgis.nix
@@ -9,10 +9,10 @@ import ./make-test-python.nix ({ pkgs, ...} : {
       { pkgs, ... }:
 
       {
-        services.postgresql = let mypg = pkgs.postgresql; in {
+        services.postgresql = {
             enable = true;
-            package = mypg;
-            extraPlugins = with mypg.pkgs; [
+            package = pkgs.postgresql;
+            extraPlugins = ps: with ps; [
               postgis
             ];
         };
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 7840130d4a364..53e6626c0e324 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -431,8 +431,8 @@ let
     };
 
     kea = let
-      controlSocketPathV4 = "/run/kea-dhcp4/dhcp4.sock";
-      controlSocketPathV6 = "/run/kea-dhcp6/dhcp6.sock";
+      controlSocketPathV4 = "/run/kea/dhcp4.sock";
+      controlSocketPathV6 = "/run/kea/dhcp6.sock";
     in
     {
       exporterConfig = {
@@ -806,6 +806,7 @@ let
     nginx = {
       exporterConfig = {
         enable = true;
+        constLabels = [ "foo=bar" ];
       };
       metricProvider = {
         services.nginx = {
@@ -818,7 +819,7 @@ let
         wait_for_unit("nginx.service")
         wait_for_unit("prometheus-nginx-exporter.service")
         wait_for_open_port(9113)
-        succeed("curl -sSf http://localhost:9113/metrics | grep 'nginx_up 1'")
+        succeed("curl -sSf http://localhost:9113/metrics | grep 'nginx_up{foo=\"bar\"} 1'")
       '';
     };
 
@@ -1052,6 +1053,50 @@ let
       '';
     };
 
+    ping = {
+      exporterConfig = {
+        enable = true;
+
+        settings = {
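+          # probe localhost by name, over IPv4 and IPv6, plus one external target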
+          targets = [ {
+            "localhost" = {
+              alias = "local machine";
+              env = "prod";
+              type = "domain";
+            };
+          } {
+            "127.0.0.1" = {
+              alias = "local machine";
+              type = "v4";
+            };
+          } {
+            "::1" = {
+              alias = "local machine";
+              type = "v6";
+            };
+          } {
+            "google.com" = {};
+          } ];
+          dns = {};
+          ping = {
+            interval = "2s";
+            timeout = "3s";
+            history-size = 42;
+            payload-size = 56;
+          };
+          log = {
+            level = "warn";
+          };
+        };
+      };
+
+      exporterTest = ''
+        wait_for_unit("prometheus-ping-exporter.service")
+        wait_for_open_port(9427)
+        succeed("curl -sSf http://localhost:9427/metrics | grep 'ping_up{.*} 1'")
+      '';
+    };
+
     postfix = {
       exporterConfig = {
         enable = true;
diff --git a/nixos/tests/promscale.nix b/nixos/tests/promscale.nix
index d4825b6d7f551..da18628f2482c 100644
--- a/nixos/tests/promscale.nix
+++ b/nixos/tests/promscale.nix
@@ -27,7 +27,7 @@ let
         services.postgresql = {
           enable = true;
           package = postgresql-package;
-          extraPlugins = with postgresql-package.pkgs; [
+          extraPlugins = ps: with ps; [
             timescaledb
             promscale_extension
           ];
diff --git a/nixos/tests/quicktun.nix b/nixos/tests/quicktun.nix
new file mode 100644
index 0000000000000..a5a6324571174
--- /dev/null
+++ b/nixos/tests/quicktun.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "quicktun";
+  meta.maintainers = with lib.maintainers; [ h7x4 ];
+
+  nodes = {
+    machine = { ... }: {
+      services.quicktun."test-tunnel" = {
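+        # the "raw" protocol tunnels traffic without encryption, so no key
+        # material is needed for this start-up smoke test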
+        protocol = "raw";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("quicktun-test-tunnel.service")
+  '';
+})
diff --git a/nixos/tests/rspamd-trainer.nix b/nixos/tests/rspamd-trainer.nix
new file mode 100644
index 0000000000000..9c157903d24b6
--- /dev/null
+++ b/nixos/tests/rspamd-trainer.nix
@@ -0,0 +1,155 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+in {
+  name = "rspamd-trainer";
+  meta = with pkgs.lib.maintainers; { maintainers = [ onny ]; };
+
+  nodes = {
+    machine = { options, config, ... }: {
+
+      security.pki.certificateFiles = [
+        certs.ca.cert
+      ];
+
+      networking.extraHosts = ''
+        127.0.0.1 ${domain}
+     '';
+
+      services.rspamd-trainer = {
+        enable = true;
+        settings = {
+          HOST = domain;
+          USERNAME = "spam@${domain}";
+          INBOXPREFIX = "INBOX/";
+        };
+        secrets = [
+          # Do not use this in production. This will make passwords
+          # world-readable in the Nix store
+          "${pkgs.writeText "secrets" ''
+            PASSWORD = test123
+          ''}"
+        ];
+      };
+
+      services.maddy = {
+        enable = true;
+        hostname = domain;
+        primaryDomain = domain;
+        ensureAccounts = [ "spam@${domain}" ];
+        ensureCredentials = {
+          # Do not use this in production. This will make passwords world-readable
+          # in the Nix store
+          "spam@${domain}".passwordFile = "${pkgs.writeText "postmaster" "test123"}";
+        };
+        tls = {
+          loader = "file";
+          certificates = [{
+            certPath = "${certs.${domain}.cert}";
+            keyPath = "${certs.${domain}.key}";
+          }];
+        };
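+        # extend the default maddy config with implicit-TLS listeners on
+        # ports 993 (IMAP) and 465 (submission) used by the helper scripts below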
+        config = builtins.replaceStrings [
+          "imap tcp://0.0.0.0:143"
+          "submission tcp://0.0.0.0:587"
+        ] [
+          "imap tls://0.0.0.0:993 tcp://0.0.0.0:143"
+          "submission tls://0.0.0.0:465 tcp://0.0.0.0:587"
+        ] options.services.maddy.config.default;
+      };
+
+      services.rspamd = {
+        enable = true;
+        locals = {
+          "redis.conf".text = ''
+            servers = "${config.services.redis.servers.rspamd.unixSocket}";
+          '';
+          "classifier-bayes.conf".text = ''
+            backend = "redis";
+            autolearn = true;
+          '';
+        };
+      };
+
+      services.redis.servers.rspamd = {
+        enable = true;
+        port = 0;
+        unixSocket = "/run/redis-rspamd/redis.sock";
+        user = config.services.rspamd.user;
+      };
+
+      environment.systemPackages = [
+        (pkgs.writers.writePython3Bin "send-testmail" { } ''
+          import smtplib
+          import ssl
+          from email.mime.text import MIMEText
+          context = ssl.create_default_context()
+          msg = MIMEText("Hello World")
+          msg['Subject'] = 'Test'
+          msg['From'] = "spam@${domain}"
+          msg['To'] = "spam@${domain}"
+          with smtplib.SMTP_SSL(host='${domain}', port=465, context=context) as smtp:
+              smtp.login('spam@${domain}', 'test123')
+              smtp.sendmail(
+                'spam@${domain}', 'spam@${domain}', msg.as_string()
+              )
+        '')
+        (pkgs.writers.writePython3Bin "create-mail-dirs" { } ''
+          import imaplib
+          with imaplib.IMAP4_SSL('${domain}') as imap:
+              imap.login('spam@${domain}', 'test123')
+              imap.create("\"INBOX/report_spam\"")
+              imap.create("\"INBOX/report_ham\"")
+              imap.create("\"INBOX/report_spam_reply\"")
+              imap.select("INBOX")
+              imap.copy("1", "\"INBOX/report_ham\"")
+              imap.logout()
+        '')
+        (pkgs.writers.writePython3Bin "test-imap" { } ''
+          import imaplib
+          with imaplib.IMAP4_SSL('${domain}') as imap:
+              imap.login('spam@${domain}', 'test123')
+              imap.select("INBOX/learned_ham")
+              status, refs = imap.search(None, 'ALL')
+              assert status == 'OK'
+              assert len(refs) == 1
+              status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+              assert status == 'OK'
+              assert msg[0][1].strip() == b"Hello World"
+              imap.logout()
+        '')
+      ];
+
+    };
+
+  };
+
+  testScript = { nodes }: ''
+    start_all()
+    machine.wait_for_unit("maddy.service")
+    machine.wait_for_open_port(143)
+    machine.wait_for_open_port(993)
+    machine.wait_for_open_port(587)
+    machine.wait_for_open_port(465)
+
+    # Send test mail to spam@domain
+    machine.succeed("send-testmail")
+
+    # Create mail directories required for rspamd-trainer and copy mail from
+    # INBOX into INBOX/report_ham
+    machine.succeed("create-mail-dirs")
+
+    # Start rspamd-trainer. It should read mail from INBOX/report_ham
+    machine.wait_for_unit("rspamd.service")
+    machine.wait_for_unit("redis-rspamd.service")
+    machine.wait_for_file("/run/rspamd/rspamd.sock")
+    machine.succeed("systemctl start rspamd-trainer.service")
+
+    # Check if mail got processed by rspamd-trainer successfully and check for
+    # it in INBOX/learned_ham
+    machine.succeed("test-imap")
+  '';
+})
diff --git a/nixos/tests/slimserver.nix b/nixos/tests/slimserver.nix
index c3f7b6fde4de0..95cbdcf4a2a15 100644
--- a/nixos/tests/slimserver.nix
+++ b/nixos/tests/slimserver.nix
@@ -39,8 +39,8 @@ import ./make-test-python.nix ({ pkgs, ...} : {
 
       with subtest("squeezelite player successfully connects to slimserver"):
           machine.wait_for_unit("squeezelite.service")
-          machine.wait_until_succeeds("journalctl -u squeezelite.service | grep 'slimproto:937 connected'")
-          player_mac = machine.wait_until_succeeds("journalctl -eu squeezelite.service | grep 'sendHELO:148 mac:'").strip().split(" ")[-1]
+          machine.wait_until_succeeds("journalctl -u squeezelite.service | grep -E 'slimproto:[0-9]+ connected'")
+          player_mac = machine.wait_until_succeeds("journalctl -eu squeezelite.service | grep -E 'sendHELO:[0-9]+ mac:'").strip().split(" ")[-1]
           player_id = machine.succeed(f"curl http://localhost:9000/jsonrpc.js -g -X POST -d '{json.dumps(rpc_get_player)}'")
           assert player_mac == json.loads(player_id)["result"]["_id"], "squeezelite player not found"
     '';
diff --git a/nixos/tests/sogo.nix b/nixos/tests/sogo.nix
index acdad8d0f473b..e9059a2ab7734 100644
--- a/nixos/tests/sogo.nix
+++ b/nixos/tests/sogo.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ pkgs, ... }: {
   name = "sogo";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ ajs124 das_j ];
+    maintainers = [];
   };
 
   nodes = {
diff --git a/nixos/tests/spark/default.nix b/nixos/tests/spark/default.nix
index 462f0d23a4032..eed7db35bf4f1 100644
--- a/nixos/tests/spark/default.nix
+++ b/nixos/tests/spark/default.nix
@@ -1,28 +1,48 @@
-import ../make-test-python.nix ({...}: {
-  name = "spark";
+{ pkgs, ... }:
 
-  nodes = {
-    worker = { nodes, pkgs, ... }: {
-      services.spark.worker = {
-        enable = true;
-        master = "master:7077";
+let
+  inherit (pkgs) lib;
+  tests = {
+    default = testsForPackage { sparkPackage = pkgs.spark; };
+  };
+
+  testsForPackage = args: lib.recurseIntoAttrs {
+    sparkCluster = testSparkCluster args;
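+    # allow re-instantiating the tests with a different Spark package via .override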
+    passthru.override = args': testsForPackage (args // args');
+  };
+  testSparkCluster = { sparkPackage, ... }: pkgs.nixosTest ({
+    name = "spark";
+
+    nodes = {
+      worker = { nodes, pkgs, ... }: {
+        services.spark = {
+          package = sparkPackage;
+          worker = {
+            enable = true;
+            master = "master:7077";
+          };
+        };
+        virtualisation.memorySize = 2048;
       };
-      virtualisation.memorySize = 2048;
-    };
-    master = { config, pkgs, ... }: {
-      services.spark.master = {
-        enable = true;
-        bind = "0.0.0.0";
+      master = { config, pkgs, ... }: {
+        services.spark = {
+          package = sparkPackage;
+          master = {
+            enable = true;
+            bind = "0.0.0.0";
+          };
+        };
+        networking.firewall.allowedTCPPorts = [ 22 7077 8080 ];
       };
-      networking.firewall.allowedTCPPorts = [ 22 7077 8080 ];
     };
-  };
 
-  testScript = ''
-    master.wait_for_unit("spark-master.service")
-    worker.wait_for_unit("spark-worker.service")
-    worker.copy_from_host( "${./spark_sample.py}", "/spark_sample.py" )
-    assert "<title>Spark Master at spark://" in worker.succeed("curl -sSfkL http://master:8080/")
-    worker.succeed("spark-submit --master spark://master:7077 --executor-memory 512m --executor-cores 1 /spark_sample.py")
-  '';
-})
+    testScript = ''
+      master.wait_for_unit("spark-master.service")
+      worker.wait_for_unit("spark-worker.service")
+      worker.copy_from_host( "${./spark_sample.py}", "/spark_sample.py" )
+      assert "<title>Spark Master at spark://" in worker.succeed("curl -sSfkL http://master:8080/")
+      worker.succeed("spark-submit --version | systemd-cat")
+      worker.succeed("spark-submit --master spark://master:7077 --executor-memory 512m --executor-cores 1 /spark_sample.py")
+    '';
+  });
+in tests
diff --git a/nixos/tests/ssh-agent-auth.nix b/nixos/tests/ssh-agent-auth.nix
new file mode 100644
index 0000000000000..2274e463ce95a
--- /dev/null
+++ b/nixos/tests/ssh-agent-auth.nix
@@ -0,0 +1,51 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+  let
+    inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+  in {
+    name = "ssh-agent-auth";
+    meta.maintainers = with lib.maintainers; [ nicoo ];
+
+    nodes = let nodeConfig = n: { ... }: {
+      users.users = {
+        admin = {
+          isNormalUser = true;
+          extraGroups = [ "wheel" ];
+          openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+        };
+        foo.isNormalUser = true;
+      };
+
+      security.pam.enableSSHAgentAuth = true;
+      security.${lib.replaceStrings [ "_" ] [ "-" ] n} = {
+        enable = true;
+        wheelNeedsPassword = true;  # We are checking `pam_ssh_agent_auth(8)` works for a sudoer
+      };
+
+      # Necessary for pam_ssh_agent_auth  >_>'
+      services.openssh.enable = true;
+    };
+    in lib.genAttrs [ "sudo" "sudo_rs" ] nodeConfig;
+
+    testScript = let
+      privateKeyPath = "/home/admin/.ssh/id_ecdsa";
+      userScript = pkgs.writeShellScript "test-script" ''
+        set -e
+        ssh-add -q ${privateKeyPath}
+
+        # faketty needed to ensure `sudo` doesn't write to the controlling PTY,
+        #  which would break the test-driver's line-oriented protocol.
+        ${lib.getExe pkgs.faketty} sudo -u foo -- id -un
+      '';
+    in ''
+      for vm in (sudo, sudo_rs):
+        sudo_impl = vm.name.replace("_", "-")
+        with subtest(f"wheel user can auth with ssh-agent for {sudo_impl}"):
+            vm.copy_from_host("${snakeOilPrivateKey}", "${privateKeyPath}")
+            vm.succeed("chmod -R 0700 /home/admin")
+            vm.succeed("chown -R admin:users /home/admin")
+
+            # Run `userScript` in an environment with an SSH-agent available
+            assert vm.succeed("sudo -u admin -- ssh-agent ${userScript} 2>&1").strip() == "foo"
+    '';
+  }
+)
diff --git a/nixos/tests/stub-ld.nix b/nixos/tests/stub-ld.nix
new file mode 100644
index 0000000000000..25161301741b7
--- /dev/null
+++ b/nixos/tests/stub-ld.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "stub-ld";
+
+  nodes.machine = { lib, ... }:
+    {
+      environment.stub-ld.enable = true;
+
+      specialisation.nostub = {
+        inheritParentConfig = true;
+
+        configuration = { ... }: {
+          environment.stub-ld.enable = lib.mkForce false;
+        };
+      };
+    };
+
+  testScript = let
+    libDir = pkgs.stdenv.hostPlatform.libDir;
+    ldsoBasename = lib.last (lib.splitString "/" pkgs.stdenv.cc.bintools.dynamicLinker);
+
+    check32 = pkgs.stdenv.isx86_64;
+    pkgs32 = pkgs.pkgsi686Linux;
+
+    libDir32 = pkgs32.stdenv.hostPlatform.libDir;
+    ldsoBasename32 = lib.last (lib.splitString "/" pkgs32.stdenv.cc.bintools.dynamicLinker);
+
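+    # Prebuilt, dynamically linked FHS binaries; without a real dynamic linker
+    # at the FHS path they exit with 127 and the stub's NixOS hint, as checked below.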
+    test-exec = builtins.mapAttrs (n: v: pkgs.runCommand "test-exec-${n}" { src = pkgs.fetchurl v; } "mkdir -p $out;cd $out;tar -xzf $src") {
+      x86_64-linux.url = "https://github.com/rustic-rs/rustic/releases/download/v0.6.1/rustic-v0.6.1-x86_64-unknown-linux-gnu.tar.gz";
+      x86_64-linux.hash = "sha256-3zySzx8MKFprMOi++yr2ZGASE0aRfXHQuG3SN+kWUCI=";
+      i686-linux.url = "https://github.com/rustic-rs/rustic/releases/download/v0.6.1/rustic-v0.6.1-i686-unknown-linux-gnu.tar.gz";
+      i686-linux.hash = "sha256-fWNiATFeg0B2pfB5zndlnzGn7Ztl8diVS1rFLEDnSLU=";
+      aarch64-linux.url = "https://github.com/rustic-rs/rustic/releases/download/v0.6.1/rustic-v0.6.1-aarch64-unknown-linux-gnu.tar.gz";
+      aarch64-linux.hash = "sha256-hnldbd2cctQIAhIKoEZLIWY8H3jiFBClkNy2UlyyvAs=";
+    };
+    exec-name = "rustic";
+
+    if32 = pythonStatement: if check32 then pythonStatement else "pass";
+  in
+    ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      with subtest("Check for stub (enabled, initial)"):
+          machine.succeed('test -L /${libDir}/${ldsoBasename}')
+          ${if32 "machine.succeed('test -L /${libDir32}/${ldsoBasename32}')"}
+
+      with subtest("Try FHS executable"):
+          machine.copy_from_host('${test-exec.${pkgs.system}}','test-exec')
+          machine.succeed('if test-exec/${exec-name} 2>outfile; then false; else [ $? -eq 127 ];fi')
+          machine.succeed('grep -qi nixos outfile')
+          ${if32 "machine.copy_from_host('${test-exec.${pkgs32.system}}','test-exec32')"}
+          ${if32 "machine.succeed('if test-exec32/${exec-name} 2>outfile32; then false; else [ $? -eq 127 ];fi')"}
+          ${if32 "machine.succeed('grep -qi nixos outfile32')"}
+
+      with subtest("Disable stub"):
+          machine.succeed("/run/booted-system/specialisation/nostub/bin/switch-to-configuration test")
+
+      with subtest("Check for stub (disabled)"):
+          machine.fail('test -e /${libDir}/${ldsoBasename}')
+          ${if32 "machine.fail('test -e /${libDir32}/${ldsoBasename32}')"}
+
+      with subtest("Create file in stub location (to be overwritten)"):
+          machine.succeed('mkdir -p /${libDir};touch /${libDir}/${ldsoBasename}')
+          ${if32 "machine.succeed('mkdir -p /${libDir32};touch /${libDir32}/${ldsoBasename32}')"}
+
+      with subtest("Re-enable stub"):
+          machine.succeed("/run/booted-system/bin/switch-to-configuration test")
+
+      with subtest("Check for stub (enabled, final)"):
+          machine.succeed('test -L /${libDir}/${ldsoBasename}')
+          ${if32 "machine.succeed('test -L /${libDir32}/${ldsoBasename32}')"}
+    '';
+})
diff --git a/nixos/tests/stunnel.nix b/nixos/tests/stunnel.nix
index 07fba435d4df6..f8cfa0414761d 100644
--- a/nixos/tests/stunnel.nix
+++ b/nixos/tests/stunnel.nix
@@ -19,8 +19,10 @@ let
   makeCert = { config, pkgs, ... }: {
     systemd.services.create-test-cert = {
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
+      serviceConfig.Type = "oneshot";
       script = ''
         ${pkgs.openssl}/bin/openssl req -batch -x509 -newkey rsa -nodes -out /test-cert.pem -keyout /test-key.pem -subj /CN=${config.networking.hostName}
         ( umask 077; cat /test-key.pem /test-cert.pem > /test-key-and-cert.pem )
diff --git a/nixos/tests/sway.nix b/nixos/tests/sway.nix
index 695d4a7708104..185c5b1b0aa90 100644
--- a/nixos/tests/sway.nix
+++ b/nixos/tests/sway.nix
@@ -134,7 +134,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     machine.wait_for_file("/tmp/sway-ipc.sock")
 
     # Test XWayland (foot does not support X):
-    swaymsg("exec WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY=invalid alacritty")
+    swaymsg("exec WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY= alacritty")
     wait_for_window("alice@machine")
     machine.send_chars("test-x11\n")
     machine.wait_for_file("/tmp/test-x11-exit-ok")
diff --git a/nixos/tests/systemd-boot.nix b/nixos/tests/systemd-boot.nix
index 256a18532b0a2..c0b37a230df0f 100644
--- a/nixos/tests/systemd-boot.nix
+++ b/nixos/tests/systemd-boot.nix
@@ -253,7 +253,7 @@ in
   };
 
   garbage-collect-entry = makeTest {
-    name = "systemd-boot-switch-test";
+    name = "systemd-boot-garbage-collect-entry";
     meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
 
     nodes = {
diff --git a/nixos/tests/systemd-initrd-networkd.nix b/nixos/tests/systemd-initrd-networkd.nix
index 9c4ddb6e4b363..691f4300d7a23 100644
--- a/nixos/tests/systemd-initrd-networkd.nix
+++ b/nixos/tests/systemd-initrd-networkd.nix
@@ -33,7 +33,8 @@ let
       boot.initrd.network.flushBeforeStage2 = flush;
       systemd.services.check-flush = {
         requiredBy = ["multi-user.target"];
-        before = ["network-pre.target" "multi-user.target"];
+        before = [ "network-pre.target" "multi-user.target" "shutdown.target" ];
+        conflicts = [ "shutdown.target" ];
         wants = ["network-pre.target"];
         unitConfig.DefaultDependencies = false;
         serviceConfig.Type = "oneshot";
diff --git a/nixos/tests/telegraf.nix b/nixos/tests/telegraf.nix
index af9c5c387a5dd..c3cdb1645213a 100644
--- a/nixos/tests/telegraf.nix
+++ b/nixos/tests/telegraf.nix
@@ -12,7 +12,6 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     services.telegraf.extraConfig = {
       agent.interval = "1s";
       agent.flush_interval = "1s";
-      inputs.procstat = {};
       inputs.exec = {
         commands = [
           "${pkgs.runtimeShell} -c 'echo $SECRET,tag=a i=42i'"
diff --git a/nixos/tests/timescaledb.nix b/nixos/tests/timescaledb.nix
index 00a7f9af09fb8..ba0a3cec6076f 100644
--- a/nixos/tests/timescaledb.nix
+++ b/nixos/tests/timescaledb.nix
@@ -52,7 +52,7 @@ let
         services.postgresql = {
           enable = true;
           package = postgresql-package;
-          extraPlugins = with postgresql-package.pkgs; [
+          extraPlugins = ps: with ps; [
             timescaledb
             timescaledb_toolkit
           ];
diff --git a/nixos/tests/tomcat.nix b/nixos/tests/tomcat.nix
index ff58ca8ac618b..df5cb033b78f0 100644
--- a/nixos/tests/tomcat.nix
+++ b/nixos/tests/tomcat.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
   name = "tomcat";
   meta.maintainers = [ lib.maintainers.anthonyroussel ];
 
diff --git a/nixos/tests/tsja.nix b/nixos/tests/tsja.nix
index 176783088d8d5..f34358ff3f5f3 100644
--- a/nixos/tests/tsja.nix
+++ b/nixos/tests/tsja.nix
@@ -11,7 +11,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
       {
         services.postgresql = {
           enable = true;
-          extraPlugins = with config.services.postgresql.package.pkgs; [
+          extraPlugins = ps: with ps; [
             tsja
           ];
         };
diff --git a/nixos/tests/unbound.nix b/nixos/tests/unbound.nix
index f6732390b4347..39a01259edeb5 100644
--- a/nixos/tests/unbound.nix
+++ b/nixos/tests/unbound.nix
@@ -106,8 +106,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
               {
                 name = ".";
                 forward-addr = [
-                  (lib.head nodes.authoritative.config.networking.interfaces.eth1.ipv6.addresses).address
-                  (lib.head nodes.authoritative.config.networking.interfaces.eth1.ipv4.addresses).address
+                  (lib.head nodes.authoritative.networking.interfaces.eth1.ipv6.addresses).address
+                  (lib.head nodes.authoritative.networking.interfaces.eth1.ipv4.addresses).address
                 ];
               }
             ];
@@ -168,8 +168,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
           "unbound-extra1.conf".text = ''
             forward-zone:
             name: "example.local."
-            forward-addr: ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address}
-            forward-addr: ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}
+            forward-addr: ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address}
+            forward-addr: ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}
           '';
           "unbound-extra2.conf".text = ''
             auth-zone:
@@ -187,8 +187,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       client = { lib, nodes, ... }: {
         imports = [ common ];
         networking.nameservers = [
-          (lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address
-          (lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address
+          (lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address
+          (lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address
         ];
         networking.interfaces.eth1.ipv4.addresses = [
           { address = "192.168.0.10"; prefixLength = 24; }
@@ -276,7 +276,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       resolver.wait_for_unit("multi-user.target")
 
       with subtest("client should be able to query the resolver"):
-          test(client, ["${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address}", "${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}"], doh=True)
+          test(client, ["${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address}", "${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}"], doh=True)
 
       # discard the client we do not need anymore
       client.shutdown()
@@ -298,7 +298,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
           ).strip()
 
           # Thank you black! Can't really break this line into a readable version.
-          expected = "example.local. IN forward ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address} ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}"
+          expected = "example.local. IN forward ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address} ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}"
           assert out == expected, f"Expected `{expected}` but got `{out}` instead."
           local_resolver.fail("sudo -u unauthorizeduser -- unbound-control list_forwards")
 
diff --git a/nixos/tests/varnish.nix b/nixos/tests/varnish.nix
index 9dcdeec9d8c8f..76cea1ada5477 100644
--- a/nixos/tests/varnish.nix
+++ b/nixos/tests/varnish.nix
@@ -3,12 +3,12 @@
 , pkgs ? import ../.. { inherit system; }
 , package
 }:
-import ./make-test-python.nix ({ pkgs, ... }: let
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
   testPath = pkgs.hello;
 in {
   name = "varnish";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ ajs124 ];
+  meta = {
+    maintainers = lib.teams.helsinki-systems.members;
   };
 
   nodes = {
diff --git a/nixos/tests/zammad.nix b/nixos/tests/zammad.nix
index 7a2d40e82b3ed..faae1949e37b9 100644
--- a/nixos/tests/zammad.nix
+++ b/nixos/tests/zammad.nix
@@ -4,9 +4,13 @@ import ./make-test-python.nix (
   {
     name = "zammad";
 
-    meta.maintainers = with lib.maintainers; [ garbas taeer n0emis ];
+    meta.maintainers = with lib.maintainers; [ taeer n0emis netali ];
 
     nodes.machine = { config, ... }: {
+      virtualisation = {
+        memorySize = 2048;
+      };
+
       services.zammad.enable = true;
       services.zammad.secretKeyBaseFile = pkgs.writeText "secret" ''
         52882ef142066e09ab99ce816ba72522e789505caba224a52d750ec7dc872c2c371b2fd19f16b25dfbdd435a4dd46cb3df9f82eb63fafad715056bdfe25740d6
@@ -44,9 +48,10 @@ import ./make-test-python.nix (
     testScript = ''
       start_all()
       machine.wait_for_unit("postgresql.service")
+      machine.wait_for_unit("redis-zammad.service")
       machine.wait_for_unit("zammad-web.service")
       machine.wait_for_unit("zammad-websocket.service")
-      machine.wait_for_unit("zammad-scheduler.service")
+      machine.wait_for_unit("zammad-worker.service")
       # wait for zammad to fully come up
       machine.sleep(120)
 
diff --git a/nixos/tests/zfs.nix b/nixos/tests/zfs.nix
index ad4ea254f34d7..8fedcf095af69 100644
--- a/nixos/tests/zfs.nix
+++ b/nixos/tests/zfs.nix
@@ -19,7 +19,7 @@ let
     makeTest {
       name = "zfs-" + name;
       meta = with pkgs.lib.maintainers; {
-        maintainers = [ adisbladis elvishjerricco ];
+        maintainers = [ elvishjerricco ];
       };
 
       nodes.machine = { config, pkgs, lib, ... }:
@@ -210,6 +210,7 @@ in {
     enableSystemdStage1 = true;
   };
 
+  installerBoot = (import ./installer.nix { }).separateBootZfs;
   installer = (import ./installer.nix { }).zfsroot;
 
   expand-partitions = makeTest {