Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/configuration/x-windows.chapter.md | 10
-rw-r--r--  nixos/doc/manual/default.nix | 7
-rw-r--r--  nixos/doc/manual/development/writing-nixos-tests.section.md | 2
-rw-r--r--  nixos/doc/manual/installation/installing.chapter.md | 3
-rw-r--r--  nixos/doc/manual/release-notes/rl-2405.section.md | 71
-rw-r--r--  nixos/lib/make-iso9660-image.nix | 25
-rw-r--r--  nixos/lib/make-iso9660-image.sh | 5
-rw-r--r--  nixos/lib/make-options-doc/default.nix | 99
-rw-r--r--  nixos/lib/systemd-lib.nix | 15
-rw-r--r--  nixos/lib/test-driver/test_driver/machine.py | 1
-rw-r--r--  nixos/lib/utils.nix | 4
-rw-r--r--  nixos/modules/config/nix.nix | 11
-rw-r--r--  nixos/modules/config/resolvconf.nix | 2
-rw-r--r--  nixos/modules/config/users-groups.nix | 36
-rw-r--r--  nixos/modules/hardware/video/nvidia.nix | 13
-rw-r--r--  nixos/modules/hardware/video/webcam/ipu6.nix | 5
-rw-r--r--  nixos/modules/i18n/input-method/fcitx5.nix | 4
-rw-r--r--  nixos/modules/image/repart-image.nix | 85
-rw-r--r--  nixos/modules/image/repart.nix | 42
-rw-r--r--  nixos/modules/installer/cd-dvd/iso-image.nix | 11
-rw-r--r--  nixos/modules/installer/tools/nixos-generate-config.pl | 2
-rw-r--r--  nixos/modules/misc/mandoc.nix | 34
-rw-r--r--  nixos/modules/module-list.nix | 12
-rw-r--r--  nixos/modules/profiles/all-hardware.nix | 9
-rw-r--r--  nixos/modules/profiles/macos-builder.nix | 5
-rw-r--r--  nixos/modules/programs/_1password-gui.nix | 8
-rw-r--r--  nixos/modules/programs/clash-verge.nix | 7
-rw-r--r--  nixos/modules/programs/starship.nix | 29
-rw-r--r--  nixos/modules/programs/steam.nix | 17
-rw-r--r--  nixos/modules/security/pam.nix | 2
-rw-r--r--  nixos/modules/services/audio/spotifyd.nix | 4
-rw-r--r--  nixos/modules/services/audio/wyoming/faster-whisper.nix | 17
-rw-r--r--  nixos/modules/services/backup/bacula.nix | 222
-rw-r--r--  nixos/modules/services/continuous-integration/hydra/default.nix | 34
-rw-r--r--  nixos/modules/services/databases/lldap.nix | 17
-rw-r--r--  nixos/modules/services/databases/postgresql.md | 2
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 2
-rw-r--r--  nixos/modules/services/desktop-managers/plasma6.nix (renamed from nixos/modules/services/x11/desktop-managers/plasma6.nix) | 21
-rw-r--r--  nixos/modules/services/desktops/flatpak.nix | 2
-rw-r--r--  nixos/modules/services/desktops/pipewire/pipewire.nix | 149
-rw-r--r--  nixos/modules/services/desktops/pipewire/wireplumber.nix | 113
-rw-r--r--  nixos/modules/services/development/hoogle.nix | 15
-rw-r--r--  nixos/modules/services/display-managers/greetd.nix | 2
-rw-r--r--  nixos/modules/services/matrix/synapse.nix | 3
-rw-r--r--  nixos/modules/services/misc/etebase-server.nix | 2
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 11
-rw-r--r--  nixos/modules/services/misc/llama-cpp.nix | 2
-rw-r--r--  nixos/modules/services/misc/ollama.nix | 60
-rw-r--r--  nixos/modules/services/misc/paperless.nix | 16
-rw-r--r--  nixos/modules/services/misc/tandoor-recipes.nix | 6
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters.nix | 7
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/artifactory.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/bind.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/bird.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/bitcoin.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/buildkite-agent.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/collectd.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/domain.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/fastly.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/flow.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/fritz.nix | 97
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/graphite.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/idrac.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/imap-mailstat.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/influxdb.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/ipmi.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/jitsi.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/json.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/junos-czerwonk.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/kea.nix | 22
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/keylight.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/knot.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/lnd.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/mail.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/minio.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/modemmanager.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/nginx.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/nginxlog.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/node.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/nut.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/pgbouncer.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/php-fpm.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/pihole.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/ping.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/postfix.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/postgres.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/process.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/pve.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/py-air-control.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/redis.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/restic.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/rtl_433.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/scaphandre.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/script.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/shelly.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/smartctl.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/smokeping.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/snmp.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/sql.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/statsd.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/tor.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/unbound.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/unifi.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/unpoller.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/varnish.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/zfs.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/scrutiny.nix | 85
-rw-r--r--  nixos/modules/services/networking/dnscache.nix | 6
-rw-r--r--  nixos/modules/services/networking/dnsproxy.nix | 106
-rw-r--r--  nixos/modules/services/networking/microsocks.nix | 146
-rw-r--r--  nixos/modules/services/networking/mihomo.nix | 118
-rw-r--r--  nixos/modules/services/networking/murmur.nix | 2
-rw-r--r--  nixos/modules/services/networking/mycelium.nix | 133
-rw-r--r--  nixos/modules/services/networking/nebula.nix | 34
-rw-r--r--  nixos/modules/services/networking/networkmanager.nix | 30
-rw-r--r--  nixos/modules/services/networking/scion/scion-control.nix | 69
-rw-r--r--  nixos/modules/services/networking/scion/scion-daemon.nix | 64
-rw-r--r--  nixos/modules/services/networking/scion/scion-dispatcher.nix | 74
-rw-r--r--  nixos/modules/services/networking/scion/scion-router.nix | 49
-rw-r--r--  nixos/modules/services/networking/scion/scion.nix | 39
-rw-r--r--  nixos/modules/services/networking/tinyproxy.nix | 1
-rw-r--r--  nixos/modules/services/networking/unbound.nix | 9
-rw-r--r--  nixos/modules/services/security/esdm.nix | 101
-rw-r--r--  nixos/modules/services/security/oauth2_proxy_nginx.nix | 35
-rw-r--r--  nixos/modules/services/security/yubikey-agent.nix | 2
-rw-r--r--  nixos/modules/services/video/photonvision.nix | 64
-rw-r--r--  nixos/modules/services/web-apps/engelsystem.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/gotosocial.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/hedgedoc.nix | 8
-rw-r--r--  nixos/modules/services/web-apps/kavita.nix | 63
-rw-r--r--  nixos/modules/services/web-apps/komga.nix | 145
-rw-r--r--  nixos/modules/services/web-apps/nextcloud.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/pretix.nix | 581
-rw-r--r--  nixos/modules/services/web-apps/slskd.nix | 326
-rw-r--r--  nixos/modules/services/web-apps/suwayomi-server.md | 3
-rw-r--r--  nixos/modules/services/web-apps/suwayomi-server.nix | 11
-rw-r--r--  nixos/modules/services/web-servers/garage.nix | 14
-rw-r--r--  nixos/modules/services/web-servers/stargazer.nix | 8
-rw-r--r--  nixos/modules/services/x11/desktop-managers/budgie.nix | 9
-rw-r--r--  nixos/modules/services/x11/desktop-managers/deepin.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/default.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/lxqt.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/mate.nix | 126
-rw-r--r--  nixos/modules/services/x11/desktop-managers/plasma5.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/xfce.nix | 2
-rw-r--r--  nixos/modules/services/x11/display-managers/sddm.nix | 131
-rw-r--r--  nixos/modules/services/x11/window-managers/nimdow.nix | 11
-rw-r--r--  nixos/modules/services/x11/window-managers/xmonad.nix | 2
-rw-r--r--  nixos/modules/services/x11/xserver.nix | 4
-rw-r--r--  nixos/modules/system/boot/binfmt.nix | 1
-rw-r--r--  nixos/modules/system/boot/kernel.nix | 5
-rw-r--r--  nixos/modules/system/boot/luksroot.nix | 6
-rw-r--r--  nixos/modules/system/boot/networkd.nix | 4
-rw-r--r--  nixos/modules/system/boot/plymouth.nix | 6
-rw-r--r--  nixos/modules/system/boot/stage-1.nix | 10
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 2
-rw-r--r--  nixos/modules/system/boot/systemd/initrd.nix | 47
-rw-r--r--  nixos/modules/system/boot/timesyncd.nix | 3
-rw-r--r--  nixos/modules/system/boot/uki.nix | 17
-rw-r--r--  nixos/modules/tasks/filesystems/envfs.nix | 4
-rw-r--r--  nixos/modules/testing/test-instrumentation.nix | 10
-rw-r--r--  nixos/modules/virtualisation/amazon-ec2-amis.nix | 68
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix | 4
-rw-r--r--  nixos/modules/virtualisation/incus.nix | 162
-rw-r--r--  nixos/modules/virtualisation/nixos-containers.nix | 6
-rw-r--r--  nixos/release-combined.nix | 5
-rw-r--r--  nixos/tests/all-tests.nix | 21
-rw-r--r--  nixos/tests/armagetronad.nix | 12
-rw-r--r--  nixos/tests/budgie.nix | 51
-rw-r--r--  nixos/tests/drawterm.nix | 15
-rw-r--r--  nixos/tests/etcd/etcd-cluster.nix (renamed from nixos/tests/etcd-cluster.nix) | 2
-rw-r--r--  nixos/tests/etcd/etcd.nix (renamed from nixos/tests/etcd.nix) | 2
-rw-r--r--  nixos/tests/firefoxpwa.nix | 36
-rw-r--r--  nixos/tests/freetube.nix | 2
-rw-r--r--  nixos/tests/goss.nix | 8
-rw-r--r--  nixos/tests/hibernate.nix | 2
-rw-r--r--  nixos/tests/incus/container.nix | 15
-rw-r--r--  nixos/tests/incus/default.nix | 20
-rw-r--r--  nixos/tests/incus/lxd-to-incus.nix | 2
-rw-r--r--  nixos/tests/incus/openvswitch.nix | 65
-rw-r--r--  nixos/tests/incus/storage.nix | 46
-rw-r--r--  nixos/tests/installer-systemd-stage-1.nix | 1
-rw-r--r--  nixos/tests/installer.nix | 42
-rw-r--r--  nixos/tests/k3s/multi-node.nix | 4
-rw-r--r--  nixos/tests/k3s/single-node.nix | 4
-rw-r--r--  nixos/tests/kavita.nix | 46
-rw-r--r--  nixos/tests/kea.nix | 26
-rw-r--r--  nixos/tests/keycloak.nix | 32
-rw-r--r--  nixos/tests/krb5/default.nix | 3
-rw-r--r--  nixos/tests/ladybird.nix | 2
-rw-r--r--  nixos/tests/make-test-python.nix | 2
-rw-r--r--  nixos/tests/mate-wayland.nix | 63
-rw-r--r--  nixos/tests/mate.nix | 9
-rw-r--r--  nixos/tests/mihomo.nix | 44
-rw-r--r--  nixos/tests/miriway.nix | 4
-rw-r--r--  nixos/tests/monetdb.nix | 77
-rw-r--r--  nixos/tests/mycelium/default.nix | 57
-rw-r--r--  nixos/tests/mycelium/peer1.key | 1
-rw-r--r--  nixos/tests/mycelium/peer2.key | 1
-rw-r--r--  nixos/tests/nebula.nix | 6
-rw-r--r--  nixos/tests/nimdow.nix | 25
-rw-r--r--  nixos/tests/nix-config.nix | 18
-rw-r--r--  nixos/tests/nix-ld.nix | 52
-rw-r--r--  nixos/tests/nixops/default.nix | 18
-rw-r--r--  nixos/tests/ollama.nix | 56
-rw-r--r--  nixos/tests/opensearch.nix | 30
-rw-r--r--  nixos/tests/pg_anonymizer.nix | 94
-rw-r--r--  nixos/tests/pgvecto-rs.nix | 76
-rw-r--r--  nixos/tests/photonvision.nix | 21
-rw-r--r--  nixos/tests/privoxy.nix | 13
-rw-r--r--  nixos/tests/prometheus-exporters.nix | 48
-rw-r--r--  nixos/tests/redlib.nix | 20
-rw-r--r--  nixos/tests/scion/freestanding-deployment/README.rst | 12
-rw-r--r--  nixos/tests/scion/freestanding-deployment/default.nix | 172
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology1.json | 51
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology2.json | 51
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology3.json | 60
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology4.json | 40
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology5.json | 40
-rw-r--r--  nixos/tests/systemd-machinectl.nix | 41
-rw-r--r--  nixos/tests/systemd-user-linger.nix | 39
-rw-r--r--  nixos/tests/tracee.nix | 93
-rw-r--r--  nixos/tests/ustreamer.nix | 75
-rw-r--r--  nixos/tests/vscodium.nix | 2
-rw-r--r--  nixos/tests/web-apps/gotosocial.nix | 2
-rw-r--r--  nixos/tests/web-apps/pretix.nix | 47
242 files changed, 5246 insertions, 1192 deletions
diff --git a/nixos/doc/manual/configuration/x-windows.chapter.md b/nixos/doc/manual/configuration/x-windows.chapter.md
index bf1872ae01ace..0e8e38b83dcdc 100644
--- a/nixos/doc/manual/configuration/x-windows.chapter.md
+++ b/nixos/doc/manual/configuration/x-windows.chapter.md
@@ -146,14 +146,12 @@ default because it's not free software. You can enable it as follows:
 services.xserver.videoDrivers = [ "nvidia" ];
 ```
 
-Or if you have an older card, you may have to use one of the legacy
-drivers:
+If you have an older card, you may have to use one of the legacy drivers:
 
 ```nix
-services.xserver.videoDrivers = [ "nvidiaLegacy470" ];
-services.xserver.videoDrivers = [ "nvidiaLegacy390" ];
-services.xserver.videoDrivers = [ "nvidiaLegacy340" ];
-services.xserver.videoDrivers = [ "nvidiaLegacy304" ];
+hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_470;
+hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_390;
+hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_340;
 ```
 
 You may need to reboot after enabling this driver to prevent a clash
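The two settings shown in this chapter are meant to be combined; a minimal sketch of a configuration that selects one of the legacy drivers (attribute names as used in the hunk above):

```nix
{ config, ... }:
{
  services.xserver.videoDrivers = [ "nvidia" ];
  # Pick exactly one legacy package matching the GPU generation.
  hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_470;
}
```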
diff --git a/nixos/doc/manual/default.nix b/nixos/doc/manual/default.nix
index a368b16201f8f..5f51bb53ad7fc 100644
--- a/nixos/doc/manual/default.nix
+++ b/nixos/doc/manual/default.nix
@@ -105,7 +105,9 @@ in rec {
       mkdir -p $dst
 
       cp ${../../../doc/style.css} $dst/style.css
-      cp ${../../../doc/overrides.css} $dst/overrides.css
+      cp ${../../../doc/anchor.min.js} $dst/anchor.min.js
+      cp ${../../../doc/anchor-use.js} $dst/anchor-use.js
+
       cp -r ${pkgs.documentation-highlighter} $dst/highlightjs
 
       ${prepareManualFromMD}
@@ -115,10 +117,11 @@ in rec {
         --revision ${lib.escapeShellArg revision} \
         --generator "nixos-render-docs ${lib.version}" \
         --stylesheet style.css \
-        --stylesheet overrides.css \
         --stylesheet highlightjs/mono-blue.css \
         --script ./highlightjs/highlight.pack.js \
         --script ./highlightjs/loader.js \
+        --script ./anchor.min.js \
+        --script ./anchor-use.js \
         --toc-depth 1 \
         --chunk-toc-depth 1 \
         ./manual.md \
diff --git a/nixos/doc/manual/development/writing-nixos-tests.section.md b/nixos/doc/manual/development/writing-nixos-tests.section.md
index 84b247fd2042f..50886376c2409 100644
--- a/nixos/doc/manual/development/writing-nixos-tests.section.md
+++ b/nixos/doc/manual/development/writing-nixos-tests.section.md
@@ -261,7 +261,7 @@ added using the parameter `extraPythonPackages`. For example, you could add
 
   testScript = ''
     import numpy as np
-    assert str(np.zeros(4) == "array([0., 0., 0., 0.])")
+    assert str(np.zeros(4)) == "[0. 0. 0. 0.]"
   '';
 }
 ```
diff --git a/nixos/doc/manual/installation/installing.chapter.md b/nixos/doc/manual/installation/installing.chapter.md
index 815bcc071cd9c..c7deb07352f1c 100644
--- a/nixos/doc/manual/installation/installing.chapter.md
+++ b/nixos/doc/manual/installation/installing.chapter.md
@@ -272,6 +272,9 @@ update /etc/fstab.
     # parted /dev/sda -- mkpart ESP fat32 1MB 512MB
     # parted /dev/sda -- set 3 esp on
     ```
+    ::: {.note}
+    If you chose not to create a swap partition, replace `3` with `2`. To verify the ESP's partition number, run `parted --list`.
+    :::
 
 Once complete, you can follow with
 [](#sec-installation-manual-partitioning-formatting).
diff --git a/nixos/doc/manual/release-notes/rl-2405.section.md b/nixos/doc/manual/release-notes/rl-2405.section.md
index 72389518b1ea3..4ad01e2b158c1 100644
--- a/nixos/doc/manual/release-notes/rl-2405.section.md
+++ b/nixos/doc/manual/release-notes/rl-2405.section.md
@@ -32,6 +32,11 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - Julia environments can now be built with arbitrary packages from the ecosystem using the `.withPackages` function. For example: `julia.withPackages ["Plots"]`.
 
+- The PipeWire and WirePlumber modules have removed support for using
+  `environment.etc."pipewire/..."` and `environment.etc."wireplumber/..."`.
+  Use `services.pipewire.extraConfig` or `services.pipewire.configPackages` for PipeWire and
+  `services.pipewire.wireplumber.configPackages` for WirePlumber instead (see the example below).
+
 - A new option `systemd.sysusers.enable` was added. If enabled, users and
   groups are created with systemd-sysusers instead of with a custom perl script.
 
@@ -42,6 +47,12 @@ In addition to numerous new and upgraded packages, this release has the followin
 - A new option `system.etc.overlay.enable` was added. If enabled, `/etc` is
   mounted via an overlayfs instead of being created by a custom perl script.
 
+- NixOS AMIs are now uploaded regularly to a new AWS Account.
+  Instructions on how to use them can be found on <https://nixos.github.io/amis>.
+  We are working on integrating this data into the NixOS homepage.
+  The list in `nixos/modules/virtualisation/amazon-ec2-amis.nix` will stop
+  being updated and will be removed in the future.
+
 - It is now possible to have a completely perlless system (i.e. a system
   without perl). Previously, the NixOS activation depended on two perl scripts
   which can now be replaced via an opt-in mechanism. To make your system
@@ -62,6 +73,8 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - [Guix](https://guix.gnu.org), a functional package manager inspired by Nix. Available as [services.guix](#opt-services.guix.enable).
 
+- [PhotonVision](https://photonvision.org/), a free, fast, and easy-to-use computer vision solution for the FIRST® Robotics Competition.
+
 - [pyLoad](https://pyload.net/), a FOSS download manager written in Python. Available as [services.pyload](#opt-services.pyload.enable)
 
 - [maubot](https://github.com/maubot/maubot), a plugin-based Matrix bot framework. Available as [services.maubot](#opt-services.maubot.enable).
@@ -72,10 +85,14 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - [pretalx](https://github.com/pretalx/pretalx), a conference planning tool. Available as [services.pretalx](#opt-services.pretalx.enable).
 
+- [dnsproxy](https://github.com/AdguardTeam/dnsproxy), a simple DNS proxy with DoH, DoT, DoQ and DNSCrypt support. Available as [services.dnsproxy](#opt-services.dnsproxy.enable).
+
 - [rspamd-trainer](https://gitlab.com/onlime/rspamd-trainer), script triggered by a helper which reads mails from a specific mail inbox and feeds them into rspamd for spam/ham training.
 
 - [ollama](https://ollama.ai), server for running large language models locally.
 
+- [Mihomo](https://github.com/MetaCubeX/mihomo), a rule-based proxy in Go. Available as [services.mihomo.enable](#opt-services.mihomo.enable).
+
 - [hebbot](https://github.com/haecker-felix/hebbot), a Matrix bot to generate "This Week in X" like blog posts. Available as [services.hebbot](#opt-services.hebbot.enable).
 
 - [Python Matter Server](https://github.com/home-assistant-libs/python-matter-server), a
@@ -97,8 +114,14 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - [Monado](https://monado.freedesktop.org/), an open source XR runtime. Available as [services.monado](#opt-services.monado.enable).
 
+- [Pretix](https://pretix.eu/about/en/), open source ticketing software for events. Available as [services.pretix](#opt-services.pretix.enable).
+
+- [microsocks](https://github.com/rofl0r/microsocks), a tiny, portable SOCKS5 server with very moderate resource usage. Available as [services.microsocks](#opt-services.microsocks.enable).
+
 - [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable).
 
+- [fritz-exporter](https://github.com/pdreker/fritz_exporter), a Prometheus exporter for extracting metrics from [FRITZ!](https://avm.de/produkte/) devices. Available as [services.prometheus.exporters.fritz](#opt-services.prometheus.exporters.fritz.enable).
+
 - [armagetronad](https://wiki.armagetronad.org), a mid-2000s 3D lightcycle game widely played at iD Tech Camps. You can define multiple servers using `services.armagetronad.<server>.enable`.
 
 - [TuxClocker](https://github.com/Lurkki14/tuxclocker), a hardware control and monitoring program. Available as [programs.tuxclocker](#opt-programs.tuxclocker.enable).
@@ -117,13 +140,15 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
+- `k3s` was updated to version [v1.29](https://github.com/k3s-io/k3s/releases/tag/v1.29.1%2Bk3s2); all previous versions (k3s_1_26, k3s_1_27, k3s_1_28) will be removed. See [changelog and upgrade notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#urgent-upgrade-notes) for more information.
+
 - `himalaya` was updated to `v1.0.0-beta.3`, which introduces breaking changes. Check out the [release note](https://github.com/soywod/himalaya/releases/tag/v1.0.0-beta.3) for details.
 
 - The `power.ups` module now generates `upsd.conf`, `upsd.users` and `upsmon.conf` automatically from a set of new configuration options. This breaks compatibility with existing `power.ups` setups where these files were created manually. Back up these files before upgrading NixOS.
 
-- `unrar` was updated to v7. See [changelog](https://www.rarlab.com/unrar7notes.htm) for more information.
+- `pdns` was updated to version [v4.9.x](https://doc.powerdns.com/authoritative/changelog/4.9.html), which introduces breaking changes. Check out the [Upgrade Notes](https://doc.powerdns.com/authoritative/upgrading.html#to-4-9-0) for details.
 
-- `k3s` was updated to [v1.29](https://github.com/k3s-io/k3s/releases/tag/v1.29.1%2Bk3s2). See [changelog and upgrade notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#urgent-upgrade-notes) for more information.
+- `unrar` was updated to v7. See [changelog](https://www.rarlab.com/unrar7notes.htm) for more information.
 
 - `k9s` was updated to v0.31. There have been various breaking changes in the config file format,
   check out the changelog of [v0.29](https://github.com/derailed/k9s/releases/tag/v0.29.0),
@@ -131,6 +156,12 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
   [v0.31](https://github.com/derailed/k9s/releases/tag/v0.31.0) for details. It is recommended
   to back up your current configuration and let k9s recreate the new base configuration.
 
+- NixOS AMIs are now uploaded regularly to a new AWS Account.
+  Instructions on how to use them can be found on <https://nixos.github.io/amis>.
+  We are working on integrating this data into the NixOS homepage.
+  The list in `nixos/modules/virtualisation/amazon-ec2-amis.nix` will stop
+  being updated and will be removed in the future.
+
 - The option `services.postgresql.ensureUsers._.ensurePermissions` has been removed as it's
   not declarative and is broken with newer postgresql versions. Consider using
   [](#opt-services.postgresql.ensureUsers._.ensureDBOwnership)
@@ -138,6 +169,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - `idris2` was updated to v0.7.0. This version introduces breaking changes. Check out the [changelog](https://github.com/idris-lang/Idris2/blob/v0.7.0/CHANGELOG.md#v070) for details.
 
+- The `nvtop` family of packages was reorganized into a nested attribute set. `nvtop` has been renamed to `nvtopPackages.full`, and the `nvtop-{amd,nvidia,intel,msm}` packages are now named `nvtopPackages.{amd,nvidia,intel,msm}`.
+
 - `neo4j` has been updated to 5, you may want to read the [release notes for Neo4j 5](https://neo4j.com/release-notes/database/neo4j-5/)
 
 - `services.neo4j.allowUpgrade` was removed and no longer has any effect. Neo4j 5 supports automatic rolling upgrades.
@@ -155,6 +188,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - Invidious has changed its default database username from `kemal` to `invidious`. Setups involving an externally provisioned database (i.e. `services.invidious.database.createLocally == false`) should adjust their configuration accordingly. The old `kemal` user will not be removed automatically even when the database is provisioned automatically.(https://github.com/NixOS/nixpkgs/pull/265857)
 
+- `writeReferencesToFile` is deprecated in favour of the new trivial build helper `writeClosure`. The latter accepts a list of paths and has an unambiguous name and cleaner implementation.
+
 - `inetutils` now has a lower priority to avoid shadowing the commonly used `util-linux`. If one wishes to restore the default priority, simply use `lib.setPrio 5 inetutils` or override with `meta.priority = 5`.
 
 - `paperless`' `services.paperless.extraConfig` setting has been removed and converted to the freeform type and option named `services.paperless.settings`.
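As a concrete illustration of the `services.paperless.settings` change mentioned above, a hedged sketch; the environment variable names and values are examples only:

```nix
{
  services.paperless = {
    enable = true;
    # Freeform settings; these would previously have been lines in extraConfig.
    settings = {
      PAPERLESS_OCR_LANGUAGE = "deu+eng";
      PAPERLESS_CONSUMER_IGNORE_PATTERN = [ ".DS_Store" ];
    };
  };
}
```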
@@ -302,13 +337,23 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
   - The `erlang_node_short_name`, `erlang_node_name`, `port` and `options` configuration parameters are gone, and have been replaced with an `environment` parameter.
     Use the appropriate [environment variables](https://hexdocs.pm/livebook/readme.html#environment-variables) inside `environment` to configure the service instead.
 
+- The `crystal` package has been updated to 1.11.x, which has some breaking changes.
+  Refer to crystal's changelog for more information. ([v1.10](https://github.com/crystal-lang/crystal/blob/master/CHANGELOG.md#1100-2023-10-09), [v1.11](https://github.com/crystal-lang/crystal/blob/master/CHANGELOG.md#1110-2024-01-08))
+
 ## Other Notable Changes {#sec-release-24.05-notable-changes}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
 - `addDriverRunpath` has been added to facilitate the deprecation of the old `addOpenGLRunpath` setuphook. This change is motivated by the evolution of the setuphook to include all hardware acceleration.
 
-- Cinnamon has been updated to 6.0. Please beware that the [Wayland session](https://blog.linuxmint.com/?p=4591) is still experimental in this release.
+- Cinnamon has been updated to 6.0. Please be aware that the [Wayland session](https://blog.linuxmint.com/?p=4591) is still experimental in this release and could potentially [affect Xorg sessions](https://blog.linuxmint.com/?p=4639). We suggest a reboot when switching between sessions.
+
+- MATE has been updated to 1.28.
+  - To properly support panel plugins built with Wayland (in-process) support, the new `services.xserver.desktopManager.mate.extraPanelApplets` option has been introduced; please use it to install panel applets (see the example below).
+  - Similarly, please use the `services.xserver.desktopManager.mate.extraCajaExtensions` option to install Caja extensions.
+  - To use the Wayland session, enable `services.xserver.desktopManager.mate.enableWaylandSession`. This is opt-in for now, as it is at an early stage and pulls in a new Wayfire closure. Due to [known issues with LightDM](https://github.com/canonical/lightdm/issues/63), we suggest using SDDM as the display manager.
+
+- The Budgie module installs gnome-terminal by default (instead of mate-terminal).
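A hedged sketch of the new MATE options referenced above; the applet and extension packages chosen here are assumptions for illustration:

```nix
{ pkgs, ... }:
{
  services.xserver.desktopManager.mate = {
    enable = true;
    extraPanelApplets = with pkgs.mate; [ mate-applets ];      # hypothetical applet selection
    extraCajaExtensions = with pkgs.mate; [ caja-extensions ]; # hypothetical extension selection
  };
}
```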
 
 - New `boot.loader.systemd-boot.xbootldrMountPoint` allows setting up a separate [XBOOTLDR partition](https://uapi-group.org/specifications/specs/boot_loader_specification/) to store boot files. Useful on systems with a small EFI System partition that cannot be easily repartitioned.
 
@@ -335,6 +380,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - [Lilypond](https://lilypond.org/index.html) and [Denemo](https://www.denemo.org) are now compiled with Guile 3.0.
 
+- The EC2 image module now enables the [Amazon SSM Agent](https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-agent.html) by default.
+
 - The following options of the Nextcloud module were moved into [`services.nextcloud.settings`](#opt-services.nextcloud.settings) and renamed to match the name from Nextcloud's `config.php`:
   - `logLevel` -> [`loglevel`](#opt-services.nextcloud.settings.loglevel),
   - `logType` -> [`log_type`](#opt-services.nextcloud.settings.log_type),
@@ -377,6 +424,9 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - [Nginx virtual hosts](#opt-services.nginx.virtualHosts) using `forceSSL` or
   `globalRedirect` can now have redirect codes other than 301 through
   `redirectCode`.
+
+- `bacula` now allows configuring `TLS` for encrypted communication.
+
 
 - `libjxl` 0.9.0 [dropped support for the butteraugli API](https://github.com/libjxl/libjxl/pull/2576). You will no longer be able to set `enableButteraugli` on `libaom`.
@@ -396,6 +446,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - `nextcloud-setup.service` no longer changes the group of each file & directory inside `/var/lib/nextcloud/{config,data,store-apps}` if one of these directories has the wrong owner group. This was part of transitioning the group used for `/var/lib/nextcloud`, but isn't necessary anymore.
 
+- `services.kavita` now uses the freeform option `services.kavita.settings` for the application settings file.
+  The options `services.kavita.ipAdresses` and `services.kavita.port` now exist at `services.kavita.settings.IpAddresses`
+  and `services.kavita.settings.Port`.
+
 - The `krb5` module has been rewritten and moved to `security.krb5`, moving all options but `security.krb5.enable` and `security.krb5.package` into `security.krb5.settings`.
 
 - Gitea 1.21 upgrade has several breaking changes, including:
@@ -410,6 +464,12 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - The module `services.github-runner` has been removed. To configure a single GitHub Actions Runner refer to `services.github-runners.*`. Note that this will trigger a new runner registration.
 
+- The `services.slskd` module has been refactored to include more configuration options in
+  the freeform `services.slskd.settings` option, and some defaults (including listen ports)
+  have been changed to match the upstream defaults. Additionally, disk logging is now
+  disabled by default, and the log rotation timer has been removed.
+  The nginx virtualhost option is now of the `vhost-options` type.
+
 - The `btrbk` module now automatically selects and provides required compression
   program depending on the configured `stream_compress` option. Since this
   replaces the need for the `extraPackages` option, this option will be
@@ -427,4 +487,7 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 - QtMultimedia has changed its default backend to `QT_MEDIA_BACKEND=ffmpeg` (previously `gstreamer` on Linux or `darwin` on MacOS).
   The previous native backends remain available but are now minimally maintained. Refer to [upstream documentation](https://doc.qt.io/qt-6/qtmultimedia-index.html#ffmpeg-as-the-default-backend) for further details about each platform.
 
-- The oil shell is now using the c++ version by default. The python based build is still available as `oil-python`
+- The oil shell's C++ version is now available as `oils-for-unix`. The Python version is still available as `oil`.
+
+- `documentation.man.mandoc` now by default uses `MANPATH` to set the directories where mandoc will search for manual pages.
+  This enables mandoc to find manual pages in Nix profiles. To set the manual search paths via the `mandoc.conf` configuration file like before, use `documentation.man.mandoc.settings.manpath` instead.
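To make the mandoc entry above concrete, a small sketch of restoring the previous behaviour of configuring search paths through `mandoc.conf`; the path value is only an example:

```nix
{
  documentation.man.mandoc = {
    enable = true;
    # Written to /etc/mandoc.conf instead of relying on MANPATH.
    settings.manpath = [ "/run/current-system/sw/share/man" ];
  };
}
```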
diff --git a/nixos/lib/make-iso9660-image.nix b/nixos/lib/make-iso9660-image.nix
index 2f7dcf519a16f..ec520f5706822 100644
--- a/nixos/lib/make-iso9660-image.nix
+++ b/nixos/lib/make-iso9660-image.nix
@@ -1,4 +1,4 @@
-{ stdenv, closureInfo, xorriso, syslinux, libossp_uuid
+{ lib, stdenv, callPackage, closureInfo, xorriso, syslinux, libossp_uuid, squashfsTools
 
 , # The file name of the resulting ISO image.
   isoName ? "cd.iso"
@@ -16,6 +16,17 @@
   # symlink to `object' that will be added to the CD.
   storeContents ? []
 
+, # In addition to `contents', the closure of the store paths listed
+  # in `squashfsContents' is compressed as squashfs and the result is
+  # placed in /nix-store.squashfs on the CD.
+  # FIXME: This is a performance optimization to avoid Hydra copying
+  # the squashfs between builders and should be removed when Hydra
+  # is smarter about scheduling.
+  squashfsContents ? []
+
+, # Compression settings for squashfs
+  squashfsCompression ? "xz -Xdict-size 100%"
+
 , # Whether this should be an El-Torito bootable CD.
   bootable ? false
 
@@ -45,12 +56,20 @@ assert bootable -> bootImage != "";
 assert efiBootable -> efiBootImage != "";
 assert usbBootable -> isohybridMbrImage != "";
 
+let
+  needSquashfs = squashfsContents != [];
+  makeSquashfsDrv = callPackage ./make-squashfs.nix {
+    storeContents = squashfsContents;
+    comp = squashfsCompression;
+  };
+in
 stdenv.mkDerivation {
   name = isoName;
   __structuredAttrs = true;
 
   buildCommandPath = ./make-iso9660-image.sh;
-  nativeBuildInputs = [ xorriso syslinux zstd libossp_uuid ];
+  nativeBuildInputs = [ xorriso syslinux zstd libossp_uuid ]
+    ++ lib.optionals needSquashfs makeSquashfsDrv.nativeBuildInputs;
 
   inherit isoName bootable bootImage compressImage volumeID efiBootImage efiBootable isohybridMbrImage usbBootable;
 
@@ -60,6 +79,8 @@ stdenv.mkDerivation {
   objects = map (x: x.object) storeContents;
   symlinks = map (x: x.symlink) storeContents;
 
+  squashfsCommand = lib.optionalString needSquashfs makeSquashfsDrv.buildCommand;
+
   # For obtaining the closure of `storeContents'.
   closureInfo = closureInfo { rootPaths = map (x: x.object) storeContents; };
 }
diff --git a/nixos/lib/make-iso9660-image.sh b/nixos/lib/make-iso9660-image.sh
index 34febe9cfe0e6..5881195e461f8 100644
--- a/nixos/lib/make-iso9660-image.sh
+++ b/nixos/lib/make-iso9660-image.sh
@@ -68,6 +68,11 @@ for i in $(< $closureInfo/store-paths); do
     addPath "${i:1}" "$i"
 done
 
+# If needed, build a squashfs and add that
+if [[ -n "$squashfsCommand" ]]; then
+    (out="nix-store.squashfs" eval "$squashfsCommand")
+    addPath "nix-store.squashfs" "nix-store.squashfs"
+fi
 
 # Also include a manifest of the closures in a format suitable for
 # nix-store --load-db.
diff --git a/nixos/lib/make-options-doc/default.nix b/nixos/lib/make-options-doc/default.nix
index 284934a7608ef..09a4022845e04 100644
--- a/nixos/lib/make-options-doc/default.nix
+++ b/nixos/lib/make-options-doc/default.nix
@@ -1,20 +1,95 @@
-/* Generate JSON, XML and DocBook documentation for given NixOS options.
+/**
+  Generates documentation for [nix modules](https://nix.dev/tutorials/module-system/module-system.html).
 
-   Minimal example:
+  It uses the declared `options` to generate documentation in various formats.
 
-    { pkgs,  }:
+  # Outputs
 
-    let
-      eval = import (pkgs.path + "/nixos/lib/eval-config.nix") {
-        baseModules = [
-          ../module.nix
-        ];
-        modules = [];
-      };
-    in pkgs.nixosOptionsDoc {
-      options = eval.options;
+  This function returns an attribute set with the following entries.
+
+  ## optionsCommonMark
+
+  Documentation in CommonMark text format.
+
+  ## optionsJSON
+
+  All options in a JSON format suitable for further automated processing.
+
+  `example.json`
+  ```json
+  {
+    ...
+    "fileSystems.<name>.options": {
+      "declarations": ["nixos/modules/tasks/filesystems.nix"],
+      "default": {
+        "_type": "literalExpression",
+        "text": "[\n  \"defaults\"\n]"
+      },
+      "description": "Options used to mount the file system.",
+      "example": {
+        "_type": "literalExpression",
+        "text": "[\n  \"data=journal\"\n]"
+      },
+      "loc": ["fileSystems", "<name>", "options"],
+      "readOnly": false,
+      "type": "non-empty (list of string (with check: non-empty))",
+      "relatedPackages": "- [`pkgs.tmux`](\n    https://search.nixos.org/packages?show=tmux&sort=relevance&query=tmux\n  )\n"
+    },
+    ...
+  }
+  ```
+
+  ## optionsDocBook
+
+  Deprecated since 23.11; will be removed in 24.05.
+
+  ## optionsAsciiDoc
+
+  Documentation rendered as AsciiDoc. This is useful for e.g. man pages.
+
+  > Note: NixOS itself uses this output to build the configuration.nix man page.
+
+  ## optionsNix
+
+  All options as a Nix attribute set value, with the same schema as `optionsJSON`.
+
+  # Example
+
+  ## Example: NixOS configuration
+
+  ```nix
+  let
+    # Evaluate a NixOS configuration
+    eval = import (pkgs.path + "/nixos/lib/eval-config.nix") {
+      # Overridden explicitly here; otherwise this would include all modules from NixOS.
+      # See: docs of eval-config.nix for more details
+      baseModules = [];
+      modules = [
+        ./module.nix
+      ];
+    };
+  in
+    pkgs.nixosOptionsDoc {
+      inherit (eval) options;
     }
+  ```
+
+  ## Example: non-NixOS modules
+
+  `nixosOptionsDoc` can also be used to build documentation for non-NixOS modules.
 
+  ```nix
+  let
+    eval = lib.evalModules {
+      modules = [
+        ./module.nix
+      ];
+    };
+  in
+    pkgs.nixosOptionsDoc {
+      inherit (eval) options;
+    }
+  ```
 */
 { pkgs
 , lib
diff --git a/nixos/lib/systemd-lib.nix b/nixos/lib/systemd-lib.nix
index ef218e674ebf1..c00b2d0f207c6 100644
--- a/nixos/lib/systemd-lib.nix
+++ b/nixos/lib/systemd-lib.nix
@@ -73,13 +73,26 @@ in rec {
     optional (attr ? ${name} && (! isMacAddress attr.${name} && attr.${name} != "none"))
       "Systemd ${group} field `${name}` must be a valid MAC address or the special value `none`.";
 
-
+  isNumberOrRangeOf = check: v:
+    if isInt v
+    then check v
+    else let
+      parts = splitString "-" v;
+      lower = toIntBase10 (head parts);
+      upper = if tail parts != [] then toIntBase10 (head (tail parts)) else lower;
+    in
+      length parts <= 2 && lower <= upper && check lower && check upper;
   isPort = i: i >= 0 && i <= 65535;
+  isPortOrPortRange = isNumberOrRangeOf isPort;
 
   assertPort = name: group: attr:
     optional (attr ? ${name} && ! isPort attr.${name})
       "Error on the systemd ${group} field `${name}': ${attr.name} is not a valid port number.";
 
+  assertPortOrPortRange = name: group: attr:
+    optional (attr ? ${name} && ! isPortOrPortRange attr.${name})
+      "Error on the systemd ${group} field `${name}': ${attr.name} is not a valid port number or range of port numbers.";
+
   assertValueOneOf = name: values: group: attr:
     optional (attr ? ${name} && !elem attr.${name} values)
       "Systemd ${group} field `${name}' cannot have value `${toString attr.${name}}'.";
diff --git a/nixos/lib/test-driver/test_driver/machine.py b/nixos/lib/test-driver/test_driver/machine.py
index df8628bce9568..c117aab7c4011 100644
--- a/nixos/lib/test-driver/test_driver/machine.py
+++ b/nixos/lib/test-driver/test_driver/machine.py
@@ -1250,6 +1250,5 @@ class Machine:
             check_return=False,
             check_output=False,
         )
-        self.wait_for_console_text(r"systemd\[1\]:.*Switching root\.")
         self.connected = False
         self.connect()
diff --git a/nixos/lib/utils.nix b/nixos/lib/utils.nix
index 49ba2e5c83868..22a2c79843c66 100644
--- a/nixos/lib/utils.nix
+++ b/nixos/lib/utils.nix
@@ -64,8 +64,8 @@ rec {
     let
       s = if builtins.isPath arg then "${arg}"
         else if builtins.isString arg then arg
-        else if builtins.isInt arg || builtins.isFloat arg then toString arg
-        else throw "escapeSystemdExecArg only allows strings, paths and numbers";
+        else if builtins.isInt arg || builtins.isFloat arg || lib.isDerivation arg then toString arg
+        else throw "escapeSystemdExecArg only allows strings, paths, numbers and derivations";
     in
       replaceStrings [ "%" "$" ] [ "%%" "$$" ] (builtins.toJSON s);
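A small sketch of what the relaxed check above enables; the service name and script are hypothetical:

```nix
{ pkgs, utils, ... }:
{
  # A derivation can now be passed directly; toString turns it into its store path,
  # which is then escaped like any other argument.
  systemd.services.demo.serviceConfig.ExecStart = utils.escapeSystemdExecArgs [
    (pkgs.writers.writeDash "demo" "echo hello")
    "--verbose"
  ];
}
```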
 
diff --git a/nixos/modules/config/nix.nix b/nixos/modules/config/nix.nix
index e6a74bbb73fcc..a40953a3a3c92 100644
--- a/nixos/modules/config/nix.nix
+++ b/nixos/modules/config/nix.nix
@@ -14,8 +14,10 @@ let
     concatStringsSep
     boolToString
     escape
+    filterAttrs
     floatToString
     getVersion
+    hasPrefix
     isBool
     isDerivation
     isFloat
@@ -95,14 +97,19 @@ let
 
       mkKeyValuePairs = attrs: concatStringsSep "\n" (mapAttrsToList mkKeyValue attrs);
 
+      isExtra = key: hasPrefix "extra-" key;
+
     in
     pkgs.writeTextFile {
       name = "nix.conf";
+      # workaround for https://github.com/NixOS/nix/issues/9487
+      # extra-* settings must come after their non-extra counterpart
       text = ''
         # WARNING: this file is generated from the nix.* options in
         # your NixOS configuration, typically
         # /etc/nixos/configuration.nix.  Do not edit it!
-        ${mkKeyValuePairs cfg.settings}
+        ${mkKeyValuePairs (filterAttrs (key: value: !(isExtra key)) cfg.settings)}
+        ${mkKeyValuePairs (filterAttrs (key: value: isExtra key) cfg.settings)}
         ${cfg.extraOptions}
       '';
       checkPhase = lib.optionalString cfg.checkConfig (
@@ -345,7 +352,7 @@ in
             show-trace = true;
 
             system-features = [ "big-parallel" "kvm" "recursive-nix" ];
-            sandbox-paths = { "/bin/sh" = "''${pkgs.busybox-sandbox-shell.out}/bin/busybox"; };
+            sandbox-paths = [ "/bin/sh=''${pkgs.busybox-sandbox-shell.out}/bin/busybox" ];
           }
         '';
         description = lib.mdDoc ''
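To show why the reordering above matters, a hedged configuration sketch; the cache URL and key are placeholders:

```nix
{
  nix.settings = {
    substituters = [ "https://cache.nixos.org" ];
    # With the filter above, extra-* keys are written after their base keys in
    # nix.conf, so Nix merges them instead of mishandling them (NixOS/nix#9487).
    extra-substituters = [ "https://example.cachix.org" ];               # placeholder
    extra-trusted-public-keys = [ "example.cachix.org-1:placeholder=" ]; # placeholder
  };
}
```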
diff --git a/nixos/modules/config/resolvconf.nix b/nixos/modules/config/resolvconf.nix
index e9ae4d651d264..3b8cc0cb8f42e 100644
--- a/nixos/modules/config/resolvconf.nix
+++ b/nixos/modules/config/resolvconf.nix
@@ -28,6 +28,8 @@ let
     '' + optionalString cfg.useLocalResolver ''
       # This hosts runs a full-blown DNS resolver.
       name_servers='127.0.0.1'
+    '' + optionalString (cfg.useLocalResolver && config.networking.enableIPv6) ''
+      name_servers='::1'
     '' + cfg.extraConfig;
 
 in
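A minimal sketch of a configuration affected by the change above:

```nix
{
  # With IPv6 enabled (the default), the generated resolvconf configuration now
  # also sets name_servers='::1' in addition to the existing 127.0.0.1 entry.
  networking.resolvconf.useLocalResolver = true;
}
```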
diff --git a/nixos/modules/config/users-groups.nix b/nixos/modules/config/users-groups.nix
index 02cd1a17f538a..f9750b7263cac 100644
--- a/nixos/modules/config/users-groups.nix
+++ b/nixos/modules/config/users-groups.nix
@@ -496,6 +496,7 @@ let
     in
       filter types.shellPackage.check shells;
 
+  lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
 in {
   imports = [
     (mkAliasOptionModuleMD [ "users" "extraUsers" ] [ "users" "users" ])
@@ -695,24 +696,31 @@ in {
       '';
     } else ""; # keep around for backwards compatibility
 
-    system.activationScripts.update-lingering = let
-      lingerDir = "/var/lib/systemd/linger";
-      lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
-      lingeringUsersFile = builtins.toFile "lingering-users"
-        (concatStrings (map (s: "${s}\n")
-          (sort (a: b: a < b) lingeringUsers)));  # this sorting is important for `comm` to work correctly
-    in stringAfter [ "users" ] ''
-      if [ -e ${lingerDir} ] ; then
+    systemd.services.linger-users = lib.mkIf ((builtins.length lingeringUsers) > 0) {
+      wantedBy = ["multi-user.target"];
+      after = ["systemd-logind.service"];
+      requires = ["systemd-logind.service"];
+
+      script = let
+        lingerDir = "/var/lib/systemd/linger";
+        lingeringUsersFile = builtins.toFile "lingering-users"
+          (concatStrings (map (s: "${s}\n")
+            (sort (a: b: a < b) lingeringUsers)));  # this sorting is important for `comm` to work correctly
+      in ''
+        mkdir -vp ${lingerDir}
         cd ${lingerDir}
-        for user in ${lingerDir}/*; do
-          if ! id "$user" >/dev/null 2>&1; then
+        for user in $(ls); do
+          if ! id "$user" >/dev/null; then
+            echo "Removing linger for missing user $user"
             rm --force -- "$user"
           fi
         done
-        ls ${lingerDir} | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
-        ls ${lingerDir} | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
-      fi
-    '';
+        ls | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
+        ls | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
+      '';
+
+      serviceConfig.Type = "oneshot";
+    };
 
     # Warn about user accounts with deprecated password hashing schemes
     # This does not work when the users and groups are created by
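For reference, the new `linger-users` unit above acts on accounts declared like this sketch; the user name is hypothetical:

```nix
{
  users.users.alice = {
    isNormalUser = true;
    # Collected into lingeringUsers above; the oneshot service then runs
    # `loginctl enable-linger` for exactly this set of users.
    linger = true;
  };
}
```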
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index 3b983f768f91a..352c8d8ead54d 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -396,6 +396,9 @@ in {
             modules = [nvidia_x11.bin];
             display = !offloadCfg.enable;
             deviceSection =
+              ''
+                Option "SidebandSocketPath" "/run/nvidia-xdriver/"
+              '' +
               lib.optionalString primeEnabled
               ''
                 BusID "${pCfg.nvidiaBusId}"
@@ -533,8 +536,14 @@ in {
 
         hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
 
-        systemd.tmpfiles.rules =
-          lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+        systemd.tmpfiles.rules = [
+          # Remove the following log message:
+          #    (WW) NVIDIA: Failed to bind sideband socket to
+          #    (WW) NVIDIA:     '/var/run/nvidia-xdriver-b4f69129' Permission denied
+          #
+          # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
+          "d /run/nvidia-xdriver 0770 root users"
+        ] ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
           "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
 
         boot = {
diff --git a/nixos/modules/hardware/video/webcam/ipu6.nix b/nixos/modules/hardware/video/webcam/ipu6.nix
index c2dbdc217bd60..a7767e446bd4f 100644
--- a/nixos/modules/hardware/video/webcam/ipu6.nix
+++ b/nixos/modules/hardware/video/webcam/ipu6.nix
@@ -30,7 +30,10 @@ in
       ipu6-drivers
     ];
 
-    hardware.firmware = [ pkgs.ipu6-camera-bins ];
+    hardware.firmware = with pkgs; [
+      ipu6-camera-bins
+      ivsc-firmware
+    ];
 
     services.udev.extraRules = ''
       SUBSYSTEM=="intel-ipu6-psys", MODE="0660", GROUP="video"
diff --git a/nixos/modules/i18n/input-method/fcitx5.nix b/nixos/modules/i18n/input-method/fcitx5.nix
index ee8d2652b1c72..7553362205203 100644
--- a/nixos/modules/i18n/input-method/fcitx5.nix
+++ b/nixos/modules/i18n/input-method/fcitx5.nix
@@ -32,8 +32,8 @@ in
       };
       plasma6Support = mkOption {
         type = types.bool;
-        default = config.services.xserver.desktopManager.plasma6.enable;
-        defaultText = literalExpression "config.services.xserver.desktopManager.plasma6.enable";
+        default = config.services.desktopManager.plasma6.enable;
+        defaultText = literalExpression "config.services.desktopManager.plasma6.enable";
         description = lib.mdDoc ''
           Use qt6 versions of fcitx5 packages.
           Required for configuring fcitx5 in KDE System Settings.
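A hedged usage sketch for the option whose default changed above; the `i18n.inputMethod.enabled` attribute is assumed from the module's other options and is not shown in this diff:

```nix
{
  i18n.inputMethod = {
    enabled = "fcitx5";
    # Now defaults to config.services.desktopManager.plasma6.enable,
    # following the module's move out of services.xserver.
    fcitx5.plasma6Support = true;
  };
}
```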
diff --git a/nixos/modules/image/repart-image.nix b/nixos/modules/image/repart-image.nix
index 5ae523c43f589..83e766268cf04 100644
--- a/nixos/modules/image/repart-image.nix
+++ b/nixos/modules/image/repart-image.nix
@@ -2,8 +2,8 @@
 # NixOS module that can be imported.
 
 { lib
+, stdenvNoCC
 , runCommand
-, runCommandLocal
 , python3
 , black
 , ruff
@@ -26,15 +26,18 @@
 , xz
 
   # arguments
+, name
+, version
 , imageFileBasename
 , compression
 , fileSystems
-, partitions
+, partitionsJSON
 , split
 , seed
 , definitionsDirectory
 , sectorSize
 , mkfsEnv ? {}
+, createEmpty ? true
 }:
 
 let
@@ -52,11 +55,6 @@ let
     mypy --strict $out
   '';
 
-  amendedRepartDefinitions = runCommandLocal "amended-repart.d" {} ''
-    definitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
-    cp -r $definitions $out
-  '';
-
   fileSystemToolMapping = {
     "vfat" = [ dosfstools mtools ];
     "ext4" = [ e2fsprogs.bin ];
@@ -78,53 +76,88 @@ let
     "xz" = "xz --keep --verbose --threads=0 -${toString compression.level}";
   }."${compression.algorithm}";
 in
-
-runCommand imageFileBasename
-{
+  stdenvNoCC.mkDerivation (finalAttrs:
+  (if (version != null)
+  then { pname = name; inherit version; }
+  else { inherit name;  }
+  ) // {
   __structuredAttrs = true;
 
   nativeBuildInputs = [
     systemd
     fakeroot
     util-linux
+  ] ++ lib.optionals (compression.enable) [
     compressionPkg
   ] ++ fileSystemTools;
 
   env = mkfsEnv;
 
+  inherit partitionsJSON definitionsDirectory;
+
+  # relative path to the repart definitions that are read by systemd-repart
+  finalRepartDefinitions = "repart.d";
+
   systemdRepartFlags = [
     "--dry-run=no"
-    "--empty=create"
     "--size=auto"
     "--seed=${seed}"
-    "--definitions=${amendedRepartDefinitions}"
+    "--definitions=${finalAttrs.finalRepartDefinitions}"
     "--split=${lib.boolToString split}"
     "--json=pretty"
+  ] ++ lib.optionals createEmpty [
+    "--empty=create"
   ] ++ lib.optionals (sectorSize != null) [
     "--sector-size=${toString sectorSize}"
   ];
 
-  passthru = {
-    inherit amendRepartDefinitions amendedRepartDefinitions;
-  };
-} ''
-  mkdir -p $out
-  cd $out
+  dontUnpack = true;
+  dontConfigure = true;
+  doCheck = false;
+
+  patchPhase = ''
+    runHook prePatch
+
+    amendedRepartDefinitionsDir=$(${amendRepartDefinitions} $partitionsJSON $definitionsDirectory)
+    ln -vs $amendedRepartDefinitionsDir $finalRepartDefinitions
 
-  echo "Building image with systemd-repart..."
-  unshare --map-root-user fakeroot systemd-repart \
-    ''${systemdRepartFlags[@]} \
-    ${imageFileBasename}.raw \
-    | tee repart-output.json
+    runHook postPatch
+  '';
+
+  buildPhase = ''
+    runHook preBuild
+
+    echo "Building image with systemd-repart..."
+    unshare --map-root-user fakeroot systemd-repart \
+      ''${systemdRepartFlags[@]} \
+      ${imageFileBasename}.raw \
+      | tee repart-output.json
+
+    runHook postBuild
+  '';
+
+  installPhase = ''
+    runHook preInstall
 
+    mkdir -p $out
+  ''
   # Compression is implemented in the same derivation as opposed to in a
   # separate derivation to allow users to save disk space. Disk images are
   # already very space intensive so we want to allow users to mitigate this.
-  if ${lib.boolToString compression.enable}; then
+  + lib.optionalString compression.enable
+  ''
     for f in ${imageFileBasename}*; do
       echo "Compressing $f with ${compression.algorithm}..."
       # Keep the original file when compressing and only delete it afterwards
       ${compressionCommand} $f && rm $f
     done
-  fi
-''
+  '' + ''
+    mv -v repart-output.json ${imageFileBasename}* $out
+
+    runHook postInstall
+  '';
+
+  passthru = {
+    inherit amendRepartDefinitions;
+  };
+})
diff --git a/nixos/modules/image/repart.nix b/nixos/modules/image/repart.nix
index 90c9c7e51dfa3..1a43297f4b432 100644
--- a/nixos/modules/image/repart.nix
+++ b/nixos/modules/image/repart.nix
@@ -211,6 +211,15 @@ in
       '';
     };
 
+    finalPartitions = lib.mkOption {
+      type = lib.types.attrs;
+      internal = true;
+      readOnly = true;
+      description = lib.mdDoc ''
+        Convenience option to access partitions with added closures.
+      '';
+    };
+
   };
 
   config = {
@@ -224,6 +233,16 @@ in
             "zstd" = ".zst";
             "xz" = ".xz";
           }."${cfg.compression.algorithm}";
+
+        makeClosure = paths: pkgs.closureInfo { rootPaths = paths; };
+
+        # Add the closure of the provided Nix store paths to cfg.partitions so
+        # that amend-repart-definitions.py can read it.
+        addClosure = _name: partitionConfig: partitionConfig // (
+          lib.optionalAttrs
+            (partitionConfig.storePaths or [ ] != [ ])
+            { closure = "${makeClosure partitionConfig.storePaths}/store-paths"; }
+        );
       in
       {
         name = lib.mkIf (config.system.image.id != null) (lib.mkOptionDefault config.system.image.id);
@@ -239,6 +258,8 @@ in
             "xz" = 3;
           }."${cfg.compression.algorithm}";
         };
+
+        finalPartitions = lib.mapAttrs addClosure cfg.partitions;
       };
 
     system.build.image =
@@ -247,36 +268,25 @@ in
           (f: f != null)
           (lib.mapAttrsToList (_n: v: v.repartConfig.Format or null) cfg.partitions);
 
-        makeClosure = paths: pkgs.closureInfo { rootPaths = paths; };
-
-        # Add the closure of the provided Nix store paths to cfg.partitions so
-        # that amend-repart-definitions.py can read it.
-        addClosure = _name: partitionConfig: partitionConfig // (
-          lib.optionalAttrs
-            (partitionConfig.storePaths or [ ] != [ ])
-            { closure = "${makeClosure partitionConfig.storePaths}/store-paths"; }
-        );
-
-        finalPartitions = lib.mapAttrs addClosure cfg.partitions;
 
         format = pkgs.formats.ini { };
 
         definitionsDirectory = utils.systemdUtils.lib.definitions
           "repart.d"
           format
-          (lib.mapAttrs (_n: v: { Partition = v.repartConfig; }) finalPartitions);
+          (lib.mapAttrs (_n: v: { Partition = v.repartConfig; }) cfg.finalPartitions);
 
-        partitions = pkgs.writeText "partitions.json" (builtins.toJSON finalPartitions);
+        partitionsJSON = pkgs.writeText "partitions.json" (builtins.toJSON cfg.finalPartitions);
 
         mkfsEnv = mkfsOptionsToEnv cfg.mkfsOptions;
       in
       pkgs.callPackage ./repart-image.nix {
         systemd = cfg.package;
-        inherit (cfg) imageFileBasename compression split seed sectorSize;
-        inherit fileSystems definitionsDirectory partitions mkfsEnv;
+        inherit (cfg) name version imageFileBasename compression split seed sectorSize;
+        inherit fileSystems definitionsDirectory partitionsJSON mkfsEnv;
       };
 
-    meta.maintainers = with lib.maintainers; [ nikstur ];
+    meta.maintainers = with lib.maintainers; [ nikstur willibutz ];
 
   };
 }
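
A minimal sketch of how the new `finalPartitions` plumbing is exercised from a configuration, assuming the module's usual `image.repart` namespace and a hypothetical partition name; any partition that lists `storePaths` gets a `closure` entry attached for amend-repart-definitions.py:

```
{ config, ... }:
{
  image.repart = {
    name = "demo-image";                       # hypothetical image name
    partitions."10-root" = {
      # storePaths triggers addClosure, which attaches a closure file that
      # amend-repart-definitions.py reads at build time.
      storePaths = [ config.system.build.toplevel ];
      repartConfig = {
        Type = "root";
        Format = "ext4";
        Label = "nixos";
      };
    };
  };
}
```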
diff --git a/nixos/modules/installer/cd-dvd/iso-image.nix b/nixos/modules/installer/cd-dvd/iso-image.nix
index 6adb94e09aff3..f5b6af3a6b7ff 100644
--- a/nixos/modules/installer/cd-dvd/iso-image.nix
+++ b/nixos/modules/installer/cd-dvd/iso-image.nix
@@ -811,12 +811,6 @@ in
       optional config.isoImage.includeSystemBuildDependencies
         config.system.build.toplevel.drvPath;
 
-    # Create the squashfs image that contains the Nix store.
-    system.build.squashfsStore = pkgs.callPackage ../../../lib/make-squashfs.nix {
-      storeContents = config.isoImage.storeContents;
-      comp = config.isoImage.squashfsCompression;
-    };
-
     # Individual files to be included on the CD, outside of the Nix
     # store on the CD.
     isoImage.contents =
@@ -827,9 +821,6 @@ in
         { source = config.system.build.initialRamdisk + "/" + config.system.boot.loader.initrdFile;
           target = "/boot/" + config.system.boot.loader.initrdFile;
         }
-        { source = config.system.build.squashfsStore;
-          target = "/nix-store.squashfs";
-        }
         { source = pkgs.writeText "version" config.system.nixos.label;
           target = "/version.txt";
         }
@@ -878,6 +869,8 @@ in
       bootable = config.isoImage.makeBiosBootable;
       bootImage = "/isolinux/isolinux.bin";
       syslinux = if config.isoImage.makeBiosBootable then pkgs.syslinux else null;
+      squashfsContents = config.isoImage.storeContents;
+      squashfsCompression = config.isoImage.squashfsCompression;
     } // optionalAttrs (config.isoImage.makeUsbBootable && config.isoImage.makeBiosBootable) {
       usbBootable = true;
       isohybridMbrImage = "${pkgs.syslinux}/share/syslinux/isohdpfx.bin";
diff --git a/nixos/modules/installer/tools/nixos-generate-config.pl b/nixos/modules/installer/tools/nixos-generate-config.pl
index 2f9edba4f0c9c..ef25b8b296e6e 100644
--- a/nixos/modules/installer/tools/nixos-generate-config.pl
+++ b/nixos/modules/installer/tools/nixos-generate-config.pl
@@ -257,7 +257,7 @@ foreach my $path (glob "/sys/class/{block,mmc_host}/*") {
 
 # Add bcache module, if needed.
 my @bcacheDevices = glob("/dev/bcache*");
-@bcacheDevices = grep(!qr#dev/bcachefs.*#, @bcacheDevices);
+@bcacheDevices = grep(!m#dev/bcachefs.*#, @bcacheDevices);
 if (scalar @bcacheDevices > 0) {
     push @initrdAvailableKernelModules, "bcache";
 }
diff --git a/nixos/modules/misc/mandoc.nix b/nixos/modules/misc/mandoc.nix
index 73646a60aabb2..706e2ac2c2836 100644
--- a/nixos/modules/misc/mandoc.nix
+++ b/nixos/modules/misc/mandoc.nix
@@ -17,6 +17,8 @@ let
       )
       output
   );
+
+  makeLeadingSlashes = map (path: if builtins.substring 0 1 path != "/" then "/${path}" else path);
 in
 {
   meta.maintainers = [ lib.maintainers.sternenseemann ];
@@ -29,6 +31,7 @@ in
         type = with lib.types; listOf str;
         default = [ "share/man" ];
         example = lib.literalExpression "[ \"share/man\" \"share/man/fr\" ]";
+        apply = makeLeadingSlashes;
         description = ''
           Change the paths included in the MANPATH environment variable,
           i. e. the directories where {manpage}`man(1)`
@@ -41,6 +44,28 @@ in
         '';
       };
 
+      cachePath = lib.mkOption {
+        type = with lib.types; listOf str;
+        default = cfg.manPath;
+        defaultText = lib.literalExpression "config.documentation.man.mandoc.manPath";
+        example = lib.literalExpression "[ \"share/man\" \"share/man/fr\" ]";
+        apply = makeLeadingSlashes;
+        description = ''
+          Change the paths where mandoc {manpage}`makewhatis(8)` generates the
+          manual page index caches. {option}`documentation.man.generateCaches`
+          should be enabled to allow cache generation. This list should only
+          include the paths to manpages installed in the system configuration,
+          i. e. /run/current-system/sw/share/man. {manpage}`makewhatis(8)`
+          creates a database in each directory using the files
+          `mansection/[arch/]title.section` and `catsection/[arch/]title.0`
+          in it. If a directory contains no manual pages, no database is
+          created in that directory.
+          This option only needs to be set manually if extra paths should be
+          indexed or {option}`documentation.man.manPath` contains paths that
+          can't be indexed.
+        '';
+      };
+
       package = lib.mkOption {
         type = lib.types.package;
         default = pkgs.mandoc;
@@ -178,19 +203,14 @@ in
       # TODO(@sternenseemman): fix symlinked directories not getting indexed,
       # see: https://inbox.vuxu.org/mandoc-tech/20210906171231.GF83680@athene.usta.de/T/#e85f773c1781e3fef85562b2794f9cad7b2909a3c
       extraSetup = lib.mkIf config.documentation.man.generateCaches ''
-        for man_path in ${
-          lib.concatMapStringsSep " " (path:
-            "$out/" + lib.escapeShellArg path
-            ) cfg.manPath} ${lib.concatMapStringsSep " " (path:
-            lib.escapeShellArg path) cfg.settings.manpath
-          }
+        for man_path in ${lib.concatMapStringsSep " " (path: "$out" + lib.escapeShellArg path) cfg.cachePath}
         do
           [[ -d "$man_path" ]] && ${makewhatis} -T utf8 $man_path
         done
       '';
 
       # tell mandoc the paths containing man pages
-      profileRelativeSessionVariables."MANPATH" = map (path: if builtins.substring 0 1 path != "/" then "/${path}" else path) cfg.manPath;
+      profileRelativeSessionVariables."MANPATH" = lib.mkIf (cfg.manPath != [ ]) cfg.manPath;
     };
   };
 }
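
A minimal sketch of the new cache option, assuming the existing `documentation.man.mandoc` namespace; `cachePath` defaults to `manPath`, so it only needs to be set when some `manPath` entries cannot be indexed:

```
{
  documentation.man = {
    generateCaches = true;                  # required for makewhatis(8) to run
    mandoc = {
      manPath = [ "share/man" "share/man/fr" ];
      # Only index the English tree; leading slashes are added automatically
      # by the new makeLeadingSlashes apply function.
      cachePath = [ "share/man" ];
    };
  };
}
```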
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index eabb22a446b0a..d89d294b0469f 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -204,6 +204,7 @@
   ./programs/i3lock.nix
   ./programs/iotop.nix
   ./programs/java.nix
+  ./programs/joycond-cemuhook.nix
   ./programs/k3b.nix
   ./programs/k40-whisperer.nix
   ./programs/kbdlight.nix
@@ -944,6 +945,7 @@
   ./services/networking/dnscrypt-wrapper.nix
   ./services/networking/dnsdist.nix
   ./services/networking/dnsmasq.nix
+  ./services/networking/dnsproxy.nix
   ./services/networking/doh-proxy-rust.nix
   ./services/networking/ejabberd.nix
   ./services/networking/envoy.nix
@@ -1019,6 +1021,8 @@
   ./services/networking/lxd-image-server.nix
   ./services/networking/magic-wormhole-mailbox-server.nix
   ./services/networking/matterbridge.nix
+  ./services/networking/microsocks.nix
+  ./services/networking/mihomo.nix
   ./services/networking/minidlna.nix
   ./services/networking/miniupnpd.nix
   ./services/networking/miredo.nix
@@ -1035,6 +1039,7 @@
   ./services/networking/multipath.nix
   ./services/networking/murmur.nix
   ./services/networking/mxisd.nix
+  ./services/networking/mycelium.nix
   ./services/networking/namecoind.nix
   ./services/networking/nar-serve.nix
   ./services/networking/nat.nix
@@ -1103,6 +1108,11 @@
   ./services/networking/rpcbind.nix
   ./services/networking/rxe.nix
   ./services/networking/sabnzbd.nix
+  ./services/networking/scion/scion.nix
+  ./services/networking/scion/scion-control.nix
+  ./services/networking/scion/scion-daemon.nix
+  ./services/networking/scion/scion-dispatcher.nix
+  ./services/networking/scion/scion-router.nix
   ./services/networking/seafile.nix
   ./services/networking/searx.nix
   ./services/networking/shadowsocks.nix
@@ -1271,6 +1281,7 @@
   ./services/video/go2rtc/default.nix
   ./services/video/frigate.nix
   ./services/video/mirakurun.nix
+  ./services/video/photonvision.nix
   ./services/video/replay-sorcery.nix
   ./services/video/mediamtx.nix
   ./services/video/unifi-video.nix
@@ -1359,6 +1370,7 @@
   ./services/web-apps/plausible.nix
   ./services/web-apps/powerdns-admin.nix
   ./services/web-apps/pretalx.nix
+  ./services/web-apps/pretix.nix
   ./services/web-apps/prosody-filer.nix
   ./services/web-apps/rimgo.nix
   ./services/web-apps/sftpgo.nix
diff --git a/nixos/modules/profiles/all-hardware.nix b/nixos/modules/profiles/all-hardware.nix
index 4857ea4dbeae8..249b767593f2d 100644
--- a/nixos/modules/profiles/all-hardware.nix
+++ b/nixos/modules/profiles/all-hardware.nix
@@ -58,15 +58,7 @@ in
       # Hyper-V support.
       "hv_storvsc"
     ] ++ lib.optionals pkgs.stdenv.hostPlatform.isAarch [
-      # Most of the following falls into two categories:
-      #  - early KMS / early display
-      #  - early storage (e.g. USB) support
-
-      # Allows using framebuffer configured by the initial boot firmware
-      "simplefb"
-
       # Allwinner support
-
       # Required for early KMS
       "sun4i-drm"
       "sun8i-mixer" # Audio, but required for kms
@@ -75,7 +67,6 @@ in
       "pwm-sun4i"
 
       # Broadcom
-
       "vc4"
     ] ++ lib.optionals pkgs.stdenv.isAarch64 [
       # Most of the following falls into two categories:
diff --git a/nixos/modules/profiles/macos-builder.nix b/nixos/modules/profiles/macos-builder.nix
index 6c2602881d6b5..786e26cf98f7f 100644
--- a/nixos/modules/profiles/macos-builder.nix
+++ b/nixos/modules/profiles/macos-builder.nix
@@ -145,6 +145,8 @@ in
         # This installCredentials script is written so that it's as easy as
         # possible for a user to audit before confirming the `sudo`
         installCredentials = hostPkgs.writeShellScript "install-credentials" ''
+          set -euo pipefail
+
           KEYS="''${1}"
           INSTALL=${hostPkgs.coreutils}/bin/install
           "''${INSTALL}" -g nixbld -m 600 "''${KEYS}/${user}_${keyType}" ${privateKey}
@@ -154,6 +156,9 @@ in
         hostPkgs = config.virtualisation.host.pkgs;
 
         script = hostPkgs.writeShellScriptBin "create-builder" (
+          ''
+            set -euo pipefail
+          '' +
           # When running as non-interactively as part of a DarwinConfiguration the working directory
           # must be set to a writeable directory.
         (if cfg.workingDirectory != "." then ''
diff --git a/nixos/modules/programs/_1password-gui.nix b/nixos/modules/programs/_1password-gui.nix
index 83ef6037fb5a3..eb2effee4326f 100644
--- a/nixos/modules/programs/_1password-gui.nix
+++ b/nixos/modules/programs/_1password-gui.nix
@@ -51,14 +51,6 @@ in
           setuid = false;
           setgid = true;
         };
-
-        "1Password-KeyringHelper" = {
-          source = "${package}/share/1password/1Password-KeyringHelper";
-          owner = "root";
-          group = "onepassword";
-          setuid = true;
-          setgid = true;
-        };
       };
 
     };
diff --git a/nixos/modules/programs/clash-verge.nix b/nixos/modules/programs/clash-verge.nix
index 57a1c0377edbf..e1afafa7cadc3 100644
--- a/nixos/modules/programs/clash-verge.nix
+++ b/nixos/modules/programs/clash-verge.nix
@@ -3,6 +3,7 @@
 {
   options.programs.clash-verge = {
     enable = lib.mkEnableOption (lib.mdDoc "Clash Verge");
+    package = lib.mkPackageOption pkgs "clash-verge" {};
     autoStart = lib.mkEnableOption (lib.mdDoc "Clash Verge auto launch");
     tunMode = lib.mkEnableOption (lib.mdDoc "Clash Verge TUN mode");
   };
@@ -14,10 +15,10 @@
     lib.mkIf cfg.enable {
 
       environment.systemPackages = [
-        pkgs.clash-verge
+        cfg.package
         (lib.mkIf cfg.autoStart (pkgs.makeAutostartItem {
           name = "clash-verge";
-          package = pkgs.clash-verge;
+          package = cfg.package;
         }))
       ];
 
@@ -25,7 +26,7 @@
         owner = "root";
         group = "root";
         capabilities = "cap_net_bind_service,cap_net_admin=+ep";
-        source = "${lib.getExe pkgs.clash-verge}";
+        source = "${lib.getExe cfg.package}";
       };
     };
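
A short usage sketch of the new `package` option; the alternative package name below is an assumption standing in for whatever Clash Verge derivation is desired:

```
{ pkgs, ... }:
{
  programs.clash-verge = {
    enable = true;
    package = pkgs.clash-verge-rev;   # hypothetical substitute for pkgs.clash-verge
    autoStart = true;
    tunMode = true;                   # uses the capability wrapper defined above
  };
}
```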
 
diff --git a/nixos/modules/programs/starship.nix b/nixos/modules/programs/starship.nix
index 34f6f0882c617..7f8d9eb3363d7 100644
--- a/nixos/modules/programs/starship.nix
+++ b/nixos/modules/programs/starship.nix
@@ -12,7 +12,7 @@ let
       nativeBuildInputs = [ pkgs.yq ];
     } ''
     tomlq -s -t 'reduce .[] as $item ({}; . * $item)' \
-      ${lib.concatStringsSep " " (map (f: "${pkgs.starship}/share/starship/presets/${f}.toml") cfg.presets)} \
+      ${lib.concatStringsSep " " (map (f: "${cfg.package}/share/starship/presets/${f}.toml") cfg.presets)} \
       ${userSettingsFile} \
       > $out
   '';
@@ -26,23 +26,20 @@ let
 in
 {
   options.programs.starship = {
-    enable = lib.mkEnableOption (lib.mdDoc "the Starship shell prompt");
+    enable = lib.mkEnableOption "the Starship shell prompt";
 
-    interactiveOnly = lib.mkOption {
-      default = true;
-      example = false;
-      type = lib.types.bool;
-      description = lib.mdDoc ''
-        Whether to enable starship only when the shell is interactive.
-        Some plugins require this to be set to false to function correctly.
-      '';
-    };
+    package = lib.mkPackageOption pkgs "starship" { };
+
+    interactiveOnly = lib.mkEnableOption ''
+      starship only when the shell is interactive.
+      Some plugins require this to be set to false to function correctly
+    '' // { default = true; };
 
     presets = lib.mkOption {
       default = [ ];
       example = [ "nerd-font-symbols" ];
       type = with lib.types; listOf str;
-      description = lib.mdDoc ''
+      description = ''
         Presets files to be merged with settings in order.
       '';
     };
@@ -50,7 +47,7 @@ in
     settings = lib.mkOption {
       inherit (settingsFormat) type;
       default = { };
-      description = lib.mdDoc ''
+      description = ''
         Configuration included in `starship.toml`.
 
         See https://starship.rs/config/#prompt for documentation.
@@ -68,7 +65,7 @@ in
         if [[ ! -f "$HOME/.config/starship.toml" ]]; then
           export STARSHIP_CONFIG=${settingsFile}
         fi
-        eval "$(${pkgs.starship}/bin/starship init bash)"
+        eval "$(${cfg.package}/bin/starship init bash)"
       fi
     '';
 
@@ -81,7 +78,7 @@ in
         if not test -f "$HOME/.config/starship.toml";
           set -x STARSHIP_CONFIG ${settingsFile}
         end
-        eval (${pkgs.starship}/bin/starship init fish)
+        eval (${cfg.package}/bin/starship init fish)
       end
     '';
 
@@ -94,7 +91,7 @@ in
         if [[ ! -f "$HOME/.config/starship.toml" ]]; then
           export STARSHIP_CONFIG=${settingsFile}
         fi
-        eval "$(${pkgs.starship}/bin/starship init zsh)"
+        eval "$(${cfg.package}/bin/starship init zsh)"
       fi
     '';
   };
diff --git a/nixos/modules/programs/steam.nix b/nixos/modules/programs/steam.nix
index 31803f061dce2..bab9bf8107b6e 100644
--- a/nixos/modules/programs/steam.nix
+++ b/nixos/modules/programs/steam.nix
@@ -44,8 +44,10 @@ in {
       '';
       apply = steam: steam.override (prev: {
         extraEnv = (lib.optionalAttrs (cfg.extraCompatPackages != [ ]) {
-            STEAM_EXTRA_COMPAT_TOOLS_PATHS = makeBinPath cfg.extraCompatPackages;
-          }) // (prev.extraEnv or {});
+          STEAM_EXTRA_COMPAT_TOOLS_PATHS = makeSearchPathOutput "steamcompattool" "" cfg.extraCompatPackages;
+        }) // (optionalAttrs cfg.extest.enable {
+          LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
+        }) // (prev.extraEnv or {});
         extraLibraries = pkgs: let
           prevLibs = if prev ? extraLibraries then prev.extraLibraries pkgs else [ ];
           additionalLibs = with config.hardware.opengl;
@@ -59,8 +61,6 @@ in {
           # use the setuid wrapped bubblewrap
           bubblewrap = "${config.security.wrapperDir}/..";
         };
-      } // optionalAttrs cfg.extest.enable {
-        extraEnv.LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
       });
       description = lib.mdDoc ''
         The Steam package to use. Additional libraries are added from the system
@@ -74,10 +74,17 @@ in {
     extraCompatPackages = mkOption {
       type = types.listOf types.package;
       default = [ ];
+      example = literalExpression ''
+        with pkgs; [
+          proton-ge-bin
+        ]
+      '';
       description = lib.mdDoc ''
         Extra packages to be used as compatibility tools for Steam on Linux. Packages will be included
         in the `STEAM_EXTRA_COMPAT_TOOLS_PATHS` environmental variable. For more information see
-        <https://github.com/ValveSoftware/steam-for-linux/issues/6310">.
+        https://github.com/ValveSoftware/steam-for-linux/issues/6310.
+
+        These packages must be Steam compatibility tools that have a `steamcompattool` output.
       '';
     };
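
A usage sketch of `extraCompatPackages`, mirroring the example added above; the listed tool must ship a `steamcompattool` output:

```
{ pkgs, ... }:
{
  programs.steam = {
    enable = true;
    # Exposed to Steam through STEAM_EXTRA_COMPAT_TOOLS_PATHS via the
    # steamcompattool output of each package.
    extraCompatPackages = with pkgs; [ proton-ge-bin ];
  };
}
```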
 
diff --git a/nixos/modules/security/pam.nix b/nixos/modules/security/pam.nix
index 560e5eff5c39a..26dc724ae1596 100644
--- a/nixos/modules/security/pam.nix
+++ b/nixos/modules/security/pam.nix
@@ -683,7 +683,7 @@ let
           (let dp9ik = config.security.pam.dp9ik; in { name = "p9"; enable = dp9ik.enable; control = dp9ik.control; modulePath = "${pkgs.pam_dp9ik}/lib/security/pam_p9.so"; args = [
             dp9ik.authserver
           ]; })
-          { name = "fprintd"; enable = cfg.fprintAuth; control = "sufficient"; modulePath = "${pkgs.fprintd}/lib/security/pam_fprintd.so"; }
+          { name = "fprintd"; enable = cfg.fprintAuth; control = "sufficient"; modulePath = "${config.services.fprintd.package}/lib/security/pam_fprintd.so"; }
         ] ++
           # Modules in this block require having the password set in PAM_AUTHTOK.
           # pam_unix is marked as 'sufficient' on NixOS which means nothing will run
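
Since pam_fprintd.so is now taken from `config.services.fprintd.package`, a custom fprintd build propagates into PAM automatically; a sketch, assuming the TOD variant as the substitute package:

```
{ pkgs, ... }:
{
  services.fprintd = {
    enable = true;
    package = pkgs.fprintd-tod;      # assumption: any fprintd-compatible build works
  };
  # pam_fprintd.so in this PAM service now comes from the package above.
  security.pam.services.login.fprintAuth = true;
}
```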
diff --git a/nixos/modules/services/audio/spotifyd.nix b/nixos/modules/services/audio/spotifyd.nix
index 1194b6f200d70..04bb523e25b1b 100644
--- a/nixos/modules/services/audio/spotifyd.nix
+++ b/nixos/modules/services/audio/spotifyd.nix
@@ -24,7 +24,7 @@ in
         type = types.lines;
         description = lib.mdDoc ''
           (Deprecated) Configuration for Spotifyd. For syntax and directives, see
-          <https://github.com/Spotifyd/spotifyd#Configuration>.
+          <https://docs.spotifyd.rs/config/File.html>.
         '';
       };
 
@@ -34,7 +34,7 @@ in
         example = { global.bitrate = 320; };
         description = lib.mdDoc ''
           Configuration for Spotifyd. For syntax and directives, see
-          <https://github.com/Spotifyd/spotifyd#Configuration>.
+          <https://docs.spotifyd.rs/config/File.html>.
         '';
       };
     };
diff --git a/nixos/modules/services/audio/wyoming/faster-whisper.nix b/nixos/modules/services/audio/wyoming/faster-whisper.nix
index dd7f62744cd02..0c36e8c9ab059 100644
--- a/nixos/modules/services/audio/wyoming/faster-whisper.nix
+++ b/nixos/modules/services/audio/wyoming/faster-whisper.nix
@@ -37,22 +37,13 @@ in
             enable = mkEnableOption (mdDoc "Wyoming faster-whisper server");
 
             model = mkOption {
-              # Intersection between available and referenced models here:
-              # https://github.com/rhasspy/models/releases/tag/v1.0
-              # https://github.com/rhasspy/rhasspy3/blob/wyoming-v1/programs/asr/faster-whisper/server/wyoming_faster_whisper/download.py#L17-L27
-              type = enum [
-                "tiny"
-                "tiny-int8"
-                "base"
-                "base-int8"
-                "small"
-                "small-int8"
-                "medium-int8"
-              ];
+              type = str;
               default = "tiny-int8";
-              example = "medium-int8";
+              example = "Systran/faster-distil-whisper-small.en";
               description = mdDoc ''
                 Name of the voice model to use.
+
+                Check the [2.0.0 release notes](https://github.com/rhasspy/wyoming-faster-whisper/releases/tag/v2.0.0) for possible values.
               '';
             };
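
With the model option relaxed to a free-form string, any model accepted by wyoming-faster-whisper 2.x can be named; a sketch, assuming the module's usual `servers.<name>` layout:

```
{
  services.wyoming.faster-whisper.servers.en = {
    enable = true;
    # A Hugging Face repository id, as described in the 2.0.0 release notes.
    model = "Systran/faster-distil-whisper-small.en";
  };
}
```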
 
diff --git a/nixos/modules/services/backup/bacula.nix b/nixos/modules/services/backup/bacula.nix
index 5a75a46e5259a..39975adf59092 100644
--- a/nixos/modules/services/backup/bacula.nix
+++ b/nixos/modules/services/backup/bacula.nix
@@ -4,11 +4,36 @@
 # TODO: test configuration when building nixexpr (use -t parameter)
 # TODO: support sqlite3 (it's deprecate?) and mysql
 
-with lib;
 
 let
+  inherit (lib)
+    concatStringsSep
+    literalExpression
+    mapAttrsToList
+    mdDoc
+    mkIf
+    mkOption
+    optional
+    optionalString
+    types
+    ;
   libDir = "/var/lib/bacula";
 
+  yes_no = bool: if bool then "yes" else "no";
+  tls_conf = tls_cfg: optionalString tls_cfg.enable (
+    concatStringsSep
+      "\n"
+      (
+      ["TLS Enable = yes;"]
+      ++ optional (tls_cfg.require != null) "TLS Require = ${yes_no tls_cfg.require};"
+      ++ optional (tls_cfg.certificate != null) ''TLS Certificate = "${tls_cfg.certificate}";''
+      ++ [''TLS Key = "${tls_cfg.key}";'']
+      ++ optional (tls_cfg.verifyPeer != null) "TLS Verify Peer = ${yes_no tls_cfg.verifyPeer};"
+      ++ optional (tls_cfg.allowedCN != [ ]) "TLS Allowed CN = ${concatStringsSep " " (tls_cfg.allowedCN)};"
+      ++ optional (tls_cfg.caCertificateFile != null) ''TLS CA Certificate File = "${tls_cfg.caCertificateFile}";''
+      )
+  );
+
   fd_cfg = config.services.bacula-fd;
   fd_conf = pkgs.writeText "bacula-fd.conf"
     ''
@@ -18,6 +43,7 @@ let
         WorkingDirectory = ${libDir};
         Pid Directory = /run;
         ${fd_cfg.extraClientConfig}
+        ${tls_conf fd_cfg.tls}
       }
 
       ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
@@ -25,6 +51,7 @@ let
         Name = "${name}";
         Password = ${value.password};
         Monitor = ${value.monitor};
+        ${tls_conf value.tls}
       }
       '') fd_cfg.director)}
 
@@ -44,6 +71,7 @@ let
         WorkingDirectory = ${libDir};
         Pid Directory = /run;
         ${sd_cfg.extraStorageConfig}
+        ${tls_conf sd_cfg.tls}
       }
 
       ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
@@ -70,6 +98,7 @@ let
         Name = "${name}";
         Password = ${value.password};
         Monitor = ${value.monitor};
+        ${tls_conf value.tls}
       }
       '') sd_cfg.director)}
 
@@ -90,6 +119,7 @@ let
       Working Directory = ${libDir};
       Pid Directory = /run/;
       QueryFile = ${pkgs.bacula}/etc/query.sql;
+      ${tls_conf dir_cfg.tls}
       ${dir_cfg.extraDirectorConfig}
     }
 
@@ -108,13 +138,99 @@ let
     ${dir_cfg.extraConfig}
     '';
 
-  directorOptions = {...}:
+  linkOption = name: destination: "[${name}](#opt-${builtins.replaceStrings [ "<" ">"] ["_" "_"] destination})";
+  tlsLink = destination: submodulePath: linkOption "${submodulePath}.${destination}" "${submodulePath}.${destination}";
+
+  tlsOptions = submodulePath: {...}:
+  {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Specifies if TLS should be enabled.
+          If this is set to `false`, TLS will be completely disabled, even if ${tlsLink "tls.require" submodulePath} is true.
+        '';
+      };
+      require = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = mdDoc ''
+          Require TLS or TLS-PSK encryption.
+          This directive is ignored unless ${tlsLink "tls.enable" submodulePath} is true or TLS PSK Enable is set to `yes`.
+          If TLS is not required while TLS or TLS-PSK are enabled, then the Bacula component
+          will connect with other components either with or without TLS or TLS-PSK.
+
+          If ${tlsLink "tls.enable" submodulePath} or TLS-PSK is enabled and TLS is required, then the Bacula
+          component will refuse any connection request that does not use TLS.
+        '';
+      };
+      certificate = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = mdDoc ''
+          The full path to the PEM encoded TLS certificate.
+          It will be used as either a client or server certificate,
+          depending on the connection direction.
+          This directive is required in a server context, but it may
+          not be specified in a client context if ${tlsLink "tls.verifyPeer" submodulePath} is
+          `false` in the corresponding server context.
+        '';
+      };
+      key = mkOption {
+        type = types.path;
+        description = mdDoc ''
+          The path of a PEM encoded TLS private key.
+          It must correspond to the TLS certificate.
+        '';
+      };
+      verifyPeer = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = mdDoc ''
+          Verify peer certificate.
+          Instructs server to request and verify the client's X.509 certificate.
+          Any client certificate signed by a known-CA will be accepted.
+          Additionally, the client's X.509 certificate Common Name must match the value of the Address directive.
+          If ${tlsLink "tls.allowedCN" submodulePath} is used,
+          the client's X.509 certificate Common Name must also correspond to
+          one of the CN specified in the ${tlsLink "tls.allowedCN" submodulePath} directive.
+          This directive is valid only for a server and not in client context.
+
+          Standard from Bacula is `true`.
+        '';
+      };
+      allowedCN = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = mdDoc ''
+          Common name attribute of allowed peer certificates.
+          This directive is valid for a server and in a client context.
+          If this directive is specified, the peer certificate will be verified against this list.
+          If this directive is configured on the server side, the allowed
+          CN list will not be checked if ${tlsLink "tls.verifyPeer" submodulePath} is false.
+        '';
+      };
+      caCertificateFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = mdDoc ''
+          The path specifying a PEM encoded TLS CA certificate(s).
+          Multiple certificates are permitted in the file.
+          One of TLS CA Certificate File or TLS CA Certificate Dir is required in a server context, unless
+          ${tlsLink "tls.verifyPeer" submodulePath} is false, and are always required in a client context.
+        '';
+      };
+    };
+  };
+
+  directorOptions = submodulePath: {...}:
   {
     options = {
       password = mkOption {
         type = types.str;
         # TODO: required?
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Specifies the password that must be supplied for the default Bacula
           Console to be authorized. The same password must appear in the
           Director resource of the Console configuration file. For added
@@ -135,7 +251,7 @@ let
         type = types.enum [ "no" "yes" ];
         default = "no";
         example = "yes";
-        description = lib.mdDoc ''
+        description = mdDoc ''
           If Monitor is set to `no`, this director will have
           full access to this Storage daemon. If Monitor is set to
           `yes`, this director will only be able to fetch the
@@ -146,6 +262,13 @@ let
           security problems.
         '';
       };
+
+      tls = mkOption {
+        type = types.submodule (tlsOptions "${submodulePath}.director.<name>");
+        description = mdDoc ''
+          TLS Options for the Director in this configuration.
+        '';
+      };
     };
   };
 
@@ -154,7 +277,7 @@ let
     options = {
       changerDevice = mkOption {
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The specified name-string must be the generic SCSI device name of the
           autochanger that corresponds to the normal read/write Archive Device
           specified in the Device resource. This generic SCSI device name
@@ -173,7 +296,7 @@ let
 
       changerCommand = mkOption {
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The name-string specifies an external program to be called that will
           automatically change volumes as required by Bacula. Normally, this
           directive will be specified only in the AutoChanger resource, which
@@ -195,14 +318,14 @@ let
       };
 
       devices = mkOption {
-        description = lib.mdDoc "";
+        description = mdDoc "";
         type = types.listOf types.str;
       };
 
       extraAutochangerConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Autochanger directive.
         '';
         example = ''
@@ -219,7 +342,7 @@ let
       archiveDevice = mkOption {
         # TODO: required?
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The specified name-string gives the system file name of the storage
           device managed by this storage daemon. This will usually be the
           device file name of a removable storage device (tape drive), for
@@ -236,7 +359,7 @@ let
       mediaType = mkOption {
         # TODO: required?
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The specified name-string names the type of media supported by this
           device, for example, `DLT7000`. Media type names are
           arbitrary in that you set them to anything you want, but they must be
@@ -274,7 +397,7 @@ let
       extraDeviceConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Device directive.
         '';
         example = ''
@@ -295,7 +418,7 @@ in {
       enable = mkOption {
         type = types.bool;
         default = false;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Whether to enable the Bacula File Daemon.
         '';
       };
@@ -304,7 +427,7 @@ in {
         default = "${config.networking.hostName}-fd";
         defaultText = literalExpression ''"''${config.networking.hostName}-fd"'';
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The client name that must be used by the Director when connecting.
           Generally, it is a good idea to use a name related to the machine so
           that error messages can be easily identified if you have multiple
@@ -315,7 +438,7 @@ in {
       port = mkOption {
         default = 9102;
         type = types.port;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This specifies the port number on which the Client listens for
           Director connections. It must agree with the FDPort specified in
           the Client resource of the Director's configuration file.
@@ -324,16 +447,26 @@ in {
 
       director = mkOption {
         default = {};
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This option defines director resources in Bacula File Daemon.
         '';
-        type = with types; attrsOf (submodule directorOptions);
+        type = types.attrsOf (types.submodule (directorOptions "services.bacula-fd"));
       };
 
+
+      tls = mkOption {
+        type = types.submodule (tlsOptions "services.bacula-fd");
+        default = { };
+        description = mdDoc ''
+          TLS Options for the File Daemon.
+          Important notice: The backup won't be encrypted.
+        '';
+      };
+
       extraClientConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Client directive.
         '';
         example = ''
@@ -345,7 +478,7 @@ in {
       extraMessagesConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Messages directive.
         '';
         example = ''
@@ -358,7 +491,7 @@ in {
       enable = mkOption {
         type = types.bool;
         default = false;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Whether to enable Bacula Storage Daemon.
         '';
       };
@@ -367,7 +500,7 @@ in {
         default = "${config.networking.hostName}-sd";
         defaultText = literalExpression ''"''${config.networking.hostName}-sd"'';
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Specifies the Name of the Storage daemon.
         '';
       };
@@ -375,7 +508,7 @@ in {
       port = mkOption {
         default = 9103;
         type = types.port;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Specifies port number on which the Storage daemon listens for
           Director connections.
         '';
@@ -383,32 +516,32 @@ in {
 
       director = mkOption {
         default = {};
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This option defines Director resources in Bacula Storage Daemon.
         '';
-        type = with types; attrsOf (submodule directorOptions);
+        type = types.attrsOf (types.submodule (directorOptions "services.bacula-sd"));
       };
 
       device = mkOption {
         default = {};
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This option defines Device resources in Bacula Storage Daemon.
         '';
-        type = with types; attrsOf (submodule deviceOptions);
+        type = types.attrsOf (types.submodule deviceOptions);
       };
 
       autochanger = mkOption {
         default = {};
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This option defines Autochanger resources in Bacula Storage Daemon.
         '';
-        type = with types; attrsOf (submodule autochangerOptions);
+        type = types.attrsOf (types.submodule autochangerOptions);
       };
 
       extraStorageConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Storage directive.
         '';
         example = ''
@@ -420,13 +553,21 @@ in {
       extraMessagesConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Messages directive.
         '';
         example = ''
           console = all
         '';
       };
+      tls = mkOption {
+        type = types.submodule (tlsOptions "services.bacula-sd");
+        default = { };
+        description = mdDoc ''
+          TLS Options for the Storage Daemon.
+          Important notice: The backup won't be encrypted.
+        '';
+      };
 
     };
 
@@ -434,7 +575,7 @@ in {
       enable = mkOption {
         type = types.bool;
         default = false;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Whether to enable Bacula Director Daemon.
         '';
       };
@@ -443,7 +584,7 @@ in {
         default = "${config.networking.hostName}-dir";
         defaultText = literalExpression ''"''${config.networking.hostName}-dir"'';
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The director name used by the system administrator. This directive is
           required.
         '';
@@ -452,7 +593,7 @@ in {
       port = mkOption {
         default = 9101;
         type = types.port;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Specify the port (a positive integer) on which the Director daemon
           will listen for Bacula Console connections. This same port number
           must be specified in the Director resource of the Console
@@ -465,7 +606,7 @@ in {
       password = mkOption {
         # TODO: required?
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
            Specifies the password that must be supplied for a Director.
         '';
       };
@@ -473,7 +614,7 @@ in {
       extraMessagesConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Messages directive.
         '';
         example = ''
@@ -484,7 +625,7 @@ in {
       extraDirectorConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Director directive.
         '';
         example = ''
@@ -496,13 +637,22 @@ in {
       extraConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration for Bacula Director Daemon.
         '';
         example = ''
           TODO
         '';
       };
+
+      tls = mkOption {
+        type = types.submodule (tlsOptions "services.bacula-dir");
+        default = { };
+        description = mdDoc ''
+          TLS Options for the Director.
+          Important notice: The backup won't be encrypted.
+        '';
+      };
     };
   };
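
A sketch of the new TLS submodule on the File Daemon, with hypothetical certificate paths and a placeholder password; the same `tls` attrset shape applies to bacula-sd, bacula-dir and the per-director entries:

```
{
  services.bacula-fd = {
    enable = true;
    tls = {
      enable = true;
      require = true;
      certificate = "/var/lib/bacula/tls/client.crt";     # hypothetical paths
      key = "/var/lib/bacula/tls/client.key";
      caCertificateFile = "/var/lib/bacula/tls/ca.crt";
    };
    director."main-dir" = {
      password = "changeme";                              # placeholder secret
      tls = {
        enable = true;
        certificate = "/var/lib/bacula/tls/client.crt";
        key = "/var/lib/bacula/tls/client.key";
        caCertificateFile = "/var/lib/bacula/tls/ca.crt";
      };
    };
  };
}
```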
 
diff --git a/nixos/modules/services/continuous-integration/hydra/default.nix b/nixos/modules/services/continuous-integration/hydra/default.nix
index b1d44e67658bd..10e1f0532c849 100644
--- a/nixos/modules/services/continuous-integration/hydra/default.nix
+++ b/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -178,6 +178,24 @@ in
         description = lib.mdDoc "Whether to run the server in debug mode.";
       };
 
+      maxServers = mkOption {
+        type = types.int;
+        default = 25;
+        description = lib.mdDoc "Maximum number of starman workers to spawn.";
+      };
+
+      minSpareServers = mkOption {
+        type = types.int;
+        default = 4;
+        description = lib.mdDoc "Minimum number of spare starman workers to keep.";
+      };
+
+      maxSpareServers = mkOption {
+        type = types.int;
+        default = 5;
+        description = lib.mdDoc "Maximum number of spare starman workers to keep.";
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         description = lib.mdDoc "Extra lines for the Hydra configuration.";
@@ -224,6 +242,16 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.maxServers != 0 && cfg.maxSpareServers != 0 && cfg.minSpareServers != 0;
+        message = "services.hydra.{minSpareServers,maxSpareServers,minSpareServers} cannot be 0";
+      }
+      {
+        assertion = cfg.minSpareServers < cfg.maxSpareServers;
+        message = "services.hydra.minSpareServers cannot be bigger than servives.hydra.maxSpareServers";
+      }
+    ];
 
     users.groups.hydra = {
       gid = config.ids.gids.hydra;
@@ -258,7 +286,7 @@ in
         using_frontend_proxy = 1
         base_uri = ${cfg.hydraURL}
         notification_sender = ${cfg.notificationSender}
-        max_servers = 25
+        max_servers = ${toString cfg.maxServers}
         ${optionalString (cfg.logo != null) ''
           hydra_logo = ${cfg.logo}
         ''}
@@ -359,8 +387,8 @@ in
         serviceConfig =
           { ExecStart =
               "@${hydra-package}/bin/hydra-server hydra-server -f -h '${cfg.listenHost}' "
-              + "-p ${toString cfg.port} --max_spare_servers 5 --max_servers 25 "
-              + "--max_requests 100 ${optionalString cfg.debugServer "-d"}";
+              + "-p ${toString cfg.port} --min_spare_servers ${toString cfg.minSpareServers} --max_spare_servers ${toString cfg.maxSpareServers} "
+              + "--max_servers ${toString cfg.maxServers} --max_requests 100 ${optionalString cfg.debugServer "-d"}";
             User = "hydra-www";
             PermissionsStartOnly = true;
             Restart = "always";
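
A sketch of the new starman tuning knobs, with hypothetical URL and sender values; the assertions above require all three to be non-zero and minSpareServers to be smaller than maxSpareServers:

```
{
  services.hydra = {
    enable = true;
    hydraURL = "https://hydra.example.org";        # hypothetical values
    notificationSender = "hydra@example.org";
    maxServers = 30;
    minSpareServers = 2;
    maxSpareServers = 6;
  };
}
```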
diff --git a/nixos/modules/services/databases/lldap.nix b/nixos/modules/services/databases/lldap.nix
index e821da8e58aa3..033de7af886f2 100644
--- a/nixos/modules/services/databases/lldap.nix
+++ b/nixos/modules/services/databases/lldap.nix
@@ -107,10 +107,25 @@ in
       wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
+      # lldap defaults to a hardcoded `jwt_secret` value if none is provided, which is bad, because
+      # an attacker could create a valid admin jwt access token fairly trivially.
+      # Because there are 3 different ways `jwt_secret` can be provided, we check if any one of them is present,
+      # and if not, bootstrap a secret in `/var/lib/lldap/jwt_secret_file` and give that to lldap.
+      script = lib.optionalString (!cfg.settings ? jwt_secret) ''
+        if [[ -z "$LLDAP_JWT_SECRET_FILE" ]] && [[ -z "$LLDAP_JWT_SECRET" ]]; then
+          if [[ ! -e "./jwt_secret_file" ]]; then
+            ${lib.getExe pkgs.openssl} rand -base64 -out ./jwt_secret_file 32
+          fi
+          export LLDAP_JWT_SECRET_FILE="./jwt_secret_file"
+        fi
+      '' + ''
+        ${lib.getExe cfg.package} run --config-file ${format.generate "lldap_config.toml" cfg.settings}
+      '';
       serviceConfig = {
-        ExecStart = "${lib.getExe cfg.package} run --config-file ${format.generate "lldap_config.toml" cfg.settings}";
         StateDirectory = "lldap";
+        StateDirectoryMode = "0750";
         WorkingDirectory = "%S/lldap";
+        UMask = "0027";
         User = "lldap";
         Group = "lldap";
         DynamicUser = true;
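
If a JWT secret is supplied explicitly, the bootstrap script above is skipped; a sketch, assuming the module's `environment` option and a secret file provisioned elsewhere:

```
{
  services.lldap = {
    enable = true;
    # With this set (or settings.jwt_secret / LLDAP_JWT_SECRET), no
    # jwt_secret_file is generated in /var/lib/lldap.
    environment.LLDAP_JWT_SECRET_FILE = "/run/secrets/lldap-jwt";  # hypothetical path
  };
}
```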
diff --git a/nixos/modules/services/databases/postgresql.md b/nixos/modules/services/databases/postgresql.md
index 7d141f12b5dea..3ff1f00fa9cfb 100644
--- a/nixos/modules/services/databases/postgresql.md
+++ b/nixos/modules/services/databases/postgresql.md
@@ -277,7 +277,7 @@ self: super: {
 Here's a recipe on how to override a particular plugin through an overlay:
 ```
 self: super: {
-  postgresql_15 = super.postgresql_15.override { this = self.postgresql_15; } // {
+  postgresql_15 = super.postgresql_15 // {
     pkgs = super.postgresql_15.pkgs // {
       pg_repack = super.postgresql_15.pkgs.pg_repack.overrideAttrs (_: {
         name = "pg_repack-v20181024";
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index c4e76c82ba5c7..c3f3b98ae5e75 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -14,7 +14,7 @@ let
       #     package = pkgs.postgresql_<major>;
       #   };
       # works.
-      base = if cfg.enableJIT && !cfg.package.jitSupport then cfg.package.withJIT else cfg.package;
+      base = if cfg.enableJIT then cfg.package.withJIT else cfg.package;
     in
     if cfg.extraPlugins == []
       then base
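
With the simplified condition, `enableJIT` now always selects the JIT-enabled build of the configured package; a minimal sketch:

```
{ pkgs, ... }:
{
  services.postgresql = {
    enable = true;
    package = pkgs.postgresql_15;
    enableJIT = true;   # resolves to pkgs.postgresql_15.withJIT
  };
}
```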
diff --git a/nixos/modules/services/x11/desktop-managers/plasma6.nix b/nixos/modules/services/desktop-managers/plasma6.nix
index 26e485a61be29..e20b431f0b58e 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma6.nix
+++ b/nixos/modules/services/desktop-managers/plasma6.nix
@@ -5,8 +5,7 @@
   utils,
   ...
 }: let
-  xcfg = config.services.xserver;
-  cfg = xcfg.desktopManager.plasma6;
+  cfg = config.services.desktopManager.plasma6;
 
   inherit (pkgs) kdePackages;
   inherit (lib) literalExpression mkDefault mkIf mkOption mkPackageOptionMD types;
@@ -17,7 +16,7 @@
   '';
 in {
   options = {
-    services.xserver.desktopManager.plasma6 = {
+    services.desktopManager.plasma6 = {
       enable = mkOption {
         type = types.bool;
         default = false;
@@ -44,6 +43,12 @@ in {
     };
   };
 
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma6" "enable" ] [ "services" "desktopManager" "plasma6" "enable" ])
+    (lib.mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma6" "enableQt5Integration" ] [ "services" "desktopManager" "plasma6" "enableQt5Integration" ])
+    (lib.mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma6" "notoPackage" ] [ "services" "desktopManager" "plasma6" "notoPackage" ])
+  ];
+
   config = mkIf cfg.enable {
     assertions = [
       {
@@ -161,10 +166,11 @@ in {
     in
       requiredPackages
       ++ utils.removePackagesByName optionalPackages config.environment.plasma6.excludePackages
-      ++ lib.optionals config.services.xserver.desktopManager.plasma6.enableQt5Integration [
+      ++ lib.optionals config.services.desktopManager.plasma6.enableQt5Integration [
         breeze.qt5
         plasma-integration.qt5
         pkgs.plasma5Packages.kwayland-integration
+        pkgs.plasma5Packages.kio
         kio-extras-kf5
       ]
       # Optional hardware support features
@@ -175,7 +181,7 @@ in {
       ++ lib.optional config.powerManagement.enable powerdevil
       ++ lib.optional config.services.colord.enable colord-kde
       ++ lib.optional config.services.hardware.bolt.enable plasma-thunderbolt
-      ++ lib.optionals config.services.samba.enable [kdenetwork-filesharing pkgs.samba]
+      ++ lib.optional config.services.samba.enable kdenetwork-filesharing
       ++ lib.optional config.services.xserver.wacom.enable wacomtablet
       ++ lib.optional config.services.flatpak.enable flatpak-kcm;
 
@@ -185,7 +191,7 @@ in {
       "/libexec" # for drkonqi
     ];
 
-    environment.etc."X11/xkb".source = xcfg.xkb.dir;
+    environment.etc."X11/xkb".source = config.services.xserver.xkb.dir;
 
     # Add ~/.config/kdedefaults to XDG_CONFIG_DIRS for shells, since Plasma sets that.
     # FIXME: maybe we should append to XDG_CONFIG_DIRS in /etc/set-environment instead?
@@ -210,7 +216,7 @@ in {
       serif = ["Noto Serif"];
     };
 
-    programs.gnupg.agent.pinentryPackage = pkgs.pinentry-qt;
+    programs.gnupg.agent.pinentryPackage = mkDefault pkgs.pinentry-qt;
     programs.ssh.askPassword = mkDefault "${kdePackages.ksshaskpass.out}/bin/ksshaskpass";
 
     # Enable helpful DBus services.
@@ -247,6 +253,7 @@ in {
     services.xserver.displayManager.sddm = {
       package = kdePackages.sddm;
       theme = mkDefault "breeze";
+      wayland.compositor = "kwin";
       extraPackages = with kdePackages; [
         breeze-icons
         kirigami
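
After the rename, Plasma 6 is enabled under `services.desktopManager`; the old `services.xserver.desktopManager.plasma6.*` names keep working through the mkRenamedOptionModule entries above. A minimal sketch with SDDM, whose Wayland compositor is now forced to kwin:

```
{
  services.desktopManager.plasma6.enable = true;
  services.xserver.displayManager.sddm = {
    enable = true;
    wayland.enable = true;   # SDDM then starts kwin as its compositor
  };
}
```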
diff --git a/nixos/modules/services/desktops/flatpak.nix b/nixos/modules/services/desktops/flatpak.nix
index 4c26e6874023a..62ef38a3d5548 100644
--- a/nixos/modules/services/desktops/flatpak.nix
+++ b/nixos/modules/services/desktops/flatpak.nix
@@ -32,6 +32,8 @@ in {
 
     security.polkit.enable = true;
 
+    fonts.fontDir.enable = true;
+
     services.dbus.packages = [ pkgs.flatpak ];
 
     systemd.packages = [ pkgs.flatpak ];
diff --git a/nixos/modules/services/desktops/pipewire/pipewire.nix b/nixos/modules/services/desktops/pipewire/pipewire.nix
index 09448833620c6..5c6eba889ebed 100644
--- a/nixos/modules/services/desktops/pipewire/pipewire.nix
+++ b/nixos/modules/services/desktops/pipewire/pipewire.nix
@@ -1,14 +1,21 @@
 # PipeWire service.
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (builtins) attrNames concatMap length;
+  inherit (lib) maintainers teams;
+  inherit (lib.attrsets) attrByPath attrsToList concatMapAttrs filterAttrs;
+  inherit (lib.lists) flatten optional optionals;
+  inherit (lib.modules) mkIf mkRemovedOptionModule;
+  inherit (lib.options) literalExpression mkEnableOption mkOption mkPackageOption;
+  inherit (lib.strings) concatMapStringsSep hasPrefix optionalString;
+  inherit (lib.types) attrsOf bool listOf package;
+
   json = pkgs.formats.json {};
   mapToFiles = location: config: concatMapAttrs (name: value: { "share/pipewire/${location}.conf.d/${name}.conf" = json.generate "${name}" value; }) config;
   extraConfigPkgFromFiles = locations: filesSet: pkgs.runCommand "pipewire-extra-config" { } ''
-    mkdir -p ${lib.concatMapStringsSep " " (l: "$out/share/pipewire/${l}.conf.d") locations}
-    ${lib.concatMapStringsSep ";" ({name, value}: "ln -s ${value} $out/${name}") (lib.attrsToList filesSet)}
+    mkdir -p ${concatMapStringsSep " " (l: "$out/share/pipewire/${l}.conf.d") locations}
+    ${concatMapStringsSep ";" ({name, value}: "ln -s ${value} $out/${name}") (attrsToList filesSet)}
   '';
   cfg = config.services.pipewire;
   enable32BitAlsaPlugins = cfg.alsa.support32Bit
@@ -40,15 +47,15 @@ let
     name = "pipewire-configs";
     paths = configPackages
       ++ [ extraConfigPkg ]
-      ++ lib.optionals cfg.wireplumber.enable cfg.wireplumber.configPackages;
+      ++ optionals cfg.wireplumber.enable cfg.wireplumber.configPackages;
     pathsToLink = [ "/share/pipewire" ];
   };
 
-  requiredLv2Packages = lib.flatten
+  requiredLv2Packages = flatten
     (
-      lib.concatMap
+      concatMap
       (p:
-        lib.attrByPath ["passthru" "requiredLv2Packages"] [] p
+        attrByPath ["passthru" "requiredLv2Packages"] [] p
       )
       configPackages
     );
@@ -59,50 +66,58 @@ let
     pathsToLink = [ "/lib/lv2" ];
   };
 in {
-  meta.maintainers = teams.freedesktop.members ++ [ lib.maintainers.k900 ];
+  meta.maintainers = teams.freedesktop.members ++ [ maintainers.k900 ];
 
   ###### interface
   options = {
     services.pipewire = {
-      enable = mkEnableOption (lib.mdDoc "PipeWire service");
+      enable = mkEnableOption "PipeWire service";
 
       package = mkPackageOption pkgs "pipewire" { };
 
       socketActivation = mkOption {
         default = true;
-        type = types.bool;
-        description = lib.mdDoc ''
+        type = bool;
+        description = ''
           Automatically run PipeWire when connections are made to the PipeWire socket.
         '';
       };
 
       audio = {
-        enable = lib.mkOption {
-          type = lib.types.bool;
+        enable = mkOption {
+          type = bool;
           # this is for backwards compatibility
           default = cfg.alsa.enable || cfg.jack.enable || cfg.pulse.enable;
-          defaultText = lib.literalExpression "config.services.pipewire.alsa.enable || config.services.pipewire.jack.enable || config.services.pipewire.pulse.enable";
-          description = lib.mdDoc "Whether to use PipeWire as the primary sound server";
+          defaultText = literalExpression "config.services.pipewire.alsa.enable || config.services.pipewire.jack.enable || config.services.pipewire.pulse.enable";
+          description = "Whether to use PipeWire as the primary sound server";
         };
       };
 
       alsa = {
-        enable = mkEnableOption (lib.mdDoc "ALSA support");
-        support32Bit = mkEnableOption (lib.mdDoc "32-bit ALSA support on 64-bit systems");
+        enable = mkEnableOption "ALSA support";
+        support32Bit = mkEnableOption "32-bit ALSA support on 64-bit systems";
       };
 
       jack = {
-        enable = mkEnableOption (lib.mdDoc "JACK audio emulation");
+        enable = mkEnableOption "JACK audio emulation";
+      };
+
+      raopOpenFirewall = mkOption {
+        type = bool;
+        default = false;
+        description = ''
+          Opens UDP/6001-6002, required by RAOP/Airplay for timing and control data.
+        '';
       };
 
       pulse = {
-        enable = mkEnableOption (lib.mdDoc "PulseAudio server emulation");
+        enable = mkEnableOption "PulseAudio server emulation";
       };
 
-      systemWide = lib.mkOption {
-        type = lib.types.bool;
+      systemWide = mkOption {
+        type = bool;
         default = false;
-        description = lib.mdDoc ''
+        description = ''
           If true, a system-wide PipeWire service and socket is enabled
           allowing all users in the "pipewire" group to use it simultaneously.
           If false, then user units are used instead, restricting access to
@@ -116,7 +131,7 @@ in {
 
       extraConfig = {
         pipewire = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-clock-rate" = {
@@ -130,7 +145,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire server.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire.conf.d`.
@@ -149,7 +164,7 @@ in {
           '';
         };
         client = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-no-resample" = {
@@ -158,7 +173,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire client library, used by most applications.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client.conf.d`.
@@ -169,7 +184,7 @@ in {
           '';
         };
         client-rt = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-alsa-linear-volume" = {
@@ -178,7 +193,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire client library, used by real-time applications and legacy ALSA clients.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client-rt.conf.d`.
@@ -190,7 +205,7 @@ in {
           '';
         };
         jack = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "20-hide-midi" = {
@@ -199,7 +214,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire JACK server and client library.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/jack.conf.d`.
@@ -210,7 +225,7 @@ in {
           '';
         };
         pipewire-pulse = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "15-force-s16-info" = {
@@ -224,7 +239,7 @@ in {
               }];
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire PulseAudio server.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire-pulse.conf.d`.
@@ -240,10 +255,32 @@ in {
         };
       };
 
-      configPackages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      configPackages = mkOption {
+        type = listOf package;
         default = [];
-        description = lib.mdDoc ''
+        example = literalExpression ''[
+          (pkgs.writeTextDir "share/pipewire/pipewire.conf.d/10-loopback.conf" '''
+            context.modules = [
+            {   name = libpipewire-module-loopback
+                args = {
+                  node.description = "Scarlett Focusrite Line 1"
+                  capture.props = {
+                      audio.position = [ FL ]
+                      stream.dont-remix = true
+                      node.target = "alsa_input.usb-Focusrite_Scarlett_Solo_USB_Y7ZD17C24495BC-00.analog-stereo"
+                      node.passive = true
+                  }
+                  playback.props = {
+                      node.name = "SF_mono_in_1"
+                      media.class = "Audio/Source"
+                      audio.position = [ MONO ]
+                  }
+                }
+            }
+            ]
+          ''')
+        ]'';
+        description = ''
           List of packages that provide PipeWire configuration, in the form of
           `share/pipewire/*/*.conf` files.
 
@@ -252,11 +289,11 @@ in {
         '';
       };
 
-      extraLv2Packages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      extraLv2Packages = mkOption {
+        type = listOf package;
         default = [];
-        example = lib.literalExpression "[ pkgs.lsp-plugins ]";
-        description = lib.mdDoc ''
+        example = literalExpression "[ pkgs.lsp-plugins ]";
+        description = ''
           List of packages that provide LV2 plugins in `lib/lv2` that should
           be made available to PipeWire for [filter chains][wiki-filter-chain].
 
@@ -271,11 +308,11 @@ in {
   };
 
   imports = [
-    (lib.mkRemovedOptionModule ["services" "pipewire" "config"] ''
+    (mkRemovedOptionModule ["services" "pipewire" "config"] ''
       Overriding default PipeWire configuration through NixOS options never worked correctly and is no longer supported.
       Please create drop-in configuration files via `services.pipewire.extraConfig` instead.
     '')
-    (lib.mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
+    (mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
       pipewire-media-session is no longer supported upstream and has been removed.
       Please switch to `services.pipewire.wireplumber` instead.
     '')
@@ -298,12 +335,12 @@ in {
         message = "Using PipeWire's ALSA/PulseAudio compatibility layers requires running PipeWire as the sound server. Set `services.pipewire.audio.enable` to true.";
       }
       {
-        assertion = builtins.length
-          (builtins.attrNames
+        assertion = length
+          (attrNames
             (
-              lib.filterAttrs
+              filterAttrs
                 (name: value:
-                  lib.hasPrefix "pipewire/" name || name == "pipewire"
+                  hasPrefix "pipewire/" name || name == "pipewire"
                 )
                 config.environment.etc
             )) == 1;
@@ -312,7 +349,7 @@ in {
     ];
 
     environment.systemPackages = [ cfg.package ]
-                                 ++ lib.optional cfg.jack.enable jack-libs;
+                                 ++ optional cfg.jack.enable jack-libs;
 
     systemd.packages = [ cfg.package ];
 
@@ -328,16 +365,16 @@ in {
     systemd.user.sockets.pipewire.enable = !cfg.systemWide;
     systemd.user.services.pipewire.enable = !cfg.systemWide;
 
-    systemd.services.pipewire.environment.LV2_PATH = lib.mkIf cfg.systemWide "${lv2Plugins}/lib/lv2";
-    systemd.user.services.pipewire.environment.LV2_PATH = lib.mkIf (!cfg.systemWide) "${lv2Plugins}/lib/lv2";
+    systemd.services.pipewire.environment.LV2_PATH = mkIf cfg.systemWide "${lv2Plugins}/lib/lv2";
+    systemd.user.services.pipewire.environment.LV2_PATH = mkIf (!cfg.systemWide) "${lv2Plugins}/lib/lv2";
 
     # Mask pw-pulse if it's not wanted
     systemd.user.services.pipewire-pulse.enable = cfg.pulse.enable;
     systemd.user.sockets.pipewire-pulse.enable = cfg.pulse.enable;
 
-    systemd.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.sockets.pipewire.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire-pulse.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
 
     services.udev.packages = [ cfg.package ];
 
@@ -369,16 +406,18 @@ in {
     };
 
     environment.sessionVariables.LD_LIBRARY_PATH =
-      lib.mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
+      mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
+
+    networking.firewall.allowedUDPPorts = mkIf cfg.raopOpenFirewall [ 6001 6002 ];
 
-    users = lib.mkIf cfg.systemWide {
+    users = mkIf cfg.systemWide {
       users.pipewire = {
         uid = config.ids.uids.pipewire;
         group = "pipewire";
         extraGroups = [
           "audio"
           "video"
-        ] ++ lib.optional config.security.rtkit.enable "rtkit";
+        ] ++ optional config.security.rtkit.enable "rtkit";
         description = "PipeWire system service user";
         isSystemUser = true;
         home = "/var/lib/pipewire";
diff --git a/nixos/modules/services/desktops/pipewire/wireplumber.nix b/nixos/modules/services/desktops/pipewire/wireplumber.nix
index 009d68bd4f28d..6ab62eb03c25f 100644
--- a/nixos/modules/services/desktops/pipewire/wireplumber.nix
+++ b/nixos/modules/services/desktops/pipewire/wireplumber.nix
@@ -1,46 +1,65 @@
 { config, lib, pkgs, ... }:
 
 let
+  inherit (builtins) attrNames concatMap length;
+  inherit (lib) maintainers;
+  inherit (lib.attrsets) attrByPath filterAttrs;
+  inherit (lib.lists) flatten optional;
+  inherit (lib.modules) mkIf;
+  inherit (lib.options) literalExpression mkOption;
+  inherit (lib.strings) hasPrefix;
+  inherit (lib.types) bool listOf package;
+
   pwCfg = config.services.pipewire;
   cfg = pwCfg.wireplumber;
   pwUsedForAudio = pwCfg.audio.enable;
 in
 {
-  meta.maintainers = [ lib.maintainers.k900 ];
+  meta.maintainers = [ maintainers.k900 ];
 
   options = {
     services.pipewire.wireplumber = {
-      enable = lib.mkOption {
-        type = lib.types.bool;
-        default = config.services.pipewire.enable;
-        defaultText = lib.literalExpression "config.services.pipewire.enable";
-        description = lib.mdDoc "Whether to enable WirePlumber, a modular session / policy manager for PipeWire";
+      enable = mkOption {
+        type = bool;
+        default = pwCfg.enable;
+        defaultText = literalExpression "config.services.pipewire.enable";
+        description = "Whether to enable WirePlumber, a modular session / policy manager for PipeWire";
       };
 
-      package = lib.mkOption {
-        type = lib.types.package;
+      package = mkOption {
+        type = package;
         default = pkgs.wireplumber;
-        defaultText = lib.literalExpression "pkgs.wireplumber";
-        description = lib.mdDoc "The WirePlumber derivation to use.";
+        defaultText = literalExpression "pkgs.wireplumber";
+        description = "The WirePlumber derivation to use.";
       };
 
-      configPackages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      configPackages = mkOption {
+        type = listOf package;
         default = [ ];
-        description = lib.mdDoc ''
+        example = literalExpression ''[
+          (pkgs.writeTextDir "share/wireplumber/wireplumber.conf.d/10-bluez.conf" '''
+            monitor.bluez.properties = {
+              bluez5.roles = [ a2dp_sink a2dp_source bap_sink bap_source hsp_hs hsp_ag hfp_hf hfp_ag ]
+              bluez5.codecs = [ sbc sbc_xq aac ]
+              bluez5.enable-sbc-xq = true
+              bluez5.hfphsp-backend = "native"
+            }
+          ''')
+        ]'';
+        description = ''
           List of packages that provide WirePlumber configuration, in the form of
-          `share/wireplumber/*/*.lua` files.
+          `share/wireplumber/*/*.conf` files.
 
           LV2 dependencies will be picked up from config packages automatically
           via `passthru.requiredLv2Packages`.
         '';
       };
 
-      extraLv2Packages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      extraLv2Packages = mkOption {
+        type = listOf package;
         default = [];
-        example = lib.literalExpression "[ pkgs.lsp-plugins ]";
-        description = lib.mdDoc ''
+        example = literalExpression "[ pkgs.lsp-plugins ]";
+        description = ''
           List of packages that provide LV2 plugins in `lib/lv2` that should
           be made available to WirePlumber for [filter chains][wiki-filter-chain].
 
@@ -56,24 +75,30 @@ in
 
   config =
     let
-      pwNotForAudioConfigPkg = pkgs.writeTextDir "share/wireplumber/main.lua.d/80-pw-not-for-audio.lua" ''
-        -- PipeWire is not used for audio, so prevent it from grabbing audio devices
-        alsa_monitor.enable = function() end
-      '';
-      systemwideConfigPkg = pkgs.writeTextDir "share/wireplumber/main.lua.d/80-systemwide.lua" ''
-        -- When running system-wide, these settings need to be disabled (they
-        -- use functions that aren't available on the system dbus).
-        alsa_monitor.properties["alsa.reserve"] = false
-        default_access.properties["enable-flatpak-portal"] = false
+      pwNotForAudioConfigPkg = pkgs.writeTextDir "share/wireplumber/wireplumber.conf.d/90-nixos-no-audio.conf" ''
+        # PipeWire is not used for audio, so WirePlumber should not be handling it
+        wireplumber.profiles = {
+          main = {
+            hardware.audio = disabled
+            hardware.bluetooth = disabled
+          }
+        }
       '';
-      systemwideBluetoothConfigPkg = pkgs.writeTextDir "share/wireplumber/bluetooth.lua.d/80-systemwide.lua" ''
-        -- When running system-wide, logind-integration needs to be disabled.
-        bluez_monitor.properties["with-logind"] = false
+
+      systemwideConfigPkg = pkgs.writeTextDir "share/wireplumber/wireplumber.conf.d/90-nixos-systemwide.conf" ''
+        # When running system-wide, we don't have logind to call ReserveDevice,
+        # and Bluetooth logind integration needs to be disabled.
+        wireplumber.profiles = {
+          main = {
+            support.reserve-device = disabled
+            monitor.bluez.seat-monitoring = disabled
+          }
+        }
       '';
 
       configPackages = cfg.configPackages
-          ++ lib.optional (!pwUsedForAudio) pwNotForAudioConfigPkg
-          ++ lib.optionals config.services.pipewire.systemWide [ systemwideConfigPkg systemwideBluetoothConfigPkg ];
+          ++ optional (!pwUsedForAudio) pwNotForAudioConfigPkg
+          ++ optional pwCfg.systemWide systemwideConfigPkg;
 
       configs = pkgs.buildEnv {
         name = "wireplumber-configs";
@@ -81,11 +106,11 @@ in
         pathsToLink = [ "/share/wireplumber" ];
       };
 
-      requiredLv2Packages = lib.flatten
+      requiredLv2Packages = flatten
         (
-          lib.concatMap
+          concatMap
             (p:
-              lib.attrByPath ["passthru" "requiredLv2Packages"] [] p
+              attrByPath ["passthru" "requiredLv2Packages"] [] p
             )
             configPackages
         );
@@ -96,19 +121,19 @@ in
         pathsToLink = [ "/lib/lv2" ];
       };
     in
-    lib.mkIf cfg.enable {
+    mkIf cfg.enable {
       assertions = [
         {
           assertion = !config.hardware.bluetooth.hsphfpd.enable;
           message = "Using WirePlumber conflicts with hsphfpd, as it provides the same functionality. `hardware.bluetooth.hsphfpd.enable` needs be set to false";
         }
         {
-          assertion = builtins.length
-            (builtins.attrNames
+          assertion = length
+            (attrNames
               (
-                lib.filterAttrs
+                filterAttrs
                   (name: value:
-                    lib.hasPrefix "wireplumber/" name || name == "wireplumber"
+                    hasPrefix "wireplumber/" name || name == "wireplumber"
                   )
                   config.environment.etc
               )) == 1;
@@ -122,19 +147,19 @@ in
 
       systemd.packages = [ cfg.package ];
 
-      systemd.services.wireplumber.enable = config.services.pipewire.systemWide;
-      systemd.user.services.wireplumber.enable = !config.services.pipewire.systemWide;
+      systemd.services.wireplumber.enable = pwCfg.systemWide;
+      systemd.user.services.wireplumber.enable = !pwCfg.systemWide;
 
       systemd.services.wireplumber.wantedBy = [ "pipewire.service" ];
       systemd.user.services.wireplumber.wantedBy = [ "pipewire.service" ];
 
-      systemd.services.wireplumber.environment = lib.mkIf config.services.pipewire.systemWide {
+      systemd.services.wireplumber.environment = mkIf pwCfg.systemWide {
         # Force WirePlumber to use system dbus.
         DBUS_SESSION_BUS_ADDRESS = "unix:path=/run/dbus/system_bus_socket";
         LV2_PATH = "${lv2Plugins}/lib/lv2";
       };
 
       systemd.user.services.wireplumber.environment.LV2_PATH =
-        lib.mkIf (!config.services.pipewire.systemWide) "${lv2Plugins}/lib/lv2";
+        mkIf (!pwCfg.systemWide) "${lv2Plugins}/lib/lv2";
     };
 }
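
A hedged sketch of the `passthru.requiredLv2Packages` hook mentioned in the option description above; the drop-in name and plugin choice are placeholders:

    services.pipewire.wireplumber.configPackages = [
      ((pkgs.writeTextDir "share/wireplumber/wireplumber.conf.d/80-filter-chain.conf" ''
        # placeholder drop-in that depends on an LV2 plugin
      '').overrideAttrs (old: {
        passthru = (old.passthru or { }) // {
          requiredLv2Packages = [ pkgs.lsp-plugins ];
        };
      }))
    ];
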
diff --git a/nixos/modules/services/development/hoogle.nix b/nixos/modules/services/development/hoogle.nix
index 88dd01fd8aab2..c90bb7f019021 100644
--- a/nixos/modules/services/development/hoogle.nix
+++ b/nixos/modules/services/development/hoogle.nix
@@ -56,6 +56,16 @@ in {
       description = lib.mdDoc "Set the host to bind on.";
       default = "127.0.0.1";
     };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--no-security-headers" ];
+      description = lib.mdDoc ''
+        Additional command-line arguments to pass to
+        {command}`hoogle server`
+      '';
+    };
   };
 
   config = mkIf cfg.enable {
@@ -66,7 +76,10 @@ in {
 
       serviceConfig = {
         Restart = "always";
-        ExecStart = ''${hoogleEnv}/bin/hoogle server --local --port ${toString cfg.port} --home ${cfg.home} --host ${cfg.host}'';
+        ExecStart = ''
+          ${hoogleEnv}/bin/hoogle server --local --port ${toString cfg.port} --home ${cfg.home} --host ${cfg.host} \
+            ${concatStringsSep " " cfg.extraOptions}
+        '';
 
         DynamicUser = true;
 
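
A minimal usage sketch for the new `extraOptions` option, reusing the example flag from above:

    services.hoogle = {
      enable = true;
      extraOptions = [ "--no-security-headers" ];
    };
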
diff --git a/nixos/modules/services/display-managers/greetd.nix b/nixos/modules/services/display-managers/greetd.nix
index c2d345152de93..5ce67c3fb3fd2 100644
--- a/nixos/modules/services/display-managers/greetd.nix
+++ b/nixos/modules/services/display-managers/greetd.nix
@@ -61,6 +61,8 @@ in
     systemd.services."autovt@${tty}".enable = false;
 
     systemd.services.greetd = {
+      aliases = [ "display-manager.service" ];
+
       unitConfig = {
         Wants = [
           "systemd-user-sessions.service"
diff --git a/nixos/modules/services/matrix/synapse.nix b/nixos/modules/services/matrix/synapse.nix
index e3f9c7742cc7d..7291c0fcbcdda 100644
--- a/nixos/modules/services/matrix/synapse.nix
+++ b/nixos/modules/services/matrix/synapse.nix
@@ -1232,7 +1232,8 @@ in {
             ProtectKernelTunables = true;
             ProtectProc = "invisible";
             ProtectSystem = "strict";
-            ReadWritePaths = [ cfg.dataDir cfg.settings.media_store_path ];
+            ReadWritePaths = [ cfg.dataDir cfg.settings.media_store_path ] ++
+              (map (listener: dirOf listener.path) (filter (listener: listener.path != null) cfg.settings.listeners));
             RemoveIPC = true;
             RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
             RestrictNamespaces = true;
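
The additional `ReadWritePaths` entries only matter for listeners bound to UNIX sockets; a rough sketch of such a listener (the socket path and exact field set are illustrative):

    services.matrix-synapse.settings.listeners = [{
      path = "/run/matrix-synapse/client.sock";  # hypothetical socket; its dirOf is now writable
      type = "http";
      x_forwarded = true;
      resources = [{ names = [ "client" ]; compress = false; }];
    }];
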
diff --git a/nixos/modules/services/misc/etebase-server.nix b/nixos/modules/services/misc/etebase-server.nix
index f5a5e8a780d48..6ec3807f0fb20 100644
--- a/nixos/modules/services/misc/etebase-server.nix
+++ b/nixos/modules/services/misc/etebase-server.nix
@@ -177,6 +177,8 @@ in
 
     systemd.tmpfiles.rules = [
       "d '${cfg.dataDir}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
+    ] ++ lib.optionals (cfg.unixSocket != null) [
+      "d '${builtins.dirOf cfg.unixSocket}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
     ];
 
     systemd.services.etebase-server = {
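
The extra tmpfiles rule only applies when a UNIX socket is configured; a sketch with a hypothetical socket path:

    services.etebase-server = {
      enable = true;
      unixSocket = "/run/etebase-server/etebase.sock";  # placeholder; its parent directory is now created
    };
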
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index ec347a75f063e..e95ab0a112bc5 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -1439,6 +1439,8 @@ in {
         nodejs
         gnupg
 
+        "${cfg.packages.gitlab}/share/gitlab/vendor/gems/sidekiq-${cfg.packages.gitlab.rubyEnv.gems.sidekiq.version}"
+
         # Needed for GitLab project imports
         gnutar
         gzip
@@ -1452,7 +1454,12 @@ in {
         TimeoutSec = "infinity";
         Restart = "always";
         WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
-        ExecStart="${cfg.packages.gitlab.rubyEnv}/bin/sidekiq -C \"${cfg.packages.gitlab}/share/gitlab/config/sidekiq_queues.yml\" -e production";
+        ExecStart = utils.escapeSystemdExecArgs [
+          "${cfg.packages.gitlab}/share/gitlab/bin/sidekiq-cluster"
+          "-e" "production"
+          "-r" "."
+          "*" # all queue groups
+        ];
       };
     };
 
@@ -1550,7 +1557,7 @@ in {
         gnutar
         gzip
         openssh
-        gitlab-workhorse
+        cfg.packages.gitlab-workhorse
       ];
       serviceConfig = {
         Type = "simple";
diff --git a/nixos/modules/services/misc/llama-cpp.nix b/nixos/modules/services/misc/llama-cpp.nix
index 4d76456fb2fd5..305d4538e89a0 100644
--- a/nixos/modules/services/misc/llama-cpp.nix
+++ b/nixos/modules/services/misc/llama-cpp.nix
@@ -56,7 +56,7 @@ in {
       serviceConfig = {
         Type = "idle";
         KillSignal = "SIGINT";
-        ExecStart = "${cfg.package}/bin/llama-cpp-server --log-disable --host ${cfg.host} --port ${builtins.toString cfg.port} -m ${cfg.model} ${utils.escapeSystemdExecArgs cfg.extraFlags}";
+        ExecStart = "${cfg.package}/bin/llama-server --log-disable --host ${cfg.host} --port ${builtins.toString cfg.port} -m ${cfg.model} ${utils.escapeSystemdExecArgs cfg.extraFlags}";
         Restart = "on-failure";
         RestartSec = 300;
 
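
For reference, a hedged usage sketch of the module whose server binary was renamed above; the model path and extra flags are placeholders:

    services.llama-cpp = {
      enable = true;
      model = "/var/lib/llama-cpp/model.gguf";  # placeholder path
      host = "127.0.0.1";
      port = 8080;
      extraFlags = [ "--ctx-size" "4096" ];     # placeholder flags, passed through escapeSystemdExecArgs
    };
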
diff --git a/nixos/modules/services/misc/ollama.nix b/nixos/modules/services/misc/ollama.nix
index 3ac3beb4de078..7a5661510e251 100644
--- a/nixos/modules/services/misc/ollama.nix
+++ b/nixos/modules/services/misc/ollama.nix
@@ -13,48 +13,60 @@ in
 {
   options = {
     services.ollama = {
-      enable = lib.mkEnableOption (
-        lib.mdDoc "Server for local large language models"
-      );
+      enable = lib.mkEnableOption "ollama server for local large language models";
+      package = lib.mkPackageOption pkgs "ollama" { };
       listenAddress = lib.mkOption {
         type = types.str;
         default = "127.0.0.1:11434";
-        description = lib.mdDoc ''
-          Specifies the bind address on which the ollama server HTTP interface listens.
+        example = "0.0.0.0:11111";
+        description = ''
+          The address on which the ollama server's HTTP interface binds and listens.
         '';
       };
       acceleration = lib.mkOption {
         type = types.nullOr (types.enum [ "rocm" "cuda" ]);
         default = null;
         example = "rocm";
-        description = lib.mdDoc ''
-          Specifies the interface to use for hardware acceleration.
+        description = ''
+          What interface to use for hardware acceleration.
 
           - `rocm`: supported by modern AMD GPUs
           - `cuda`: supported by modern NVIDIA GPUs
         '';
       };
-      package = lib.mkPackageOption pkgs "ollama" { };
+      environmentVariables = lib.mkOption {
+        type = types.attrsOf types.str;
+        default = { };
+        example = {
+          HOME = "/tmp";
+          OLLAMA_LLM_LIBRARY = "cpu";
+        };
+        description = ''
+          Set arbitrary environment variables for the ollama service.
+
+          Be aware that these are only seen by the ollama server (systemd service),
+          not normal invocations like `ollama run`.
+          Since `ollama run` is mostly a thin wrapper around the ollama server, this is usually sufficient.
+        '';
+      };
     };
   };
 
   config = lib.mkIf cfg.enable {
-    systemd = {
-      services.ollama = {
-        wantedBy = [ "multi-user.target" ];
-        description = "Server for local large language models";
-        after = [ "network.target" ];
-        environment = {
-          HOME = "%S/ollama";
-          OLLAMA_MODELS = "%S/ollama/models";
-          OLLAMA_HOST = cfg.listenAddress;
-        };
-        serviceConfig = {
-          ExecStart = "${lib.getExe ollamaPackage} serve";
-          WorkingDirectory = "/var/lib/ollama";
-          StateDirectory = [ "ollama" ];
-          DynamicUser = true;
-        };
+    systemd.services.ollama = {
+      description = "Server for local large language models";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment = cfg.environmentVariables // {
+        HOME = "%S/ollama";
+        OLLAMA_MODELS = "%S/ollama/models";
+        OLLAMA_HOST = cfg.listenAddress;
+      };
+      serviceConfig = {
+        ExecStart = "${lib.getExe ollamaPackage} serve";
+        WorkingDirectory = "%S/ollama";
+        StateDirectory = [ "ollama" ];
+        DynamicUser = true;
       };
     };
 
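
A usage sketch combining the new `listenAddress` example and `environmentVariables` option from above:

    services.ollama = {
      enable = true;
      listenAddress = "0.0.0.0:11111";                  # example value from the option above
      environmentVariables.OLLAMA_LLM_LIBRARY = "cpu";  # example from the option above
    };
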
diff --git a/nixos/modules/services/misc/paperless.nix b/nixos/modules/services/misc/paperless.nix
index 9314c4f3848d8..9301d1f687254 100644
--- a/nixos/modules/services/misc/paperless.nix
+++ b/nixos/modules/services/misc/paperless.nix
@@ -27,6 +27,8 @@ let
       name = "paperless_ngx_nltk_data";
       paths = pkg.nltkData;
     };
+  } // optionalAttrs (cfg.openMPThreadingWorkaround) {
+    OMP_NUM_THREADS = "1";
   } // (lib.mapAttrs (_: s:
     if (lib.isAttrs s || lib.isList s) then builtins.toJSON s
     else if lib.isBool s then lib.boolToString s
@@ -199,6 +201,20 @@ in
     };
 
     package = mkPackageOption pkgs "paperless-ngx" { };
+
+    openMPThreadingWorkaround = mkEnableOption ''
+      a workaround for document classifier timeouts.
+
+      Paperless uses OpenBLAS via scikit-learn for document classification.
+
+      OpenMP defaults to threading, but this can cause the document
+      classifier to spin on one core seemingly indefinitely when there is a
+      large number of classes per classification, causing it to effectively
+      never complete because it runs into timeouts.
+
+      This sets `OMP_NUM_THREADS` to `1` in order to mitigate the issue. See
+      https://github.com/NixOS/nixpkgs/issues/240591 for more information.
+    '' // mkOption { default = true; };
   };
 
   config = mkIf cfg.enable {
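
The workaround defaults to enabled; a sketch of opting out where OpenMP threading is known to behave:

    services.paperless = {
      enable = true;
      openMPThreadingWorkaround = false;  # drops the OMP_NUM_THREADS=1 override added above
    };
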
diff --git a/nixos/modules/services/misc/tandoor-recipes.nix b/nixos/modules/services/misc/tandoor-recipes.nix
index a8300ecd52337..1b1fde78ad0a5 100644
--- a/nixos/modules/services/misc/tandoor-recipes.nix
+++ b/nixos/modules/services/misc/tandoor-recipes.nix
@@ -20,7 +20,10 @@ let
   manage = pkgs.writeShellScript "manage" ''
     set -o allexport # Export the following env vars
     ${lib.toShellVars env}
-    exec ${pkg}/bin/tandoor-recipes "$@"
+    eval "$(${config.systemd.package}/bin/systemctl show -pUID,GID,MainPID tandoor-recipes.service)"
+    exec ${pkgs.util-linux}/bin/nsenter \
+      -t $MainPID -m -S $UID -G $GID \
+      ${pkg}/bin/tandoor-recipes "$@"
   '';
 in
 {
@@ -82,6 +85,7 @@ in
         Restart = "on-failure";
 
         User = "tandoor_recipes";
+        Group = "tandoor_recipes";
         DynamicUser = true;
         StateDirectory = "tandoor-recipes";
         WorkingDirectory = "/var/lib/tandoor-recipes";
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 6be6ba7edf721..8c5ec2992eda1 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -1,4 +1,4 @@
-{ config, pkgs, lib, options, ... }:
+{ config, pkgs, lib, options, utils, ... }:
 
 let
   inherit (lib) concatStrings foldl foldl' genAttrs literalExpression maintainers
@@ -35,6 +35,7 @@ let
     "dovecot"
     "fastly"
     "flow"
+    "fritz"
     "fritzbox"
     "graphite"
     "idrac"
@@ -94,10 +95,10 @@ let
     "zfs"
   ]
     (name:
-      import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options; }
+      import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options utils; }
     )) // (mapAttrs
     (name: params:
-      import (./. + "/exporters/${params.name}.nix") { inherit config lib pkgs options; type = params.type ; })
+      import (./. + "/exporters/${params.name}.nix") { inherit config lib pkgs options utils; type = params.type ; })
     {
       exportarr-bazarr = {
         name = "exportarr";
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix b/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix
index a8a9f84ea8eaf..de6cda18bc374 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/artifactory.nix b/nixos/modules/services/monitoring/prometheus/exporters/artifactory.nix
index bc67fe59b3b83..b3afdb5966861 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/artifactory.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/artifactory.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/bind.nix b/nixos/modules/services/monitoring/prometheus/exporters/bind.nix
index bd2003f06504e..100446c1a4ebb 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/bind.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/bind.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/bird.nix b/nixos/modules/services/monitoring/prometheus/exporters/bird.nix
index 5f6c36f4c5671..fc52135e3b45e 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/bird.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/bird.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/bitcoin.nix b/nixos/modules/services/monitoring/prometheus/exporters/bitcoin.nix
index 330d541264488..45f00a04a86c5 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/bitcoin.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/bitcoin.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix b/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
index ce2c391de5232..e8399e1bec800 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/buildkite-agent.nix b/nixos/modules/services/monitoring/prometheus/exporters/buildkite-agent.nix
index 0515b72b13f9f..6bfadc3b76320 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/buildkite-agent.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/buildkite-agent.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix b/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix
index f67596f05a3a1..3b2b123bbd078 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix b/nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix
index 437cece588a78..a4a917b473ce0 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix b/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
index ece42a34cb06b..4cfee7c54a41d 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/domain.nix b/nixos/modules/services/monitoring/prometheus/exporters/domain.nix
index 61e2fc80afde3..b2c8e6664c0f6 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/domain.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/domain.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix b/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix
index 6fb438353a4c4..df6b1ef3200cb 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix b/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix
index 8511abbee1bd0..c632b02902627 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options, type }:
+{ config, lib, pkgs, options, type, ... }:
 
 let
   cfg = config.services.prometheus.exporters."exportarr-${type}";
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix b/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix
index 2a8b7fc0818d5..097ea39594788 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix
@@ -2,6 +2,7 @@
 , lib
 , pkgs
 , options
+, ...
 }:
 
 let
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/flow.nix b/nixos/modules/services/monitoring/prometheus/exporters/flow.nix
index 81099aaf17042..42292abeada2e 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/flow.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/flow.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/fritz.nix b/nixos/modules/services/monitoring/prometheus/exporters/fritz.nix
new file mode 100644
index 0000000000000..c3a962b576a5b
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/fritz.nix
@@ -0,0 +1,97 @@
+{ config, lib, pkgs, utils, ... }:
+let
+  inherit (lib) mkOption types mdDoc;
+  cfg = config.services.prometheus.exporters.fritz;
+  yaml = pkgs.formats.yaml { };
+  configFile = yaml.generate "fritz-exporter.yaml" cfg.settings;
+in
+{
+  port = 9787;
+
+  extraOpts = {
+    settings = mkOption {
+      description = mdDoc "Configuration settings for fritz-exporter.";
+      type = types.submodule {
+        freeformType = yaml.type;
+
+        options = {
+          # Pull existing port option into config file.
+          port = mkOption {
+            type = types.port;
+            default = cfg.port;
+            internal = true;
+            visible = false;
+          };
+          # Pull existing listen address option into config file.
+          listen_address = mkOption {
+            type = types.str;
+            default = cfg.listenAddress;
+            internal = true;
+            visible = false;
+          };
+          log_level = mkOption {
+            type = types.enum [ "DEBUG" "INFO" "WARNING" "ERROR" "CRITICAL" ];
+            default = "INFO";
+            description = mdDoc ''
+              Log level to use for the exporter.
+            '';
+          };
+          devices = mkOption {
+            default = [];
+            description = "Fritz!-devices to monitor using the exporter.";
+            type = with types; listOf (submodule {
+              freeformType = yaml.type;
+
+              options = {
+                name = mkOption {
+                  type = types.str;
+                  default = "";
+                  description = mdDoc ''
+                    Name to use for the device.
+                  '';
+                };
+                hostname = mkOption {
+                  type = types.str;
+                  default = "fritz.box";
+                  description = mdDoc ''
+                    Hostname under which the target device is reachable.
+                  '';
+                };
+                username = mkOption {
+                  type = types.str;
+                  description = mdDoc ''
+                    Username to authenticate with the target device.
+                  '';
+                };
+                password_file = mkOption {
+                  type = types.path;
+                  description = mdDoc ''
+                    Path to a file which contains the password to authenticate with the target device.
+                    Needs to be readable by the user the exporter runs under.
+                  '';
+                };
+                host_info = mkOption {
+                  type = types.bool;
+                  description = mdDoc ''
+                    Enable extended host info for this device. *Warning*: This will heavily increase scrape time.
+                  '';
+                  default = false;
+                };
+              };
+            });
+          };
+        };
+      };
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = utils.escapeSystemdExecArgs ([
+        (lib.getExe pkgs.fritz-exporter)
+        "--config" configFile
+      ] ++ cfg.extraFlags);
+      DynamicUser = false;
+    };
+  };
+}
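
A hedged usage sketch for the new exporter; device name, credentials, and the secret path are placeholders:

    services.prometheus.exporters.fritz = {
      enable = true;
      settings.devices = [{
        name = "router";                                # placeholder
        hostname = "fritz.box";
        username = "prometheus";                        # placeholder
        password_file = "/run/secrets/fritz-password";  # placeholder
        host_info = false;
      }];
    };
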
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix b/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix
index dc53d21406ff2..7b881a8e2693c 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/graphite.nix b/nixos/modules/services/monitoring/prometheus/exporters/graphite.nix
index 34a8871042126..07c06afe14094 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/graphite.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/graphite.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 let
   cfg = config.services.prometheus.exporters.graphite;
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/idrac.nix b/nixos/modules/services/monitoring/prometheus/exporters/idrac.nix
index f5604bc00ee04..78ae4826215ce 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/idrac.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/idrac.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 let
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/imap-mailstat.nix b/nixos/modules/services/monitoring/prometheus/exporters/imap-mailstat.nix
index c5024a258e719..68fc63e40fcd1 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/imap-mailstat.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/imap-mailstat.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/influxdb.nix b/nixos/modules/services/monitoring/prometheus/exporters/influxdb.nix
index 61c0c08d2250e..d0d7f16bdadf3 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/influxdb.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/influxdb.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/ipmi.nix b/nixos/modules/services/monitoring/prometheus/exporters/ipmi.nix
index 9adbe31d84d69..fe9734d33c7c7 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/ipmi.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/ipmi.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/jitsi.nix b/nixos/modules/services/monitoring/prometheus/exporters/jitsi.nix
index 0246027186023..bc670ba9cc0e6 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/jitsi.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/jitsi.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/json.nix b/nixos/modules/services/monitoring/prometheus/exporters/json.nix
index 473f3a7e47e39..7f78985d80cd4 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/json.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/json.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/junos-czerwonk.nix b/nixos/modules/services/monitoring/prometheus/exporters/junos-czerwonk.nix
index 15e0c9ecb1778..72119d17fcb71 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/junos-czerwonk.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/junos-czerwonk.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/kea.nix b/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
index 3abb6ff6bdf8b..ccfdd98b8db93 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
@@ -1,7 +1,8 @@
 { config
 , lib
 , pkgs
-, options
+, utils
+, ...
 }:
 
 with lib;
@@ -9,18 +10,22 @@ with lib;
 let
   cfg = config.services.prometheus.exporters.kea;
 in {
+  imports = [
+    (mkRenamedOptionModule [ "controlSocketPaths" ] [ "targets" ])
+  ];
   port = 9547;
   extraOpts = {
-    controlSocketPaths = mkOption {
+    targets = mkOption {
       type = types.listOf types.str;
       example = literalExpression ''
         [
           "/run/kea/kea-dhcp4.socket"
           "/run/kea/kea-dhcp6.socket"
+          "http://127.0.0.1:8547"
         ]
       '';
       description = lib.mdDoc ''
-        Paths to kea control sockets
+        Paths or URLs of the Kea control sockets.
       '';
     };
   };
@@ -32,12 +37,11 @@ in {
     serviceConfig = {
       User = "kea";
       DynamicUser = true;
-      ExecStart = ''
-        ${pkgs.prometheus-kea-exporter}/bin/kea-exporter \
-          --address ${cfg.listenAddress} \
-          --port ${toString cfg.port} \
-          ${concatStringsSep " " cfg.controlSocketPaths}
-      '';
+      ExecStart = utils.escapeSystemdExecArgs ([
+        (lib.getExe pkgs.prometheus-kea-exporter)
+        "--address" cfg.listenAddress
+        "--port" cfg.port
+      ] ++ cfg.extraFlags ++ cfg.targets);
       RuntimeDirectory = "kea";
       RuntimeDirectoryPreserve = true;
       RestrictAddressFamilies = [
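
With the rename in place, a sketch using the new `targets` name and the example values from above (old `controlSocketPaths` settings are migrated by the renamed-option module):

    services.prometheus.exporters.kea = {
      enable = true;
      targets = [
        "/run/kea/kea-dhcp4.socket"
        "http://127.0.0.1:8547"
      ];
    };
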
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix b/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix
index dfa56343b8717..afdb664a0de5e 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/knot.nix b/nixos/modules/services/monitoring/prometheus/exporters/knot.nix
index 7758487508033..0352aff8b0135 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/knot.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/knot.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix b/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix
index 9f914b1dc1464..66d9c02f904b7 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/mail.nix b/nixos/modules/services/monitoring/prometheus/exporters/mail.nix
index 15079f5841f43..8c88f47ab86a0 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/mail.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/mail.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix b/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix
index 54dab4b5581ae..a8dba75251d83 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/minio.nix b/nixos/modules/services/monitoring/prometheus/exporters/minio.nix
index 82cc3fc314f2f..e24d4f766e30e 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/minio.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/minio.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/modemmanager.nix b/nixos/modules/services/monitoring/prometheus/exporters/modemmanager.nix
index 222ea3e5384f6..0eb193c0021f2 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/modemmanager.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/modemmanager.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix b/nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix
index b36a09c609206..1ed6bbf0325d8 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix b/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix
index 849c514de6816..c6da052ccdf30 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 let
   cfg = config.services.prometheus.exporters.mysqld;
   inherit (lib) types mkOption mdDoc mkIf mkForce cli concatStringsSep optionalString escapeShellArgs;
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix b/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
index 28a3eb6a134c0..82deea6864e8f 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix b/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
index 88dc79fc2503f..339749226aa45 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/nginxlog.nix b/nixos/modules/services/monitoring/prometheus/exporters/nginxlog.nix
index 674dc9dd41581..b79a034e1384e 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/nginxlog.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/nginxlog.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/node.nix b/nixos/modules/services/monitoring/prometheus/exporters/node.nix
index dd8602e2c63db..9b8a0d2c6bc27 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/node.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/node.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/nut.nix b/nixos/modules/services/monitoring/prometheus/exporters/nut.nix
index e58a394456a38..a14e379079b07 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/nut.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/nut.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/pgbouncer.nix b/nixos/modules/services/monitoring/prometheus/exporters/pgbouncer.nix
index 9e55cadae5237..9587403c78023 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/pgbouncer.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/pgbouncer.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/php-fpm.nix b/nixos/modules/services/monitoring/prometheus/exporters/php-fpm.nix
index 8238f1ac1856e..4ea5f64012c08 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/php-fpm.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/php-fpm.nix
@@ -2,6 +2,7 @@
 , lib
 , pkgs
 , options
+, ...
 }:
 
 let
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/pihole.nix b/nixos/modules/services/monitoring/prometheus/exporters/pihole.nix
index 6f403b3e58c81..4b7eca7493a69 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/pihole.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/pihole.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/ping.nix b/nixos/modules/services/monitoring/prometheus/exporters/ping.nix
index af78b6bef6258..bda5038a0c64a 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/ping.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/ping.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix b/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
index 9f402b123110a..ead8e806f85a8 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix b/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix
index 755d771ecdff4..514b2d0c8f2d9 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/process.nix b/nixos/modules/services/monitoring/prometheus/exporters/process.nix
index 278d6cd780748..86c71a88e28b0 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/process.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/process.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/pve.nix b/nixos/modules/services/monitoring/prometheus/exporters/pve.nix
index 83e740320df2d..96db49d9591f7 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/pve.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/pve.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 let
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/py-air-control.nix b/nixos/modules/services/monitoring/prometheus/exporters/py-air-control.nix
index f03b3c4df9165..60243e0ed0694 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/py-air-control.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/py-air-control.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/redis.nix b/nixos/modules/services/monitoring/prometheus/exporters/redis.nix
index befbcb21f7664..71f94a700efd9 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/redis.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/redis.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/restic.nix b/nixos/modules/services/monitoring/prometheus/exporters/restic.nix
index 977bd42e9812e..12962af5f111f 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/restic.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/restic.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix b/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix
index f9dcfad07d300..8169d4075a9f4 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/rtl_433.nix b/nixos/modules/services/monitoring/prometheus/exporters/rtl_433.nix
index 1f7235cb78303..42b659501161c 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/rtl_433.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/rtl_433.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 let
   cfg = config.services.prometheus.exporters.rtl_433;
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix b/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
index b9ab305f7c082..0d937ac6673f8 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 let
   inherit (lib) mkOption types;
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/scaphandre.nix b/nixos/modules/services/monitoring/prometheus/exporters/scaphandre.nix
index 3b6ebf65b0903..d4c929d88b9c8 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/scaphandre.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/scaphandre.nix
@@ -2,6 +2,7 @@
 , lib
 , pkgs
 , options
+, ...
 }:
 
 let
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/script.nix b/nixos/modules/services/monitoring/prometheus/exporters/script.nix
index eab0e1d8a6b51..f37fa456d27c5 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/script.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/script.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/shelly.nix b/nixos/modules/services/monitoring/prometheus/exporters/shelly.nix
index b9cfd1b1e84a9..1d2329dfbae18 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/shelly.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/shelly.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/smartctl.nix b/nixos/modules/services/monitoring/prometheus/exporters/smartctl.nix
index 50e1321a1e9ce..1040e9ecadbd5 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/smartctl.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/smartctl.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/smokeping.nix b/nixos/modules/services/monitoring/prometheus/exporters/smokeping.nix
index 459f5842f546f..2bacc9cd7cac8 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/smokeping.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/smokeping.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
index 452cb154bcf61..207446e39f49b 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/sql.nix b/nixos/modules/services/monitoring/prometheus/exporters/sql.nix
index 678bc348679db..dbfa69678a0c9 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/sql.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/sql.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 with lib;
 let
   cfg = config.services.prometheus.exporters.sql;
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix b/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix
index d9d732d8c125a..94df86167e8ce 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix b/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix
index b1d6760b40b34..337ebd4ed66fe 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/tor.nix b/nixos/modules/services/monitoring/prometheus/exporters/tor.nix
index 7a9167110a279..b91f69aded3d5 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/tor.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/tor.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix b/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
index f2336429d42fe..2f4444a96c694 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
@@ -2,6 +2,7 @@
 , lib
 , pkgs
 , options
+, ...
 }:
 
 with lib;
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix b/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix
index 70f26d9783be5..b7addcd568270 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/unpoller.nix b/nixos/modules/services/monitoring/prometheus/exporters/unpoller.nix
index 3b7f978528cd1..aff1197a8775e 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/unpoller.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/unpoller.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix b/nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix
index a019157c664be..7b21e5fc7cb7b 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix b/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
index a7e5b41dffc66..98fbba82c8e93 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix b/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix
index 9b7590314936e..127c8021a9f0f 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/zfs.nix b/nixos/modules/services/monitoring/prometheus/exporters/zfs.nix
index ff12a52d49a92..21f6354cc4a20 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/zfs.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/zfs.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, options }:
+{ config, lib, pkgs, options, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/monitoring/scrutiny.nix b/nixos/modules/services/monitoring/scrutiny.nix
index aef924ef840cc..031f5a30cada6 100644
--- a/nixos/modules/services/monitoring/scrutiny.nix
+++ b/nixos/modules/services/monitoring/scrutiny.nix
@@ -2,7 +2,7 @@
 let
   inherit (lib) maintainers;
   inherit (lib.meta) getExe;
-  inherit (lib.modules) mkIf;
+  inherit (lib.modules) mkIf mkMerge;
   inherit (lib.options) literalExpression mkEnableOption mkOption mkPackageOption;
   inherit (lib.types) bool enum nullOr port str submodule;
 
@@ -156,43 +156,47 @@ in
     };
   };
 
-  config = mkIf (cfg.enable || cfg.collector.enable) {
-    services.influxdb2.enable = cfg.influxdb.enable;
+  config = mkMerge [
+    (mkIf cfg.enable {
+      services.influxdb2.enable = cfg.influxdb.enable;
 
-    networking.firewall = mkIf cfg.openFirewall {
-      allowedTCPPorts = [ cfg.settings.web.listen.port ];
-    };
-
-    services.smartd = mkIf cfg.collector.enable {
-      enable = true;
-      extraOptions = [
-        "-A /var/log/smartd/"
-        "--interval=600"
-      ];
-    };
+      networking.firewall = mkIf cfg.openFirewall {
+        allowedTCPPorts = [ cfg.settings.web.listen.port ];
+      };
 
-    systemd = {
-      services = {
-        scrutiny = mkIf cfg.enable {
-          description = "Hard Drive S.M.A.R.T Monitoring, Historical Trends & Real World Failure Thresholds";
-          wantedBy = [ "multi-user.target" ];
-          after = [ "network.target" ];
-          environment = {
-            SCRUTINY_VERSION = "1";
-            SCRUTINY_WEB_DATABASE_LOCATION = "/var/lib/scrutiny/scrutiny.db";
-            SCRUTINY_WEB_SRC_FRONTEND_PATH = "${cfg.package}/share/scrutiny";
-          };
-          serviceConfig = {
-            DynamicUser = true;
-            ExecStart = "${getExe cfg.package} start --config ${settingsFormat.generate "scrutiny.yaml" cfg.settings}";
-            Restart = "always";
-            StateDirectory = "scrutiny";
-            StateDirectoryMode = "0750";
-          };
+      systemd.services.scrutiny = {
+        description = "Hard Drive S.M.A.R.T Monitoring, Historical Trends & Real World Failure Thresholds";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ] ++ lib.optional cfg.influxdb.enable "influxdb2.service";
+        wants = lib.optional cfg.influxdb.enable "influxdb2.service";
+        environment = {
+          SCRUTINY_VERSION = "1";
+          SCRUTINY_WEB_DATABASE_LOCATION = "/var/lib/scrutiny/scrutiny.db";
+          SCRUTINY_WEB_SRC_FRONTEND_PATH = "${cfg.package}/share/scrutiny";
         };
+        serviceConfig = {
+          DynamicUser = true;
+          ExecStart = "${getExe cfg.package} start --config ${settingsFormat.generate "scrutiny.yaml" cfg.settings}";
+          Restart = "always";
+          StateDirectory = "scrutiny";
+          StateDirectoryMode = "0750";
+        };
+      };
+    })
+    (mkIf cfg.collector.enable {
+      services.smartd = {
+        enable = true;
+        extraOptions = [
+          "-A /var/log/smartd/"
+          "--interval=600"
+        ];
+      };
 
-        scrutiny-collector = mkIf cfg.collector.enable {
+      systemd = {
+        services.scrutiny-collector = {
           description = "Scrutiny Collector Service";
+          after = lib.optional cfg.enable "scrutiny.service";
+          wants = lib.optional cfg.enable "scrutiny.service";
           environment = {
             COLLECTOR_VERSION = "1";
             COLLECTOR_API_ENDPOINT = cfg.collector.settings.api.endpoint;
@@ -201,20 +205,13 @@ in
             Type = "oneshot";
             ExecStart = "${getExe cfg.collector.package} run --config ${settingsFormat.generate "scrutiny-collector.yaml" cfg.collector.settings}";
           };
+          startAt = cfg.collector.schedule;
         };
-      };
 
-      timers = mkIf cfg.collector.enable {
-        scrutiny-collector = {
-          timerConfig = {
-            OnCalendar = cfg.collector.schedule;
-            Persistent = true;
-            Unit = "scrutiny-collector.service";
-          };
-        };
+        timers.scrutiny-collector.timerConfig.Persistent = true;
       };
-    };
-  };
+    })
+  ];
 
   meta.maintainers = [ maintainers.jnsgruk ];
 }
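A minimal usage sketch for the reworked scrutiny module, using only options referenced in the hunks above (enable, openFirewall, collector.enable, collector.schedule); the schedule value is an illustrative systemd calendar expression, and the mkMerge split means either half can be enabled on its own:

  { ... }:
  {
    services.scrutiny = {
      enable = true;                  # web UI/API; orders itself after influxdb2 when cfg.influxdb.enable is set
      openFirewall = true;            # opens settings.web.listen.port
      collector.enable = true;        # S.M.A.R.T. collector plus smartd, usable without the web UI
      collector.schedule = "*:0/15";  # illustrative; now feeds startAt instead of a hand-written timer
    };
  }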
diff --git a/nixos/modules/services/networking/dnscache.nix b/nixos/modules/services/networking/dnscache.nix
index eff13f69f470a..4f5b77a5b6851 100644
--- a/nixos/modules/services/networking/dnscache.nix
+++ b/nixos/modules/services/networking/dnscache.nix
@@ -86,7 +86,11 @@ in {
 
   config = mkIf config.services.dnscache.enable {
     environment.systemPackages = [ pkgs.djbdns ];
-    users.users.dnscache.isSystemUser = true;
+    users.users.dnscache = {
+      isSystemUser = true;
+      group = "dnscache";
+    };
+    users.groups.dnscache = {};
 
     systemd.services.dnscache = {
       description = "djbdns dnscache server";
diff --git a/nixos/modules/services/networking/dnsproxy.nix b/nixos/modules/services/networking/dnsproxy.nix
new file mode 100644
index 0000000000000..f0be74d7591f4
--- /dev/null
+++ b/nixos/modules/services/networking/dnsproxy.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    escapeShellArgs
+    getExe
+    lists
+    literalExpression
+    maintainers
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    types;
+
+  cfg = config.services.dnsproxy;
+
+  yaml = pkgs.formats.yaml { };
+  configFile = yaml.generate "config.yaml" cfg.settings;
+
+  finalFlags = (lists.optional (cfg.settings != { }) "--config-path=${configFile}") ++ cfg.flags;
+in
+{
+
+  options.services.dnsproxy = {
+
+    enable = mkEnableOption (lib.mdDoc "dnsproxy");
+
+    package = mkPackageOption pkgs "dnsproxy" { };
+
+    settings = mkOption {
+      type = yaml.type;
+      default = { };
+      example = literalExpression ''
+        {
+          bootstrap = [
+            "8.8.8.8:53"
+          ];
+          listen-addrs = [
+            "0.0.0.0"
+          ];
+          listen-ports = [
+            53
+          ];
+          upstream = [
+            "1.1.1.1:53"
+          ];
+        }
+      '';
+      description = mdDoc ''
+        Contents of the `config.yaml` config file.
+        The `--config-path` argument will only be passed if this set is not empty.
+
+        See <https://github.com/AdguardTeam/dnsproxy/blob/master/config.yaml.dist>.
+      '';
+    };
+
+    flags = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--upstream=1.1.1.1:53" ];
+      description = lib.mdDoc ''
+        A list of extra command-line flags to pass to dnsproxy. For details on the
+        available options, see <https://github.com/AdguardTeam/dnsproxy#usage>.
+        Keep in mind that options passed through command-line flags override
+        config options.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.dnsproxy = {
+      description = "Simple DNS proxy with DoH, DoT, DoQ and DNSCrypt support";
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${getExe cfg.package} ${escapeShellArgs finalFlags}";
+        Restart = "always";
+        RestartSec = 10;
+        DynamicUser = true;
+
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        ProtectClock = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [ "@system-service" "~@privileged @resources" ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ diogotcorreia ];
+
+}
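A minimal sketch of the new services.dnsproxy module, reusing the example values from the option declarations above; `--config-path` is only passed when `settings` is non-empty, and command-line flags always win over the YAML config:

  { ... }:
  {
    services.dnsproxy = {
      enable = true;
      settings = {
        bootstrap = [ "8.8.8.8:53" ];
        listen-addrs = [ "0.0.0.0" ];
        listen-ports = [ 53 ];
        upstream = [ "1.1.1.1:53" ];
      };
      # flags = [ "--upstream=9.9.9.9:53" ];  # flags override settings if both are given
    };
  }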
diff --git a/nixos/modules/services/networking/microsocks.nix b/nixos/modules/services/networking/microsocks.nix
new file mode 100644
index 0000000000000..be79a8495636f
--- /dev/null
+++ b/nixos/modules/services/networking/microsocks.nix
@@ -0,0 +1,146 @@
+{ config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  cfg = config.services.microsocks;
+
+  cmd =
+    if cfg.execWrapper != null
+    then "${cfg.execWrapper} ${cfg.package}/bin/microsocks"
+    else "${cfg.package}/bin/microsocks";
+  args =
+    [ "-i" cfg.ip "-p" (toString cfg.port) ]
+    ++ lib.optionals (cfg.authOnce) [ "-1" ]
+    ++ lib.optionals (cfg.disableLogging) [ "-q" ]
+    ++ lib.optionals (cfg.outgoingBindIp != null) [ "-b" cfg.outgoingBindIp ]
+    ++ lib.optionals (cfg.authUsername != null) [ "-u" cfg.authUsername ];
+in {
+  options.services.microsocks = {
+    enable = lib.mkEnableOption (lib.mdDoc "Tiny, portable SOCKS5 server with very moderate resource usage");
+    user = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "User microsocks runs as.";
+      type = lib.types.str;
+    };
+    group = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "Group microsocks runs as.";
+      type = lib.types.str;
+    };
+    package = lib.mkPackageOption pkgs "microsocks" {};
+    ip = lib.mkOption {
+      type = lib.types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        IP on which microsocks should listen. Defaults to 127.0.0.1 for
+        security reasons.
+      '';
+    };
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 1080;
+      description = lib.mdDoc "Port on which microsocks should listen.";
+    };
+    disableLogging = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "If true, microsocks will not log any messages to stdout/stderr.";
+    };
+    authOnce = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If true, once a specific IP address has authenticated successfully with user/pass,
+        it is added to a whitelist and may use the proxy without further authentication.
+      '';
+    };
+    outgoingBindIp = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc "Specifies which IP address outgoing connections are bound to.";
+    };
+    authUsername = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = "alice";
+      description = lib.mdDoc "Optional username to use for authentication.";
+    };
+    authPasswordFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      example = "/run/secrets/microsocks-password";
+      description = lib.mdDoc "Path to a file containing the password for authentication.";
+    };
+    execWrapper = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = ''
+        ''${pkgs.mullvad-vpn}/bin/mullvad-exclude
+      '';
+      description = lib.mdDoc ''
+        An optional command to prepend to the microsocks command (such as proxychains, or a VPN exclude command).
+      '';
+    };
+  };
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.authUsername != null) == (cfg.authPasswordFile != null);
+        message = "Need to set both authUsername and authPasswordFile for microsocks";
+      }
+    ];
+    users = {
+      users = lib.mkIf (cfg.user == "microsocks") {
+        microsocks = {
+          group = cfg.group;
+          isSystemUser = true;
+        };
+      };
+      groups = lib.mkIf (cfg.group == "microsocks") {
+        microsocks = {};
+      };
+    };
+    systemd.services.microsocks = {
+      enable = true;
+      description = "a tiny socks server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "on-failure";
+        RestartSec = 10;
+        LoadCredential = lib.optionalString (cfg.authPasswordFile != null) "MICROSOCKS_PASSWORD_FILE:${cfg.authPasswordFile}";
+        MemoryDenyWriteExecute = true;
+        SystemCallArchitectures = "native";
+        PrivateTmp = true;
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        PrivateDevices = true;
+        RestrictSUIDSGID = true;
+        RestrictNamespaces = [
+          "cgroup"
+          "ipc"
+          "pid"
+          "user"
+          "uts"
+        ];
+      };
+      script =
+        if cfg.authPasswordFile != null
+        then ''
+          PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/MICROSOCKS_PASSWORD_FILE")
+          ${cmd} ${lib.escapeShellArgs args} -P "$PASSWORD"
+        ''
+        else ''
+          ${cmd} ${lib.escapeShellArgs args}
+        '';
+    };
+  };
+}
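A minimal sketch of the new services.microsocks module with password authentication; the secret path is illustrative, and the assertion above requires authUsername and authPasswordFile to be set together:

  { ... }:
  {
    services.microsocks = {
      enable = true;
      ip = "0.0.0.0";                                          # listen on all interfaces rather than localhost
      port = 1080;
      authUsername = "alice";
      authPasswordFile = "/run/secrets/microsocks-password";   # read via LoadCredential, never placed in the Nix store
      authOnce = true;                                         # whitelist an IP after its first successful auth
    };
  }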
diff --git a/nixos/modules/services/networking/mihomo.nix b/nixos/modules/services/networking/mihomo.nix
new file mode 100644
index 0000000000000..ae700603b5290
--- /dev/null
+++ b/nixos/modules/services/networking/mihomo.nix
@@ -0,0 +1,118 @@
+# NOTE:
+# cfg.configFile contains secrets such as proxy servers' credentials!
+# We don't want plaintext secrets in the world-readable `/nix/store`.
+
+{ lib
+, config
+, pkgs
+, ...
+}:
+let
+  cfg = config.services.mihomo;
+in
+{
+  options.services.mihomo = {
+    enable = lib.mkEnableOption "Mihomo, a rule-based proxy in Go";
+
+    package = lib.mkPackageOption pkgs "mihomo" { };
+
+    configFile = lib.mkOption {
+      default = null;
+      type = lib.types.nullOr lib.types.path;
+      description = "Configuration file to use.";
+    };
+
+    webui = lib.mkOption {
+      default = null;
+      type = lib.types.nullOr lib.types.path;
+      description = ''
+        Local web interface to use.
+
+        You can also use one of the following hosted web interfaces instead:
+        - metacubexd:
+          - https://d.metacubex.one
+          - https://metacubex.github.io/metacubexd
+          - https://metacubexd.pages.dev
+        - yacd:
+          - https://yacd.haishan.me
+        - clash-dashboard (buggy):
+          - https://clash.razord.top
+      '';
+    };
+
+    extraOpts = lib.mkOption {
+      default = null;
+      type = lib.types.nullOr lib.types.str;
+      description = "Extra command line options to use.";
+    };
+
+    tunMode = lib.mkEnableOption ''
+      the necessary permissions for Mihomo's systemd service so that TUN mode can function properly.
+
+      Keep in mind that you still need to enable TUN mode in Mihomo's configuration.
+    '';
+  };
+
+  config = lib.mkIf cfg.enable {
+    ### systemd service
+    systemd.services."mihomo" = {
+      description = "Mihomo daemon, a rule-based proxy in Go.";
+      documentation = [ "https://wiki.metacubex.one/" ];
+      requires = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig =
+        {
+          ExecStart = lib.concatStringsSep " " [
+            (lib.getExe cfg.package)
+            "-d /var/lib/private/mihomo"
+            (lib.optionalString (cfg.configFile != null) "-f \${CREDENTIALS_DIRECTORY}/config.yaml")
+            (lib.optionalString (cfg.webui != null) "-ext-ui ${cfg.webui}")
+            (lib.optionalString (cfg.extraOpts != null) cfg.extraOpts)
+          ];
+
+          DynamicUser = true;
+          StateDirectory = "mihomo";
+          LoadCredential = "config.yaml:${cfg.configFile}";
+
+          ### Hardening
+          AmbientCapabilities = "";
+          CapabilityBoundingSet = "";
+          DeviceAllow = "";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateMounts = true;
+          PrivateTmp = true;
+          PrivateUsers = true;
+          ProcSubset = "pid";
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          ProtectSystem = "strict";
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          RestrictNamespaces = true;
+          RestrictAddressFamilies = "AF_INET AF_INET6";
+          SystemCallArchitectures = "native";
+          SystemCallFilter = "@system-service bpf";
+          UMask = "0077";
+        }
+        // lib.optionalAttrs cfg.tunMode {
+          AmbientCapabilities = "CAP_NET_ADMIN";
+          CapabilityBoundingSet = "CAP_NET_ADMIN";
+          PrivateDevices = false;
+          PrivateUsers = false;
+          RestrictAddressFamilies = "AF_INET AF_INET6 AF_NETLINK";
+        };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ Guanran928 ];
+}
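A minimal sketch of the new services.mihomo module; the configuration path is illustrative and, per the note at the top of the file, must live outside the Nix store because it is handed to the service via LoadCredential:

  { ... }:
  {
    services.mihomo = {
      enable = true;
      configFile = "/var/lib/secrets/mihomo/config.yaml";  # illustrative path, kept out of /nix/store
      tunMode = true;                                      # grants CAP_NET_ADMIN; TUN must still be enabled in the config itself
    };
  }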
diff --git a/nixos/modules/services/networking/murmur.nix b/nixos/modules/services/networking/murmur.nix
index 5805f332a66fe..1fb5063e5ad8d 100644
--- a/nixos/modules/services/networking/murmur.nix
+++ b/nixos/modules/services/networking/murmur.nix
@@ -33,7 +33,7 @@ let
     sendversion=${boolToString cfg.sendVersion}
 
     ${optionalString (cfg.registerName != "") "registerName=${cfg.registerName}"}
-    ${optionalString (cfg.registerPassword == "") "registerPassword=${cfg.registerPassword}"}
+    ${optionalString (cfg.registerPassword != "") "registerPassword=${cfg.registerPassword}"}
     ${optionalString (cfg.registerUrl != "") "registerUrl=${cfg.registerUrl}"}
     ${optionalString (cfg.registerHostname != "") "registerHostname=${cfg.registerHostname}"}
 
diff --git a/nixos/modules/services/networking/mycelium.nix b/nixos/modules/services/networking/mycelium.nix
new file mode 100644
index 0000000000000..9c4bca7c68618
--- /dev/null
+++ b/nixos/modules/services/networking/mycelium.nix
@@ -0,0 +1,133 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.mycelium;
+in
+{
+  options.services.mycelium = {
+    enable = lib.mkEnableOption "mycelium network";
+    peers = lib.mkOption {
+      type = lib.types.listOf lib.types.str;
+      description = ''
+        List of peers to connect to, in the formats:
+         - `quic://[2001:0db8::1]:9651`
+         - `quic://192.0.2.1:9651`
+         - `tcp://[2001:0db8::1]:9651`
+         - `tcp://192.0.2.1:9651`
+
+        If addHostedPublicNodes is set to true, the hosted public nodes will also be added.
+      '';
+      default = [ ];
+    };
+    keyFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      description = ''
+        Optional path to a file containing the mycelium key material.
+        If unset, the default location (`/var/lib/mycelium/key.bin`) will be used.
+        If no key exists at this location, one will be generated on startup.
+      '';
+    };
+    openFirewall = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = "Open the firewall for mycelium";
+    };
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.mycelium;
+      defaultText = lib.literalExpression ''"''${pkgs.mycelium}"'';
+      description = "The mycelium package to use";
+    };
+    addHostedPublicNodes = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = ''
+        Adds the hosted peers from https://github.com/threefoldtech/mycelium#hosted-public-nodes.
+      '';
+    };
+  };
+  config = lib.mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = lib.optionals cfg.openFirewall [ 9651 ];
+    networking.firewall.allowedUDPPorts = lib.optionals cfg.openFirewall [ 9650 9651 ];
+
+    systemd.services.mycelium = {
+      description = "Mycelium network";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [
+        cfg.keyFile
+      ];
+
+      unitConfig.Documentation = "https://github.com/threefoldtech/mycelium";
+
+      serviceConfig = {
+        User = "mycelium";
+        DynamicUser = true;
+        StateDirectory = "mycelium";
+        ProtectHome = true;
+        ProtectSystem = true;
+        LoadCredential = lib.mkIf (cfg.keyFile != null) "keyfile:${cfg.keyFile}";
+        SyslogIdentifier = "mycelium";
+        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
+        MemoryDenyWriteExecute = true;
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged @keyring" ];
+        ExecStart = lib.concatStringsSep " " ([
+          (lib.getExe cfg.package)
+          (if (cfg.keyFile != null) then
+            "--key-file \${CREDENTIALS_DIRECTORY}/keyfile" else
+            "--key-file %S/mycelium/key.bin"
+          )
+          "--tun-name"
+          "mycelium"
+        ] ++
+        (lib.optional (cfg.addHostedPublicNodes || cfg.peers != [ ]) "--peers")
+        ++ cfg.peers ++ (lib.optionals cfg.addHostedPublicNodes [
+          "tcp://188.40.132.242:9651" # DE 01
+          "tcp://[2a01:4f8:221:1e0b::2]:9651"
+          "quic://188.40.132.242:9651"
+          "quic://[2a01:4f8:221:1e0b::2]:9651"
+
+          "tcp://136.243.47.186:9651" # DE 02
+          "tcp://[2a01:4f8:212:fa6::2]:9651"
+          "quic://136.243.47.186:9651"
+          "quic://[2a01:4f8:212:fa6::2]:9651"
+
+          "tcp://185.69.166.7:9651" # BE 03
+          "tcp://[2a02:1802:5e:0:8478:51ff:fee2:3331]:9651"
+          "quic://185.69.166.7:9651"
+          "quic://[2a02:1802:5e:0:8478:51ff:fee2:3331]:9651"
+
+          "tcp://185.69.166.8:9651" # BE 04
+          "tcp://[2a02:1802:5e:0:8c9e:7dff:fec9:f0d2]:9651"
+          "quic://185.69.166.8:9651"
+          "quic://[2a02:1802:5e:0:8c9e:7dff:fec9:f0d2]:9651"
+
+          "tcp://65.21.231.58:9651" # FI 05
+          "tcp://[2a01:4f9:6a:1dc5::2]:9651"
+          "quic://65.21.231.58:9651"
+          "quic://[2a01:4f9:6a:1dc5::2]:9651"
+
+          "tcp://65.109.18.113:9651" # FI 06
+          "tcp://[2a01:4f9:5a:1042::2]:9651"
+          "quic://65.109.18.113:9651"
+          "quic://[2a01:4f9:5a:1042::2]:9651"
+        ]));
+        Restart = "always";
+        RestartSec = 5;
+        TimeoutStopSec = 5;
+      };
+    };
+  };
+  meta = {
+    maintainers = with lib.maintainers; [ flokli lassulus ];
+  };
+}
+
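A minimal sketch for the new services.mycelium module, assuming the hosted public peers are acceptable; the commented key-file path is illustrative and only needed when the default key in the state directory should not be used:

  { ... }:
  {
    services.mycelium = {
      enable = true;
      openFirewall = true;          # TCP 9651 and UDP 9650/9651
      addHostedPublicNodes = true;  # default; appends the threefoldtech public peers listed above
      # keyFile = "/var/lib/secrets/mycelium-key.bin";  # illustrative; loaded via LoadCredential
    };
  }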
diff --git a/nixos/modules/services/networking/nebula.nix b/nixos/modules/services/networking/nebula.nix
index e13876172dac6..2f9e41ae9c801 100644
--- a/nixos/modules/services/networking/nebula.nix
+++ b/nixos/modules/services/networking/nebula.nix
@@ -10,6 +10,15 @@ let
   format = pkgs.formats.yaml {};
 
   nameToId = netName: "nebula-${netName}";
+
+  resolveFinalPort = netCfg:
+    if netCfg.listen.port == null then
+      if (netCfg.isLighthouse || netCfg.isRelay) then
+        4242
+      else
+        0
+    else
+      netCfg.listen.port;
 in
 {
   # Interface
@@ -95,8 +104,15 @@ in
             };
 
             listen.port = mkOption {
-              type = types.port;
-              default = 4242;
+              type = types.nullOr types.port;
+              default = null;
+              defaultText = lib.literalExpression ''
+                if (config.services.nebula.networks.''${name}.isLighthouse ||
+                    config.services.nebula.networks.''${name}.isRelay) then
+                  4242
+                else
+                  0;
+              '';
               description = lib.mdDoc "Port number to listen on.";
             };
 
@@ -174,7 +190,7 @@ in
           };
           listen = {
             host = netCfg.listen.host;
-            port = netCfg.listen.port;
+            port = resolveFinalPort netCfg;
           };
           tun = {
             disabled = netCfg.tun.disable;
@@ -185,7 +201,15 @@ in
             outbound = netCfg.firewall.outbound;
           };
         } netCfg.settings;
-        configFile = format.generate "nebula-config-${netName}.yml" settings;
+        configFile = format.generate "nebula-config-${netName}.yml" (
+          warnIf
+            ((settings.lighthouse.am_lighthouse || settings.relay.am_relay) && settings.listen.port == 0)
+            ''
+              Nebula network '${netName}' is configured as a lighthouse or relay, and its port is ${builtins.toString settings.listen.port}.
+              You will likely experience connectivity issues: https://nebula.defined.net/docs/config/listen/#listenport
+            ''
+            settings
+          );
         in
         {
           # Create the systemd service for Nebula.
@@ -229,7 +253,7 @@ in
 
     # Open the chosen ports for UDP.
     networking.firewall.allowedUDPPorts =
-      unique (mapAttrsToList (netName: netCfg: netCfg.listen.port) enabledNetworks);
+      unique (filter (port: port > 0) (mapAttrsToList (netName: netCfg: resolveFinalPort netCfg) enabledNetworks));
 
     # Create the service users and groups.
     users.users = mkMerge (mapAttrsToList (netName: netCfg:
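A sketch of how the nullable listen.port interacts with the resolved defaults above: a lighthouse or relay left at `null` now resolves to 4242, while an ordinary node resolves to 0 (a random port) and is dropped from the opened UDP ports. The network name is illustrative and the usual certificate options are omitted:

  { ... }:
  {
    services.nebula.networks.mesh = {
      enable = true;
      isLighthouse = true;   # with listen.port = null (the default) this resolves to 4242
      # listen.port = 4242;  # only needed to override the resolved default
    };
  }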
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index c96439cf2641a..573a02cbda9e1 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -101,7 +101,23 @@ let
     pre-down = "pre-down.d/";
   };
 
-  macAddressOpt = mkOption {
+  macAddressOptWifi = mkOption {
+    type = types.either types.str (types.enum [ "permanent" "preserve" "random" "stable" "stable-ssid" ]);
+    default = "preserve";
+    example = "00:11:22:33:44:55";
+    description = lib.mdDoc ''
+      Set the MAC address of the interface.
+
+      - `"XX:XX:XX:XX:XX:XX"`: MAC address of the interface
+      - `"permanent"`: Use the permanent MAC address of the device
+      - `"preserve"`: Don’t change the MAC address of the device upon activation
+      - `"random"`: Generate a randomized value upon each connect
+      - `"stable"`: Generate a stable, hashed MAC address
+      - `"stable-ssid"`: Generate a stable MAC address based on the connected Wi-Fi network (SSID)
+    '';
+  };
+
+  macAddressOptEth = mkOption {
     type = types.either types.str (types.enum [ "permanent" "preserve" "random" "stable" ]);
     default = "preserve";
     example = "00:11:22:33:44:55";
@@ -258,10 +274,10 @@ in
         '';
       };
 
-      ethernet.macAddress = macAddressOpt;
+      ethernet.macAddress = macAddressOptEth;
 
       wifi = {
-        macAddress = macAddressOpt;
+        macAddress = macAddressOptWifi;
 
         backend = mkOption {
           type = types.enum [ "wpa_supplicant" "iwd" ];
@@ -291,7 +307,7 @@ in
       };
 
       dns = mkOption {
-        type = types.enum [ "default" "dnsmasq" "unbound" "systemd-resolved" "none" ];
+        type = types.enum [ "default" "dnsmasq" "systemd-resolved" "none" ];
         default = "default";
         description = lib.mdDoc ''
           Set the DNS (`resolv.conf`) processing mode.
@@ -436,6 +452,7 @@ in
             And if you edit a declarative profile NetworkManager will move it to the persistent storage and treat it like a ad-hoc one,
             but there will be two profiles as soon as the systemd unit from this option runs again which can be confusing since NetworkManager tools will start displaying two profiles with the same name and probably a bit different settings depending on what you edited.
             A profile won't be deleted even if it's removed from the config until the system reboots because that's when NetworkManager clears it's temp directory.
+            If `networking.resolvconf.enable` is true, attributes affecting name resolution (such as `ignore-auto-dns`) may not change `/etc/resolv.conf` as expected when other name services (for example `networking.dhcpcd`) are enabled. Run `resolvconf -l` to see what each service produces.
           '';
         };
         environmentFiles = mkOption {
@@ -583,6 +600,7 @@ in
       description = "Ensure that NetworkManager declarative profiles are created";
       wantedBy = [ "multi-user.target" ];
       before = [ "network-online.target" ];
+      after = [ "NetworkManager.service" ];
       script = let
         path = id: "/run/NetworkManager/system-connections/${id}.nmconnection";
       in ''
@@ -592,9 +610,7 @@ in
           ${pkgs.envsubst}/bin/envsubst -i ${ini.generate (lib.escapeShellArg profile.n) profile.v} > ${path (lib.escapeShellArg profile.n)}
         '') (lib.mapAttrsToList (n: v: { inherit n v; }) cfg.ensureProfiles.profiles)
       + ''
-        if systemctl is-active --quiet NetworkManager; then
-          ${pkgs.networkmanager}/bin/nmcli connection reload
-        fi
+        ${pkgs.networkmanager}/bin/nmcli connection reload
       '';
       serviceConfig = {
         EnvironmentFile = cfg.ensureProfiles.environmentFiles;
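A sketch of the split MAC-address options introduced above; `stable-ssid` is accepted only for Wi-Fi, the Ethernet option keeps the previous set of values, and note that `dns = "unbound"` is no longer part of the enum:

  { ... }:
  {
    networking.networkmanager = {
      enable = true;
      wifi.macAddress = "stable-ssid";    # new value: a stable MAC derived per Wi-Fi network
      ethernet.macAddress = "permanent";  # wired interfaces keep the previously available values
    };
  }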
diff --git a/nixos/modules/services/networking/scion/scion-control.nix b/nixos/modules/services/networking/scion/scion-control.nix
new file mode 100644
index 0000000000000..fdf3a9ba3cc15
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion-control.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-control;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "cs";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    beacon_db = {
+      connection = "/var/lib/scion-control/control.beacon.db";
+    };
+    path_db = {
+      connection = "/var/lib/scion-control/control.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-control/control.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-control.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-control = {
+    enable = mkEnableOption (lib.mdDoc "the scion-control service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-control/control.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-control configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-control = {
+      description = "SCION Control Service";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = if (config.services.scion.scion-dispatcher.enable == true) then "scion" else null;
+        ExecStart = "${pkgs.scion}/bin/scion-control --config ${configFile}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        StateDirectory = "scion-control";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/scion/scion-daemon.nix b/nixos/modules/services/networking/scion/scion-daemon.nix
new file mode 100644
index 0000000000000..0bcc18771fc3c
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion-daemon.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-daemon;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "sd";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    path_db = {
+      connection = "/var/lib/scion-daemon/sd.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-daemon/sd.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-daemon.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-daemon = {
+    enable = mkEnableOption (lib.mdDoc "the scion-daemon service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-daemon/sd.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-daemon configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-daemon = {
+      description = "SCION Daemon";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-daemon --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-daemon";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/scion/scion-dispatcher.nix b/nixos/modules/services/networking/scion/scion-dispatcher.nix
new file mode 100644
index 0000000000000..bab1ec0a989b5
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion-dispatcher.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-dispatcher;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    dispatcher = {
+      id = "dispatcher";
+      socket_file_mode = "0770";
+      application_socket = "/dev/shm/dispatcher/default.sock";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-dispatcher.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-dispatcher = {
+    enable = mkEnableOption (lib.mdDoc "the scion-dispatcher service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          dispatcher = {
+            id = "dispatcher";
+            socket_file_mode = "0770";
+            application_socket = "/dev/shm/dispatcher/default.sock";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-dispatcher configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    # Needed for group ownership of the dispatcher socket
+    users.groups.scion = {};
+
+    # SCION programs hardcode the dispatcher path in /run/shm, which is not
+    # configurable at runtime. Upstream plans to obsolete the dispatcher in
+    # favor of an SCMP daemon, at which point this can be removed.
+    system.activationScripts.scion-dispatcher = ''
+      ln -sf /dev/shm /run/shm
+    '';
+
+    systemd.services.scion-dispatcher = {
+      description = "SCION Dispatcher";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = "scion";
+        DynamicUser = true;
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        ExecStartPre = "${pkgs.coreutils}/bin/rm -rf /run/shm/dispatcher";
+        ExecStart = "${pkgs.scion}/bin/scion-dispatcher --config ${configFile}";
+        Restart = "on-failure";
+        StateDirectory = "scion-dispatcher";
+      };
+    };
+  };
+}
+
diff --git a/nixos/modules/services/networking/scion/scion-router.nix b/nixos/modules/services/networking/scion/scion-router.nix
new file mode 100644
index 0000000000000..cbe83c6dbf8d1
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion-router.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-router;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "br";
+      config_dir = "/etc/scion";
+    };
+  };
+  configFile = toml.generate "scion-router.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-router = {
+    enable = mkEnableOption (lib.mdDoc "the scion-router service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          general.id = "br";
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-router configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-router = {
+      description = "SCION Router";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-router --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-router";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/scion/scion.nix b/nixos/modules/services/networking/scion/scion.nix
new file mode 100644
index 0000000000000..704f942b5d9e3
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion.nix
@@ -0,0 +1,39 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion;
+in
+{
+  options.services.scion = {
+    enable = mkEnableOption (lib.mdDoc "all of the scion components and services");
+    bypassBootstrapWarning = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to bypass the NixOS assertion about SCION PKI bootstrapping.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    services.scion = {
+      scion-dispatcher.enable = true;
+      scion-daemon.enable = true;
+      scion-router.enable = true;
+      scion-control.enable = true;
+    };
+    assertions = [
+      { assertion = cfg.bypassBootstrapWarning == true;
+        message = ''
+          SCION is a routing protocol and requires bootstrapping with a manual, imperative key signing ceremony. You may want to join an existing Isolation Domain (ISD) such as scionlab.org, or bootstrap your own. If you have completed and configured the public key infrastructure for SCION and are sure this process is complete, then add the following to your configuration:
+
+          services.scion.bypassBootstrapWarning = true;
+
+          refer to docs.scion.org for more information
+        '';
+      }
+    ];
+  };
+}
+
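A minimal sketch of the umbrella option: enabling services.scion turns on the dispatcher, daemon, router and control services, and the assertion above requires acknowledging the PKI bootstrap explicitly:

  { ... }:
  {
    services.scion = {
      enable = true;
      bypassBootstrapWarning = true;  # only set after the SCION PKI bootstrap has actually been completed
    };
  }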
diff --git a/nixos/modules/services/networking/tinyproxy.nix b/nixos/modules/services/networking/tinyproxy.nix
index 8ff12b52f10ca..2b7509e99ca4d 100644
--- a/nixos/modules/services/networking/tinyproxy.nix
+++ b/nixos/modules/services/networking/tinyproxy.nix
@@ -7,6 +7,7 @@ let
   mkValueStringTinyproxy = with lib; v:
         if true  ==         v then "yes"
         else if false ==    v then "no"
+        else if types.path.check v then ''"${v}"''
         else generators.mkValueStringDefault {} v;
   mkKeyValueTinyproxy = {
     mkValueString ? mkValueStringDefault {}
diff --git a/nixos/modules/services/networking/unbound.nix b/nixos/modules/services/networking/unbound.nix
index 8438e472e11ee..242fcd500bb0b 100644
--- a/nixos/modules/services/networking/unbound.nix
+++ b/nixos/modules/services/networking/unbound.nix
@@ -76,12 +76,13 @@ in {
 
       checkconf = mkOption {
         type = types.bool;
-        default = !cfg.settings ? include;
-        defaultText = "!config.services.unbound.settings ? include";
+        default = !cfg.settings ? include && !cfg.settings ? remote-control;
+        defaultText = "!services.unbound.settings ? include && !services.unbound.settings ? remote-control";
         description = lib.mdDoc ''
           Wether to check the resulting config file with unbound checkconf for syntax errors.
 
-          If settings.include is used, then this options is disabled, as the import can likely not be resolved at build time.
+          If settings.include is used, this option is disabled, as the included file can likely not be accessed at build time.
+          If settings.remote-control is used, this option is disabled too, as the control-key-file, server-cert-file and server-key-file cannot be accessed at build time.
         '';
       };
 
@@ -229,8 +230,6 @@ in {
       resolvconf = {
         useLocalResolver = mkDefault true;
       };
-
-      networkmanager.dns = "unbound";
     };
 
     environment.etc."unbound/unbound.conf".source = confFile;
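A sketch of the situation the new checkconf default guards against, with an illustrative remote-control stanza; when settings.remote-control is present, checkconf now defaults to false because the control key and certificate files cannot be read at build time:

  { ... }:
  {
    services.unbound = {
      enable = true;
      settings.remote-control.control-enable = "yes";  # illustrative remote-control setting
      # checkconf = true;  # would fail at build time: control-key-file etc. are not in the store
    };
  }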
diff --git a/nixos/modules/services/security/esdm.nix b/nixos/modules/services/security/esdm.nix
index 134b4be1a94c8..c34fba1b3c75b 100644
--- a/nixos/modules/services/security/esdm.nix
+++ b/nixos/modules/services/security/esdm.nix
@@ -4,49 +4,33 @@ let
   cfg = config.services.esdm;
 in
 {
+  imports = [
+    # removed option 'services.esdm.cuseRandomEnable'
+    (lib.mkRemovedOptionModule [ "services" "esdm" "cuseRandomEnable" ] ''
+      Use services.esdm.enableLinuxCompatServices instead.
+    '')
+    # removed option 'services.esdm.cuseUrandomEnable'
+    (lib.mkRemovedOptionModule [ "services" "esdm" "cuseUrandomEnable" ] ''
+      Use services.esdm.enableLinuxCompatServices instead.
+    '')
+    # removed option 'services.esdm.procEnable'
+    (lib.mkRemovedOptionModule [ "services" "esdm" "procEnable" ] ''
+      Use services.esdm.enableLinuxCompatServices instead.
+    '')
+    # removed option 'services.esdm.verbose'
+    (lib.mkRemovedOptionModule [ "services" "esdm" "verbose" ] ''
+      There is no replacement.
+    '')
+  ];
+
   options.services.esdm = {
     enable = lib.mkEnableOption (lib.mdDoc "ESDM service configuration");
     package = lib.mkPackageOption pkgs "esdm" { };
-    serverEnable = lib.mkOption {
-      type = lib.types.bool;
-      default = true;
-      description = lib.mdDoc ''
-        Enable option for ESDM server service. If serverEnable == false, then the esdm-server
-        will not start. Also the subsequent services esdm-cuse-random, esdm-cuse-urandom
-        and esdm-proc will not start as these have the entry Want=esdm-server.service.
-      '';
-    };
-    cuseRandomEnable = lib.mkOption {
-      type = lib.types.bool;
-      default = true;
-      description = lib.mdDoc ''
-        Enable option for ESDM cuse-random service. Determines if the esdm-cuse-random.service
-        is started.
-      '';
-    };
-    cuseUrandomEnable = lib.mkOption {
-      type = lib.types.bool;
-      default = true;
-      description = lib.mdDoc ''
-        Enable option for ESDM cuse-urandom service. Determines if the esdm-cuse-urandom.service
-        is started.
-      '';
-    };
-    procEnable = lib.mkOption {
+    enableLinuxCompatServices = lib.mkOption {
       type = lib.types.bool;
       default = true;
       description = lib.mdDoc ''
-        Enable option for ESDM proc service. Determines if the esdm-proc.service
-        is started.
-      '';
-    };
-    verbose = lib.mkOption {
-      type = lib.types.bool;
-      default = false;
-      description = lib.mdDoc ''
-        Enable verbose ExecStart for ESDM. If verbose == true, then the corresponding "ExecStart"
-        values of the 4 aforementioned services are overwritten with the option
-        for the highest verbosity.
+        Enable the /dev/random, /dev/urandom and /proc/sys/kernel/random/* userspace wrappers.
       '';
     };
   };
@@ -55,46 +39,13 @@ in
     lib.mkMerge [
       ({
         systemd.packages = [ cfg.package ];
-      })
-      # It is necessary to set those options for these services to be started by systemd in NixOS
-      (lib.mkIf cfg.serverEnable {
         systemd.services."esdm-server".wantedBy = [ "basic.target" ];
-        systemd.services."esdm-server".serviceConfig = lib.mkIf cfg.verbose {
-          ExecStart = [
-            " " # unset previous value defined in 'esdm-server.service'
-            "${cfg.package}/bin/esdm-server -f -vvvvvv"
-          ];
-        };
-      })
-
-      (lib.mkIf cfg.cuseRandomEnable {
-        systemd.services."esdm-cuse-random".wantedBy = [ "basic.target" ];
-        systemd.services."esdm-cuse-random".serviceConfig = lib.mkIf cfg.verbose {
-          ExecStart = [
-            " " # unset previous value defined in 'esdm-cuse-random.service'
-            "${cfg.package}/bin/esdm-cuse-random -f -v 6"
-          ];
-        };
       })
-
-      (lib.mkIf cfg.cuseUrandomEnable {
-        systemd.services."esdm-cuse-urandom".wantedBy = [ "basic.target" ];
-        systemd.services."esdm-cuse-urandom".serviceConfig = lib.mkIf cfg.verbose {
-          ExecStart = [
-            " " # unset previous value defined in 'esdm-cuse-urandom.service'
-            "${config.services.esdm.package}/bin/esdm-cuse-urandom -f -v 6"
-          ];
-        };
-      })
-
-      (lib.mkIf cfg.procEnable {
-        systemd.services."esdm-proc".wantedBy = [ "basic.target" ];
-        systemd.services."esdm-proc".serviceConfig = lib.mkIf cfg.verbose {
-          ExecStart = [
-            " " # unset previous value defined in 'esdm-proc.service'
-            "${cfg.package}/bin/esdm-proc --relabel -f -o allow_other /proc/sys/kernel/random -v 6"
-          ];
-        };
+      # It is necessary to set those options for these services to be started by systemd in NixOS
+      (lib.mkIf cfg.enableLinuxCompatServices {
+        systemd.targets."esdm-linux-compat".wantedBy = [ "basic.target" ];
+        systemd.services."esdm-server-suspend".wantedBy = [ "sleep.target" "suspend.target" "hibernate.target" ];
+        systemd.services."esdm-server-resume".wantedBy = [ "sleep.target" "suspend.target" "hibernate.target" ];
       })
     ]);
 
diff --git a/nixos/modules/services/security/oauth2_proxy_nginx.nix b/nixos/modules/services/security/oauth2_proxy_nginx.nix
index b8e45f67cf783..dd3ded6259c4a 100644
--- a/nixos/modules/services/security/oauth2_proxy_nginx.nix
+++ b/nixos/modules/services/security/oauth2_proxy_nginx.nix
@@ -13,6 +13,17 @@ in
         The address of the reverse proxy endpoint for oauth2_proxy
       '';
     };
+
+    domain = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The domain under which oauth2_proxy will be accessible and to which the cookie path is scoped.
+        This setting must be set to ensure that back-redirects work properly
+        if oauth2-proxy is configured with {option}`services.oauth2_proxy.cookie.domain`
+        or multiple {option}`services.oauth2_proxy.nginx.virtualHosts` that are not on the same domain.
+      '';
+    };
+
     virtualHosts = mkOption {
       type = types.listOf types.str;
       default = [];
@@ -21,22 +32,26 @@ in
       '';
     };
   };
+
   config.services.oauth2_proxy = mkIf (cfg.virtualHosts != [] && (hasPrefix "127.0.0.1:" cfg.proxy)) {
     enable = true;
   };
-  config.services.nginx = mkIf config.services.oauth2_proxy.enable (mkMerge
-  ((optional (cfg.virtualHosts != []) {
-    recommendedProxySettings = true; # needed because duplicate headers
-  }) ++ (map (vhost: {
-    virtualHosts.${vhost} = {
-      locations."/oauth2/" = {
+
+  config.services.nginx = mkIf (cfg.virtualHosts != [] && config.services.oauth2_proxy.enable) (mkMerge ([
+    {
+      virtualHosts.${cfg.domain}.locations."/oauth2/" = {
         proxyPass = cfg.proxy;
         extraConfig = ''
           proxy_set_header X-Scheme                $scheme;
           proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
         '';
       };
-      locations."/oauth2/auth" = {
+    }
+  ] ++ optional (cfg.virtualHosts != []) {
+    recommendedProxySettings = true; # needed because duplicate headers
+  } ++ (map (vhost: {
+    virtualHosts.${vhost}.locations = {
+      "/oauth2/auth" = {
         proxyPass = cfg.proxy;
         extraConfig = ''
           proxy_set_header X-Scheme         $scheme;
@@ -45,9 +60,10 @@ in
           proxy_pass_request_body           off;
         '';
       };
-      locations."/".extraConfig = ''
+      "@redirectToAuth2ProxyLogin".return = "307 https://${cfg.domain}/oauth2/start?rd=$scheme://$host$request_uri";
+      "/".extraConfig = ''
         auth_request /oauth2/auth;
-        error_page 401 = /oauth2/sign_in;
+        error_page 401 = @redirectToAuth2ProxyLogin;
 
         # pass information via X-User and X-Email headers to backend,
         # requires running with --set-xauthrequest flag
@@ -60,7 +76,6 @@ in
         auth_request_set $auth_cookie $upstream_http_set_cookie;
         add_header Set-Cookie $auth_cookie;
       '';
-
     };
   }) cfg.virtualHosts)));
 }
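A sketch of the reworked nginx integration with the new domain option; the host names are illustrative and services.oauth2_proxy itself is assumed to be configured elsewhere. Unauthenticated requests on the protected vhosts are now redirected to /oauth2/start on the configured domain instead of the old /oauth2/sign_in error page:

  { ... }:
  {
    services.oauth2_proxy.nginx = {
      domain = "auth.example.org";  # serves /oauth2/ and anchors cookies and back-redirects
      virtualHosts = [ "app.example.org" "grafana.example.org" ];
    };
  }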
diff --git a/nixos/modules/services/security/yubikey-agent.nix b/nixos/modules/services/security/yubikey-agent.nix
index bf8432a00238f..3d5f84af2cf48 100644
--- a/nixos/modules/services/security/yubikey-agent.nix
+++ b/nixos/modules/services/security/yubikey-agent.nix
@@ -37,7 +37,7 @@ in
 
     # This overrides the systemd user unit shipped with the
     # yubikey-agent package
-    systemd.user.services.yubikey-agent = mkIf (pinentryFlavor != null) {
+    systemd.user.services.yubikey-agent = mkIf (config.programs.gnupg.agent.pinentryPackage != null) {
       path = [ config.programs.gnupg.agent.pinentryPackage ];
       wantedBy = [ "default.target" ];
     };
diff --git a/nixos/modules/services/video/photonvision.nix b/nixos/modules/services/video/photonvision.nix
new file mode 100644
index 0000000000000..fdbe9da3999da
--- /dev/null
+++ b/nixos/modules/services/video/photonvision.nix
@@ -0,0 +1,64 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.photonvision;
+in
+{
+  options = {
+    services.photonvision = {
+      enable = lib.mkEnableOption (lib.mdDoc "PhotonVision");
+
+      package = lib.mkPackageOption pkgs "photonvision" {};
+
+      openFirewall = lib.mkOption {
+        description = lib.mdDoc ''
+          Whether to open the required ports in the firewall.
+        '';
+        default = false;
+        type = lib.types.bool;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.photonvision = {
+      description = "PhotonVision, the free, fast, and easy-to-use computer vision solution for the FIRST Robotics Competition";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = lib.getExe cfg.package;
+
+        # ephemeral root directory
+        RuntimeDirectory = "photonvision";
+        RootDirectory = "/run/photonvision";
+
+        # setup persistent state and logs directories
+        StateDirectory = "photonvision";
+        LogsDirectory = "photonvision";
+
+        BindReadOnlyPaths = [
+          # mount the nix store read-only
+          "/nix/store"
+
+          # the JRE reads the user.home property from /etc/passwd
+          "/etc/passwd"
+        ];
+        BindPaths = [
+          # mount the configuration and logs directories to the host
+          "/var/lib/photonvision:/photonvision_config"
+          "/var/log/photonvision:/photonvision_config/logs"
+        ];
+
+        # for PhotonVision's dynamic libraries, which it writes to /tmp
+        PrivateTmp = true;
+      };
+    };
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 5800 ];
+      allowedTCPPortRanges = [{ from = 1180; to = 1190; }];
+    };
+  };
+}
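A minimal sketch of the new services.photonvision module; openFirewall opens TCP 5800 plus the 1180-1190 camera-stream range declared above:

  { ... }:
  {
    services.photonvision = {
      enable = true;
      openFirewall = true;  # TCP 5800 and 1180-1190
    };
  }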
diff --git a/nixos/modules/services/web-apps/engelsystem.nix b/nixos/modules/services/web-apps/engelsystem.nix
index 669620debce55..ae7b2b9e7d0c7 100644
--- a/nixos/modules/services/web-apps/engelsystem.nix
+++ b/nixos/modules/services/web-apps/engelsystem.nix
@@ -99,7 +99,6 @@ in {
       '';
 
     services.phpfpm.pools.engelsystem = {
-      phpPackage = pkgs.php81;
       user = "engelsystem";
       settings = {
         "listen.owner" = config.services.nginx.user;
diff --git a/nixos/modules/services/web-apps/gotosocial.nix b/nixos/modules/services/web-apps/gotosocial.nix
index 45464f646da89..657509c110051 100644
--- a/nixos/modules/services/web-apps/gotosocial.nix
+++ b/nixos/modules/services/web-apps/gotosocial.nix
@@ -27,7 +27,7 @@ let
 in
 {
   meta.doc = ./gotosocial.md;
-  meta.maintainers = with lib.maintainers; [ misuzu ];
+  meta.maintainers = with lib.maintainers; [ misuzu blakesmith ];
 
   options.services.gotosocial = {
     enable = lib.mkEnableOption (lib.mdDoc "ActivityPub social network server");
diff --git a/nixos/modules/services/web-apps/hedgedoc.nix b/nixos/modules/services/web-apps/hedgedoc.nix
index adcfe80a7332b..8b17c6cbc3be4 100644
--- a/nixos/modules/services/web-apps/hedgedoc.nix
+++ b/nixos/modules/services/web-apps/hedgedoc.nix
@@ -236,9 +236,9 @@ in
     };
 
     services.hedgedoc.settings = {
-      defaultNotePath = lib.mkDefault "${cfg.package}/public/default.md";
-      docsPath = lib.mkDefault "${cfg.package}/public/docs";
-      viewPath = lib.mkDefault "${cfg.package}/public/views";
+      defaultNotePath = lib.mkDefault "${cfg.package}/share/hedgedoc/public/default.md";
+      docsPath = lib.mkDefault "${cfg.package}/share/hedgedoc/public/docs";
+      viewPath = lib.mkDefault "${cfg.package}/share/hedgedoc/public/views";
     };
 
     systemd.services.hedgedoc = {
@@ -263,7 +263,7 @@ in
         Group = name;
 
         Restart = "always";
-        ExecStart = "${cfg.package}/bin/hedgedoc";
+        ExecStart = lib.getExe cfg.package;
         RuntimeDirectory = [ name ];
         StateDirectory = [ name ];
         WorkingDirectory = "/run/${name}";
diff --git a/nixos/modules/services/web-apps/kavita.nix b/nixos/modules/services/web-apps/kavita.nix
index c3e39f0b54761..c90697bcfa8b2 100644
--- a/nixos/modules/services/web-apps/kavita.nix
+++ b/nixos/modules/services/web-apps/kavita.nix
@@ -2,7 +2,18 @@
 
 let
   cfg = config.services.kavita;
-in {
+  settingsFormat = pkgs.formats.json { };
+  appsettings = settingsFormat.generate "appsettings.json" ({ TokenKey = "@TOKEN@"; } // cfg.settings);
+in
+{
+  imports = [
+    (lib.mkChangedOptionModule [ "services" "kavita" "ipAdresses" ] [ "services" "kavita" "settings" "IpAddresses" ] (config:
+      let value = lib.getAttrFromPath [ "services" "kavita" "ipAdresses" ] config; in
+      lib.concatStringsSep "," value
+    ))
+    (lib.mkRenamedOptionModule [ "services" "kavita" "port" ] [ "services" "kavita" "settings" "Port" ])
+  ];
+
   options.services.kavita = {
     enable = lib.mkEnableOption (lib.mdDoc "Kavita reading server");
 
@@ -27,16 +38,31 @@ in {
         It can be generated with `head -c 32 /dev/urandom | base64`.
       '';
     };
-    port = lib.mkOption {
-      default = 5000;
-      type = lib.types.port;
-      description = lib.mdDoc "Port to bind to.";
-    };
-    ipAdresses = lib.mkOption {
-      default = ["0.0.0.0" "::"];
-      type = lib.types.listOf lib.types.str;
-      description = lib.mdDoc "IP Addresses to bind to. The default is to bind
-      to all IPv4 and IPv6 addresses.";
+
+    settings = lib.mkOption {
+      default = { };
+      description = lib.mdDoc ''
+        Kavita configuration options, as configured in {file}`appsettings.json`.
+      '';
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+
+        options = {
+          Port = lib.mkOption {
+            default = 5000;
+            type = lib.types.port;
+            description = lib.mdDoc "Port to bind to.";
+          };
+
+          IpAddresses = lib.mkOption {
+            default = "0.0.0.0,::";
+            type = lib.types.commas;
+            description = lib.mdDoc ''
+              IP Addresses to bind to. The default is to bind to all IPv4 and IPv6 addresses.
+            '';
+          };
+        };
+      };
     };
   };
 
@@ -46,18 +72,15 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       preStart = ''
-        umask u=rwx,g=rx,o=
-        cat > "${cfg.dataDir}/config/appsettings.json" <<EOF
-        {
-          "TokenKey": "$(cat ${cfg.tokenKeyFile})",
-          "Port": ${toString cfg.port},
-          "IpAddresses": "${lib.concatStringsSep "," cfg.ipAdresses}"
-        }
-        EOF
+        install -m600 ${appsettings} ${lib.escapeShellArg cfg.dataDir}/config/appsettings.json
+        ${pkgs.replace-secret}/bin/replace-secret '@TOKEN@' \
+          ''${CREDENTIALS_DIRECTORY}/token \
+          '${cfg.dataDir}/config/appsettings.json'
       '';
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
-        ExecStart = "${lib.getExe cfg.package}";
+        LoadCredential = [ "token:${cfg.tokenKeyFile}" ];
+        ExecStart = lib.getExe cfg.package;
         Restart = "always";
         User = cfg.user;
       };
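
For illustration only (not part of this diff): with the freeform `settings` option introduced above, a host might configure Kavita as in the following sketch. The secret path is hypothetical; `enable` and `tokenKeyFile` are the module's existing options.

  { ... }:
  {
    services.kavita = {
      enable = true;
      tokenKeyFile = "/run/secrets/kavita-token"; # hypothetical secret location, loaded via LoadCredential
      settings = {
        Port = 5000;                   # rendered into appsettings.json by the generator above
        IpAddresses = "127.0.0.1,::1"; # comma-separated, per the IpAddresses option
      };
    };
  }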
diff --git a/nixos/modules/services/web-apps/komga.nix b/nixos/modules/services/web-apps/komga.nix
index 31f475fc7b048..d7ab2a9e612ef 100644
--- a/nixos/modules/services/web-apps/komga.nix
+++ b/nixos/modules/services/web-apps/komga.nix
@@ -1,99 +1,122 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
 
 let
   cfg = config.services.komga;
-
-in {
+  inherit (lib) mkOption mkEnableOption maintainers;
+  inherit (lib.types) port str bool;
+in
+{
   options = {
     services.komga = {
-      enable = mkEnableOption (lib.mdDoc "Komga, a free and open source comics/mangas media server");
+      enable = mkEnableOption "Komga, a free and open source comics/mangas media server";
 
       port = mkOption {
-        type = types.port;
+        type = port;
         default = 8080;
-        description = lib.mdDoc ''
-          The port that Komga will listen on.
-        '';
+        description = "The port that Komga will listen on.";
       };
 
       user = mkOption {
-        type = types.str;
+        type = str;
         default = "komga";
-        description = lib.mdDoc ''
-          User account under which Komga runs.
-        '';
+        description = "User account under which Komga runs.";
       };
 
       group = mkOption {
-        type = types.str;
+        type = str;
         default = "komga";
-        description = lib.mdDoc ''
-          Group under which Komga runs.
-        '';
+        description = "Group under which Komga runs.";
       };
 
       stateDir = mkOption {
-        type = types.str;
+        type = str;
         default = "/var/lib/komga";
-        description = lib.mdDoc ''
-          State and configuration directory Komga will use.
-        '';
+        description = "State and configuration directory Komga will use.";
       };
 
       openFirewall = mkOption {
-        type = types.bool;
+        type = bool;
         default = false;
-        description = lib.mdDoc ''
-          Whether to open the firewall for the port in {option}`services.komga.port`.
-        '';
+        description = "Whether to open the firewall for the port in {option}`services.komga.port`.";
       };
     };
   };
 
-  config = mkIf cfg.enable {
-
-    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+  config =
+    let
+      inherit (lib) mkIf getExe;
+    in
+    mkIf cfg.enable {
 
-    users.groups = mkIf (cfg.group == "komga") {
-      komga = {};
-    };
+      networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
 
-    users.users = mkIf (cfg.user == "komga") {
-      komga = {
-        group = cfg.group;
-        home = cfg.stateDir;
-        description = "Komga Daemon user";
-        isSystemUser = true;
-      };
-    };
+      users.groups = mkIf (cfg.group == "komga") { komga = { }; };
 
-    systemd.services.komga = {
-      environment = {
-        SERVER_PORT = builtins.toString cfg.port;
-        KOMGA_CONFIGDIR = cfg.stateDir;
+      users.users = mkIf (cfg.user == "komga") {
+        komga = {
+          group = cfg.group;
+          home = cfg.stateDir;
+          description = "Komga Daemon user";
+          isSystemUser = true;
+        };
       };
 
-      description = "Komga is a free and open source comics/mangas media server";
-
-      wantedBy = [ "multi-user.target" ];
-      wants = [ "network-online.target" ];
-      after = [ "network-online.target" ];
-
-      serviceConfig = {
-        User = cfg.user;
-        Group = cfg.group;
-
-        Type = "simple";
-        Restart = "on-failure";
-        ExecStart = "${pkgs.komga}/bin/komga";
-
-        StateDirectory = mkIf (cfg.stateDir == "/var/lib/komga") "komga";
+      systemd.services.komga = {
+        environment = {
+          SERVER_PORT = builtins.toString cfg.port;
+          KOMGA_CONFIGDIR = cfg.stateDir;
+        };
+
+        description = "Komga is a free and open source comics/mangas media server";
+
+        wantedBy = [ "multi-user.target" ];
+        wants = [ "network-online.target" ];
+        after = [ "network-online.target" ];
+
+        serviceConfig = {
+          User = cfg.user;
+          Group = cfg.group;
+
+          Type = "simple";
+          Restart = "on-failure";
+          ExecStart = getExe pkgs.komga;
+
+          StateDirectory = mkIf (cfg.stateDir == "/var/lib/komga") "komga";
+
+          RemoveIPC = true;
+          NoNewPrivileges = true;
+          CapabilityBoundingSet = "";
+          SystemCallFilter = [ "@system-service" ];
+          ProtectSystem = "full";
+          PrivateTmp = true;
+          ProtectProc = "invisible";
+          ProtectClock = true;
+          ProcSubset = "pid";
+          PrivateUsers = true;
+          PrivateDevices = true;
+          ProtectHostname = true;
+          ProtectKernelTunables = true;
+          RestrictAddressFamilies = [
+            "AF_INET"
+            "AF_INET6"
+            "AF_NETLINK"
+          ];
+          LockPersonality = true;
+          RestrictNamespaces = true;
+          ProtectKernelLogs = true;
+          ProtectControlGroups = true;
+          ProtectKernelModules = true;
+          SystemCallArchitectures = "native";
+          RestrictSUIDSGID = true;
+          RestrictRealtime = true;
+        };
       };
-
     };
-  };
 
   meta.maintainers = with maintainers; [ govanify ];
 }
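
For illustration only (not part of this diff): the rewritten Komga module keeps its option surface, so an existing configuration such as the sketch below keeps working, now with the hardening directives above applied automatically.

  { ... }:
  {
    services.komga = {
      enable = true;
      port = 8080;         # exported to the service as SERVER_PORT
      openFirewall = true; # opens the port above
    };
  }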
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 5cda4a00a9de5..7f998207c434a 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -14,7 +14,6 @@ let
     expose_php = "Off";
     error_reporting = "E_ALL & ~E_DEPRECATED & ~E_STRICT";
     display_errors = "stderr";
-    "opcache.enable_cli" = "1";
     "opcache.interned_strings_buffer" = "8";
     "opcache.max_accelerated_files" = "10000";
     "opcache.memory_consumption" = "128";
diff --git a/nixos/modules/services/web-apps/pretix.nix b/nixos/modules/services/web-apps/pretix.nix
new file mode 100644
index 0000000000000..2355f8c450a1e
--- /dev/null
+++ b/nixos/modules/services/web-apps/pretix.nix
@@ -0,0 +1,581 @@
+{ config
+, lib
+, pkgs
+, utils
+, ...
+}:
+
+let
+  inherit (lib)
+    concatMapStringsSep
+    escapeShellArgs
+    filter
+    filterAttrs
+    getExe
+    getExe'
+    isAttrs
+    isList
+    literalExpression
+    mapAttrs
+    mkDefault
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    optionals
+    optionalString
+    recursiveUpdate
+    types
+  ;
+
+  filterRecursiveNull = o:
+    if isAttrs o then
+      mapAttrs (_: v: filterRecursiveNull v) (filterAttrs (_: v: v != null) o)
+    else if isList o then
+      map filterRecursiveNull (filter (v: v != null) o)
+    else
+      o;
+
+  cfg = config.services.pretix;
+  format = pkgs.formats.ini { };
+
+  configFile = format.generate "pretix.cfg" (filterRecursiveNull cfg.settings);
+
+  finalPackage = cfg.package.override {
+    inherit (cfg) plugins;
+  };
+
+  pythonEnv = cfg.package.python.buildEnv.override {
+    extraLibs = with cfg.package.python.pkgs; [
+      (toPythonModule finalPackage)
+      gunicorn
+    ]
+    ++ lib.optionals (cfg.settings.memcached.location != null)
+      cfg.package.optional-dependencies.memcached
+    ;
+  };
+
+  withRedis = cfg.settings.redis.location != null;
+in
+{
+  meta = with lib; {
+    maintainers = with maintainers; [ hexa ];
+  };
+
+  options.services.pretix = {
+    enable = mkEnableOption "pretix";
+
+    package = mkPackageOption pkgs "pretix" { };
+
+    group = mkOption {
+      type = types.str;
+      default = "pretix";
+      description = ''
+        Group under which pretix should run.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "pretix";
+      description = ''
+        User under which pretix should run.
+      '';
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/run/keys/pretix-secrets.env";
+      description = ''
+        Environment file to pass secret configuration values.
+
+        Each line must follow the `PRETIX_SECTION_KEY=value` pattern.
+      '';
+    };
+
+    plugins = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression ''
+        with config.services.pretix.package.plugins; [
+          passbook
+          pages
+        ];
+      '';
+      description = ''
+        Pretix plugins to install into the Python environment.
+      '';
+    };
+
+    gunicorn.extraArgs = mkOption {
+      type = with types; listOf str;
+      default = [
+        "--name=pretix"
+      ];
+      example = [
+        "--name=pretix"
+        "--workers=4"
+        "--max-requests=1200"
+        "--max-requests-jitter=50"
+        "--log-level=info"
+      ];
+      description = ''
+        Extra arguments to pass to gunicorn.
+        See <https://docs.pretix.eu/en/latest/admin/installation/manual_smallscale.html#start-pretix-as-a-service> for details.
+      '';
+      apply = escapeShellArgs;
+    };
+
+    celery = {
+      extraArgs = mkOption {
+        type = with types; listOf str;
+        default = [ ];
+        description = ''
+          Extra arguments to pass to celery.
+
+          See <https://docs.celeryq.dev/en/stable/reference/cli.html#celery-worker> for more info.
+        '';
+        apply = utils.escapeSystemdExecArgs;
+      };
+    };
+
+    nginx = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        example = false;
+        description = ''
+          Whether to set up an nginx virtual host.
+        '';
+      };
+
+      domain = mkOption {
+        type = types.str;
+        example = "talks.example.com";
+        description = ''
+          The domain name under which to set up the virtual host.
+        '';
+      };
+    };
+
+    database.createLocally = mkOption {
+      type = types.bool;
+      default = true;
+      example = false;
+      description = ''
+        Whether to automatically set up the database on the local DBMS instance.
+
+        Only supported for PostgreSQL. Not required for sqlite.
+      '';
+    };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = format.type;
+        options = {
+          pretix = {
+            instance_name = mkOption {
+              type = types.str;
+              example = "tickets.example.com";
+              description = ''
+                The name of this installation.
+              '';
+            };
+
+            url = mkOption {
+              type = types.str;
+              example = "https://tickets.example.com";
+              description = ''
+                The installation’s full URL, without a trailing slash.
+              '';
+            };
+
+            cachedir = mkOption {
+              type = types.path;
+              default = "/var/cache/pretix";
+              description = ''
+                Directory for storing temporary files.
+              '';
+            };
+
+            datadir = mkOption {
+              type = types.path;
+              default = "/var/lib/pretix";
+              description = ''
+                Directory for storing user uploads and similar data.
+              '';
+            };
+
+            logdir = mkOption {
+              type = types.path;
+              default = "/var/log/pretix";
+              description = ''
+                Directory for storing log files.
+              '';
+            };
+
+            currency = mkOption {
+              type = types.str;
+              default = "EUR";
+              example = "USD";
+              description = ''
+                Default currency for events, as an ISO 4217 three-letter code.
+              '';
+            };
+
+            registration = mkOption {
+              type = types.bool;
+              default = false;
+              example = true;
+              description = ''
+                Whether to allow registration of new admin users.
+              '';
+            };
+          };
+
+          database = {
+            backend = mkOption {
+              type = types.enum [
+                "sqlite3"
+                "postgresql"
+              ];
+              default = "postgresql";
+              description = ''
+                Database backend to use.
+
+                Only postgresql is recommended for production setups.
+              '';
+            };
+
+            host = mkOption {
+              type = with types; nullOr types.path;
+              default = if cfg.settings.database.backend == "postgresql" then "/run/postgresql" else null;
+              defaultText = literalExpression ''
+                if config.services.pretix.settings.database.backend == "postgresql" then "/run/postgresql"
+                else null
+              '';
+              description = ''
+                Database host or socket path.
+              '';
+            };
+
+            name = mkOption {
+              type = types.str;
+              default = "pretix";
+              description = ''
+                Database name.
+              '';
+            };
+
+            user = mkOption {
+              type = types.str;
+              default = "pretix";
+              description = ''
+                Database username.
+              '';
+            };
+          };
+
+          mail = {
+            from = mkOption {
+              type = types.str;
+              example = "tickets@example.com";
+              description = ''
+                E-Mail address used in the `FROM` header of outgoing mails.
+              '';
+            };
+
+            host = mkOption {
+              type = types.str;
+              default = "localhost";
+              example = "mail.example.com";
+              description = ''
+                Hostname of the SMTP server to use for mail delivery.
+              '';
+            };
+
+            port = mkOption {
+              type = types.port;
+              default = 25;
+              example = 587;
+              description = ''
+                Port of the SMTP server to use for mail delivery.
+              '';
+            };
+          };
+
+          celery = {
+            backend = mkOption {
+              type = types.str;
+              default = "redis+socket://${config.services.redis.servers.pretix.unixSocket}?virtual_host=1";
+              defaultText = literalExpression ''
+                "redis+socket://''${config.services.redis.servers.pretix.unixSocket}?virtual_host=1"
+              '';
+              description = ''
+                URI to the celery backend used for the asynchronous job queue.
+              '';
+            };
+
+            broker = mkOption {
+              type = types.str;
+              default = "redis+socket://${config.services.redis.servers.pretix.unixSocket}?virtual_host=2";
+              defaultText = literalExpression ''
+                "redis+socket://''${config.services.redis.servers.pretix.unixSocket}?virtual_host=2"
+              '';
+              description = ''
+                URI to the celery broker used for the asynchronous job queue.
+              '';
+            };
+          };
+
+          redis = {
+            location = mkOption {
+              type = with types; nullOr str;
+              default = "unix://${config.services.redis.servers.pretix.unixSocket}?db=0";
+              defaultText = literalExpression ''
+                "unix://''${config.services.redis.servers.pretix.unixSocket}?db=0"
+              '';
+              description = ''
+                URI to the redis server, used to speed up locking, caching and session storage.
+              '';
+            };
+
+            sessions = mkOption {
+              type = types.bool;
+              default = true;
+              example = false;
+              description = ''
+                Whether to use redis as the session storage.
+              '';
+            };
+          };
+
+          memcached = {
+            location = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              example = "127.0.0.1:11211";
+              description = ''
+                The `host:port` combination or the path to the UNIX socket of a memcached instance.
+
+                Can be used instead of Redis for caching.
+              '';
+            };
+          };
+
+          tools = {
+            pdftk = mkOption {
+              type = types.path;
+              default = getExe pkgs.pdftk;
+              defaultText = literalExpression ''
+                lib.getExe pkgs.pdftk
+              '';
+              description = ''
+                Path to the pdftk executable.
+              '';
+            };
+          };
+        };
+      };
+      default = { };
+      description = ''
+        pretix configuration as a Nix attribute set. All settings can also be passed
+        from the environment.
+
+        See <https://docs.pretix.eu/en/latest/admin/config.html> for possible options.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # https://docs.pretix.eu/en/latest/admin/installation/index.html
+
+    environment.systemPackages = [
+      (pkgs.writeScriptBin "pretix-manage" ''
+        cd ${cfg.settings.pretix.datadir}
+        sudo=exec
+        if [[ "$USER" != ${cfg.user} ]]; then
+          sudo='exec /run/wrappers/bin/sudo -u ${cfg.user} ${optionalString withRedis "-g redis-pretix"} --preserve-env=PRETIX_CONFIG_FILE'
+        fi
+        export PRETIX_CONFIG_FILE=${configFile}
+        $sudo ${getExe' pythonEnv "pretix-manage"} "$@"
+      '')
+    ];
+
+    services = {
+      nginx = mkIf cfg.nginx.enable {
+        enable = true;
+        recommendedGzipSettings = mkDefault true;
+        recommendedOptimisation = mkDefault true;
+        recommendedProxySettings = mkDefault true;
+        recommendedTlsSettings = mkDefault true;
+        upstreams.pretix.servers."unix:/run/pretix/pretix.sock" = { };
+        virtualHosts.${cfg.nginx.domain} = {
+          # https://docs.pretix.eu/en/latest/admin/installation/manual_smallscale.html#ssl
+          extraConfig = ''
+            more_set_headers Referrer-Policy same-origin;
+            more_set_headers X-Content-Type-Options nosniff;
+          '';
+          locations = {
+            "/".proxyPass = "http://pretix";
+            "/media/" = {
+              alias = "${cfg.settings.pretix.datadir}/media/";
+              extraConfig = ''
+                access_log off;
+                expires 7d;
+              '';
+            };
+            "^~ /media/(cachedfiles|invoices)" = {
+              extraConfig = ''
+                deny all;
+                return 404;
+              '';
+            };
+            "/static/" = {
+              alias = "${finalPackage}/${cfg.package.python.sitePackages}/pretix/static.dist/";
+              extraConfig = ''
+                access_log off;
+                more_set_headers Cache-Control "public";
+                expires 365d;
+              '';
+            };
+          };
+        };
+      };
+
+      postgresql = mkIf (cfg.database.createLocally && cfg.settings.database.backend == "postgresql") {
+        enable = true;
+        ensureUsers = [ {
+          name = cfg.settings.database.user;
+          ensureDBOwnership = true;
+        } ];
+        ensureDatabases = [ cfg.settings.database.name ];
+      };
+
+      redis.servers.pretix.enable = withRedis;
+    };
+
+    systemd.services = let
+      commonUnitConfig = {
+        environment.PRETIX_CONFIG_FILE = configFile;
+        serviceConfig = {
+          User = "pretix";
+          Group = "pretix";
+          EnvironmentFile = optionals (cfg.environmentFile != null) [
+            cfg.environmentFile
+          ];
+          StateDirectory = [
+            "pretix"
+          ];
+          StateDirectoryMode = "0755";
+          CacheDirectory = "pretix";
+          LogsDirectory = "pretix";
+          WorkingDirectory = cfg.settings.pretix.datadir;
+          SupplementaryGroups = optionals withRedis [
+            "redis-pretix"
+          ];
+          AmbientCapabilities = "";
+          CapabilityBoundingSet = [ "" ];
+          DevicePolicy = "closed";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = false; # required by pdftk
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateTmp = true;
+          ProcSubset = "pid";
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          ProtectSystem = "strict";
+          RemoveIPC = true;
+          RestrictAddressFamilies = [
+            "AF_INET"
+            "AF_INET6"
+            "AF_UNIX"
+          ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [
+            "@system-service"
+            "~@privileged"
+            "@chown"
+          ];
+          UMask = "0022";
+        };
+      };
+    in {
+      pretix-web = recursiveUpdate commonUnitConfig {
+        description = "pretix web service";
+        after = [
+          "network.target"
+          "redis-pretix.service"
+          "postgresql.service"
+        ];
+        wantedBy = [ "multi-user.target" ];
+        preStart = ''
+          versionFile="${cfg.settings.pretix.datadir}/.version"
+          version=$(cat "$versionFile" 2>/dev/null || echo 0)
+
+          pluginsFile="${cfg.settings.pretix.datadir}/.plugins"
+          plugins=$(cat "$pluginsFile" 2>/dev/null || echo "")
+          configuredPlugins="${concatMapStringsSep "|" (package: package.name) cfg.plugins}"
+
+          if [[ $version != ${cfg.package.version} || $plugins != $configuredPlugins ]]; then
+            ${getExe' pythonEnv "pretix-manage"} migrate
+
+            echo "${cfg.package.version}" > "$versionFile"
+            echo "$configuredPlugins" > "$pluginsFile"
+          fi
+        '';
+        serviceConfig = {
+          TimeoutStartSec = "5min";
+          ExecStart = "${getExe' pythonEnv "gunicorn"} --bind unix:/run/pretix/pretix.sock ${cfg.gunicorn.extraArgs} pretix.wsgi";
+          RuntimeDirectory = "pretix";
+        };
+      };
+
+      pretix-periodic = recursiveUpdate commonUnitConfig {
+        description = "pretix periodic task runner";
+        # every 15 minutes
+        startAt = [ "*:3,18,33,48" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${getExe' pythonEnv "pretix-manage"} runperiodic";
+        };
+      };
+
+      pretix-worker = recursiveUpdate commonUnitConfig {
+        description = "pretix asynchronous job runner";
+        after = [
+          "network.target"
+          "redis-pretix.service"
+          "postgresql.service"
+        ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig.ExecStart = "${getExe' pythonEnv "celery"} -A pretix.celery_app worker ${cfg.celery.extraArgs}";
+      };
+    };
+
+    systemd.sockets.pretix-web.socketConfig = {
+      ListenStream = "/run/pretix/pretix.sock";
+      SocketUser = "nginx";
+    };
+
+    users = {
+      groups."${cfg.group}" = {};
+      users."${cfg.user}" = {
+        isSystemUser = true;
+        createHome = true;
+        home = cfg.settings.pretix.datadir;
+        inherit (cfg) group;
+      };
+    };
+  };
+}
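
For illustration only (not part of this diff): a small deployment of the new pretix module might look like the sketch below. Hostnames and the secrets path are hypothetical; the option names follow the module above, and the local PostgreSQL database and redis instance are set up by default.

  { ... }:
  {
    services.pretix = {
      enable = true;
      nginx.domain = "tickets.example.com";
      environmentFile = "/run/keys/pretix-secrets.env"; # PRETIX_SECTION_KEY=value lines
      settings = {
        pretix = {
          instance_name = "tickets.example.com";
          url = "https://tickets.example.com";
        };
        mail.from = "tickets@example.com";
      };
    };
  }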
diff --git a/nixos/modules/services/web-apps/slskd.nix b/nixos/modules/services/web-apps/slskd.nix
index 580f66ec3ac90..15a5fd1177adf 100644
--- a/nixos/modules/services/web-apps/slskd.nix
+++ b/nixos/modules/services/web-apps/slskd.nix
@@ -2,120 +2,248 @@
 
 let
   settingsFormat = pkgs.formats.yaml {};
+  defaultUser = "slskd";
 in {
   options.services.slskd = with lib; with types; {
     enable = mkEnableOption "enable slskd";
 
-    rotateLogs = mkEnableOption "enable an unit and timer that will rotate logs in /var/slskd/logs";
+    package = mkPackageOptionMD pkgs "slskd" { };
 
-    package = mkPackageOption pkgs "slskd" { };
+    user = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = "User account under which slskd runs.";
+    };
 
-    nginx = mkOption {
-      description = lib.mdDoc "options for nginx";
-      example = {
-        enable = true;
-        domain = "example.com";
-        contextPath = "/slskd";
-      };
-      type = submodule ({name, config, ...}: {
-        options = {
-          enable = mkEnableOption "enable nginx as a reverse proxy";
+    group = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = "Group under which slskd runs.";
+    };
 
-          domainName = mkOption {
-            type = str;
-            description = "Domain you want to use";
-          };
-          contextPath = mkOption {
-            type = types.path;
-            default = "/";
-            description = lib.mdDoc ''
-              The context path, i.e., the last part of the slskd
-              URL. Typically '/' or '/slskd'. Default '/'
-            '';
-          };
-        };
-      });
+    domain = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = ''
+        If non-null, enables an nginx reverse proxy virtual host at this FQDN,
+        at the path configured with `services.slskd.web.url_base`.
+      '';
+      example = "slskd.example.com";
+    };
+
+    nginx = mkOption {
+      type = types.submodule (import ../web-servers/nginx/vhost-options.nix { inherit config lib; });
+      default = {};
+      example = lib.literalExpression ''
+        {
+          enableACME = true;
+          forceSSL = true;
+        }
+      '';
+      description = ''
+        This option customizes the nginx virtual host set up for slskd.
+      '';
     };
 
     environmentFile = mkOption {
       type = path;
       description = ''
-        Path to a file containing secrets.
-        It must at least contain the variable `SLSKD_SLSK_PASSWORD`
+        Path to the environment file sourced on startup.
+        It must at least contain the variables `SLSKD_SLSK_USERNAME` and `SLSKD_SLSK_PASSWORD`.
+        Web interface credentials should also be set here in `SLSKD_USERNAME` and `SLSKD_PASSWORD`.
+        Other, optional credentials like SOCKS5 with `SLSKD_SLSK_PROXY_USERNAME` and `SLSKD_SLSK_PROXY_PASSWORD`
+        should all reside here instead of in the world-readable nix store.
+        Variables are documented at https://github.com/slskd/slskd/blob/master/docs/config.md
       '';
     };
 
     openFirewall = mkOption {
       type = bool;
-      description = ''
-        Whether to open the firewall for services.slskd.settings.listen_port";
-      '';
+      description = "Whether to open the firewall for the soulseek network listen port (not the web interface port).";
       default = false;
     };
 
     settings = mkOption {
-      description = lib.mdDoc ''
-        Configuration for slskd, see
-        [available options](https://github.com/slskd/slskd/blob/master/docs/config.md)
-        `APP_DIR` is set to /var/lib/slskd, where default download & incomplete directories,
-        log and databases will be created.
+      description = ''
+        Application configuration for slskd. See
+        [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md).
       '';
       default = {};
       type = submodule {
         freeformType = settingsFormat.type;
         options = {
+          remote_file_management = mkEnableOption "modification of share contents through the web ui";
+
+          flags = {
+            force_share_scan = mkOption {
+              type = bool;
+              description = "Force a rescan of shares on every startup.";
+            };
+            no_version_check = mkOption {
+              type = bool;
+              default = true;
+              visible = false;
+              description = "Don't perform a version check on startup.";
+            };
+          };
+
+          directories = {
+            incomplete = mkOption {
+              type = nullOr path;
+              description = "Directory where incomplete downloading files are stored.";
+              defaultText = "/var/lib/slskd/incomplete";
+              default = null;
+            };
+            downloads = mkOption {
+              type = nullOr path;
+              description = "Directory where downloaded files are stored.";
+              defaultText = "/var/lib/slskd/downloads";
+              default = null;
+            };
+          };
+
+          shares = {
+            directories = mkOption {
+              type = listOf str;
+              description = ''
+                Paths to shared directories. See
+                [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md#directories)
+                for advanced usage.
+              '';
+              example = lib.literalExpression ''[ "/home/John/Music" "!/home/John/Music/Recordings" "[Music Drive]/mnt" ]'';
+            };
+            filters = mkOption {
+              type = listOf str;
+              example = lib.literalExpression ''[ "\.ini$" "Thumbs.db$" "\.DS_Store$" ]'';
+              description = "Regular expressions of files to exclude from sharing.";
+            };
+          };
+
+          rooms = mkOption {
+            type = listOf str;
+            description = "Chat rooms to join on startup.";
+          };
 
           soulseek = {
-            username = mkOption {
+            description = mkOption {
               type = str;
-              description = "Username on the Soulseek Network";
+              description = "The user description for the Soulseek network.";
+              defaultText = "A slskd user. https://github.com/slskd/slskd";
             };
             listen_port = mkOption {
               type = port;
-              description = "Port to use for communication on the Soulseek Network";
-              default = 50000;
+              description = "The port on which to listen for incoming connections.";
+              default = 50300;
             };
           };
 
+          global = {
+            # TODO speed units
+            upload = {
+              slots = mkOption {
+                type = ints.unsigned;
+                description = "Limit of the number of concurrent upload slots.";
+              };
+              speed_limit = mkOption {
+                type = ints.unsigned;
+                description = "Total upload speed limit.";
+              };
+            };
+            download = {
+              slots = mkOption {
+                type = ints.unsigned;
+                description = "Limit of the number of concurrent download slots.";
+              };
+              speed_limit = mkOption {
+                type = ints.unsigned;
+                description = "Total upload download limit";
+              };
+            };
+          };
+
+          filters.search.request = mkOption {
+            type = listOf str;
+            example = lib.literalExpression ''[ "^.{1,2}$" ]'';
+            description = "Incoming search requests which match this filter are ignored.";
+          };
+
           web = {
             port = mkOption {
               type = port;
-              default = 5001;
-              description = "The HTTP listen port";
+              default = 5030;
+              description = "The HTTP listen port.";
             };
             url_base = mkOption {
               type = path;
-              default = config.services.slskd.nginx.contextPath;
-              defaultText = "config.services.slskd.nginx.contextPath";
-              description = lib.mdDoc ''
-                The context path, i.e., the last part of the slskd URL
-              '';
+              default = "/";
+              description = "The base path in the url for web requests.";
+            };
+            # Users should use a reverse proxy instead for https
+            https.disabled = mkOption {
+              type = bool;
+              default = true;
+              description = "Disable the built-in HTTPS server";
             };
           };
 
-          shares = {
-            directories = mkOption {
-              type = listOf str;
-              description = lib.mdDoc ''
-                Paths to your shared directories. See
-                [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md#directories)
-                for advanced usage
-              '';
+          retention = {
+            transfers = {
+              upload = {
+                succeeded = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of succeeded upload tasks.";
+                  defaultText = "(indefinite)";
+                };
+                errored = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of errored upload tasks.";
+                  defaultText = "(indefinite)";
+                };
+                cancelled = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of cancelled upload tasks.";
+                  defaultText = "(indefinite)";
+                };
+              };
+              download = {
+                succeeded = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of succeeded download tasks.";
+                  defaultText = "(indefinite)";
+                };
+                errored = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of errored download tasks.";
+                  defaultText = "(indefinite)";
+                };
+                cancelled = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of cancelled download tasks.";
+                  defaultText = "(indefinite)";
+                };
+              };
+            };
+            files = {
+              complete = mkOption {
+                type = ints.unsigned;
+                description = "Lifespan of completely downloaded files in minutes.";
+                example = 20160;
+                defaultText = "(indefinite)";
+              };
+              incomplete = mkOption {
+                type = ints.unsigned;
+                description = "Lifespan of incomplete downloading files in minutes.";
+                defaultText = "(indefinite)";
+              };
             };
           };
 
-          directories = {
-            incomplete = mkOption {
-              type = nullOr path;
-              description = "Directory where downloading files are stored";
-              defaultText = "<APP_DIR>/incomplete";
-              default = null;
-            };
-            downloads = mkOption {
-              type = nullOr path;
-              description = "Directory where downloaded files are stored";
-              defaultText = "<APP_DIR>/downloads";
-              default = null;
+          logger = {
+            # Disable by default, journald already retains as needed
+            disk = mkOption {
+              type = bool;
+              description = "Whether to log to the application directory.";
+              default = false;
+              visible = false;
             };
           };
         };
@@ -126,51 +254,42 @@ in {
   config = let
     cfg = config.services.slskd;
 
-    confWithoutNullValues = (lib.filterAttrs (key: value: value != null) cfg.settings);
+    confWithoutNullValues = (lib.filterAttrsRecursive (key: value: (builtins.tryEval value).success && value != null) cfg.settings);
 
     configurationYaml = settingsFormat.generate "slskd.yml" confWithoutNullValues;
 
   in lib.mkIf cfg.enable {
 
-    users = {
-      users.slskd = {
+    # Force off, configuration file is in nix store and is immutable
+    services.slskd.settings.remote_configuration = lib.mkForce false;
+
+    users.users = lib.optionalAttrs (cfg.user == defaultUser) {
+      "${defaultUser}" = {
+        group = cfg.group;
         isSystemUser = true;
-        group = "slskd";
       };
-      groups.slskd = {};
     };
 
-    # Reverse proxy configuration
-    services.nginx.enable = true;
-    services.nginx.virtualHosts."${cfg.nginx.domainName}" = {
-      forceSSL = true;
-      enableACME = true;
-      locations = {
-        "${cfg.nginx.contextPath}" = {
-          proxyPass = "http://localhost:${toString cfg.settings.web.port}";
-          proxyWebsockets = true;
-        };
-      };
+    users.groups = lib.optionalAttrs (cfg.group == defaultUser) {
+      "${defaultUser}" = {};
     };
 
-    # Hide state & logs
-    systemd.tmpfiles.rules = [
-      "d /var/lib/slskd/data 0750 slskd slskd - -"
-      "d /var/lib/slskd/logs 0750 slskd slskd - -"
-    ];
-
     systemd.services.slskd = {
       description = "A modern client-server application for the Soulseek file sharing network";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         Type = "simple";
-        User = "slskd";
+        User = cfg.user;
+        Group = cfg.group;
         EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
-        StateDirectory = "slskd";
+        StateDirectory = "slskd";  # Creates /var/lib/slskd and manages permissions
         ExecStart = "${cfg.package}/bin/slskd --app-dir /var/lib/slskd --config ${configurationYaml}";
         Restart = "on-failure";
         ReadOnlyPaths = map (d: builtins.elemAt (builtins.split "[^/]*(/.+)" d) 1) cfg.settings.shares.directories;
+        ReadWritePaths =
+          (lib.optional (cfg.settings.directories.incomplete != null) cfg.settings.directories.incomplete) ++
+          (lib.optional (cfg.settings.directories.downloads != null) cfg.settings.directories.downloads);
         LockPersonality = true;
         NoNewPrivileges = true;
         PrivateDevices = true;
@@ -194,18 +313,21 @@ in {
 
     networking.firewall.allowedTCPPorts = lib.optional cfg.openFirewall cfg.settings.soulseek.listen_port;
 
-    systemd.services.slskd-rotatelogs = lib.mkIf cfg.rotateLogs {
-      description = "Rotate slskd logs";
-      serviceConfig = {
-        Type = "oneshot";
-        User = "slskd";
-        ExecStart = [
-          "${pkgs.findutils}/bin/find /var/lib/slskd/logs/ -type f -mtime +10 -delete"
-          "${pkgs.findutils}/bin/find /var/lib/slskd/logs/ -type f -mtime +1  -exec ${pkgs.gzip}/bin/gzip -q {} ';'"
-        ];
-      };
-      startAt = "daily";
+    services.nginx = lib.mkIf (cfg.domain != null) {
+      enable = lib.mkDefault true;
+      virtualHosts."${cfg.domain}" = lib.mkMerge [
+        cfg.nginx
+        {
+          locations."${cfg.settings.web.url_base}" = {
+            proxyPass = "http://127.0.0.1:${toString cfg.settings.web.port}";
+            proxyWebsockets = true;
+          };
+        }
+      ];
     };
+  };
 
+  meta = {
+    maintainers = with lib.maintainers; [ ppom melvyn2 ];
   };
 }
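
For illustration only (not part of this diff): after the rework above, a host configuration for slskd might look like the following sketch. The environment file and share path are hypothetical; credentials belong in the environment file as described in the option text.

  { ... }:
  {
    services.slskd = {
      enable = true;
      domain = "slskd.example.com"; # creates the nginx virtual host above
      nginx.enableACME = true;      # passed through to the virtual host
      environmentFile = "/var/lib/secrets/slskd.env";
      openFirewall = true;          # Soulseek listen port only, not the web interface
      settings.shares.directories = [ "/srv/music" ];
    };
  }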
diff --git a/nixos/modules/services/web-apps/suwayomi-server.md b/nixos/modules/services/web-apps/suwayomi-server.md
index ff1e06c8a53ae..18e7a631443f4 100644
--- a/nixos/modules/services/web-apps/suwayomi-server.md
+++ b/nixos/modules/services/web-apps/suwayomi-server.md
@@ -101,6 +101,9 @@ Not all the configuration options are available directly in this module, but you
         port = 4567;
         autoDownloadNewChapters = false;
         maxSourcesInParallel" = 6;
+        extensionRepos = [
+          "https://raw.githubusercontent.com/MY_ACCOUNT/MY_REPO/repo/index.min.json"
+        ];
       };
     };
   };
diff --git a/nixos/modules/services/web-apps/suwayomi-server.nix b/nixos/modules/services/web-apps/suwayomi-server.nix
index 94dbe6f99356e..99c6ea2a36e60 100644
--- a/nixos/modules/services/web-apps/suwayomi-server.nix
+++ b/nixos/modules/services/web-apps/suwayomi-server.nix
@@ -102,6 +102,17 @@ in
                 '';
               };
 
+              extensionRepos = mkOption {
+                type = types.listOf types.str;
+                default = [];
+                example = [
+                  "https://raw.githubusercontent.com/MY_ACCOUNT/MY_REPO/repo/index.min.json"
+                ];
+                description = mdDoc ''
+                  URLs of repositories from which extensions can be installed.
+                '';
+              };
+
               localSourcePath = mkOption {
                 type = types.path;
                 default = cfg.dataDir;
diff --git a/nixos/modules/services/web-servers/garage.nix b/nixos/modules/services/web-servers/garage.nix
index 48dd5b34757c1..616be978b6e5b 100644
--- a/nixos/modules/services/web-servers/garage.nix
+++ b/nixos/modules/services/web-servers/garage.nix
@@ -75,7 +75,19 @@ in
       source = configFile;
     };
 
-    environment.systemPackages = [ cfg.package ]; # For administration
+    # For administration
+    environment.systemPackages = [
+      (pkgs.writeScriptBin "garage" ''
+        # make it so all future variables set are automatically exported as environment variables
+        set -a
+
+        # source the configured environmentFile if present; systemd's EnvironmentFile syntax is a minor subset of POSIX sh, and escapeShellArg avoids quoting issues
+        [ -f ${lib.escapeShellArg cfg.environmentFile} ] && . ${lib.escapeShellArg cfg.environmentFile}
+
+        # exec garage with the caller's arguments, escaping the program path as well
+        exec ${lib.escapeShellArg (lib.getExe cfg.package)} "$@"
+      '')
+    ];
 
     systemd.services.garage = {
       description = "Garage Object Storage (S3 compatible)";
diff --git a/nixos/modules/services/web-servers/stargazer.nix b/nixos/modules/services/web-servers/stargazer.nix
index 18f57363137cf..4eca33326040b 100644
--- a/nixos/modules/services/web-servers/stargazer.nix
+++ b/nixos/modules/services/web-servers/stargazer.nix
@@ -129,6 +129,12 @@ in
       example = lib.literalExpression "\"1y\"";
     };
 
+    debugMode = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "Run Stargazer in debug mode.";
+    };
+
     routes = lib.mkOption {
       type = lib.types.listOf
         (lib.types.submodule {
@@ -195,7 +201,7 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        ExecStart = "${pkgs.stargazer}/bin/stargazer ${configFile}";
+        ExecStart = "${pkgs.stargazer}/bin/stargazer ${configFile} ${lib.optionalString cfg.debugMode "-D"}";
         Restart = "always";
         # User and group
         User = cfg.user;
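
For illustration only (not part of this diff): the new `debugMode` option above merely appends `-D` to the Stargazer command line, so enabling it is a one-line change. The `enable` option is assumed to exist in the unchanged part of the module.

  { ... }:
  {
    services.stargazer = {
      enable = true;    # assumed existing option, not shown in this hunk
      debugMode = true; # adds -D to ExecStart as patched above
    };
  }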
diff --git a/nixos/modules/services/x11/desktop-managers/budgie.nix b/nixos/modules/services/x11/desktop-managers/budgie.nix
index fe39097a22e8f..466ef5c565b7e 100644
--- a/nixos/modules/services/x11/desktop-managers/budgie.nix
+++ b/nixos/modules/services/x11/desktop-managers/budgie.nix
@@ -44,6 +44,8 @@ let
     enableSshSocket = config.services.openssh.startWhenNeeded;
   };
 in {
+  meta.maintainers = lib.teams.budgie.members;
+
   options = {
     services.xserver.desktopManager.budgie = {
       enable = mkEnableOption (mdDoc "the Budgie desktop");
@@ -144,7 +146,6 @@ in {
           mate.atril
           mate.engrampa
           mate.mate-calc
-          mate.mate-terminal
           mate.mate-system-monitor
           vlc
 
@@ -158,8 +159,11 @@ in {
         ] config.environment.budgie.excludePackages)
       ++ cfg.sessionPath;
 
+    # Both budgie-desktop-view and nemo default to this terminal emulator.
+    programs.gnome-terminal.enable = mkDefault true;
+
     # Fonts.
-    fonts.packages = mkDefault [
+    fonts.packages = [
       pkgs.noto-fonts
       pkgs.hack-font
     ];
@@ -212,7 +216,6 @@ in {
     services.colord.enable = mkDefault true; # for BCC's Color panel.
     services.gnome.at-spi2-core.enable = mkDefault true; # for BCC's A11y panel.
     services.accounts-daemon.enable = mkDefault true; # for BCC's Users panel.
-    services.fprintd.enable = mkDefault true; # for BCC's Users panel.
     services.udisks2.enable = mkDefault true; # for BCC's Details panel.
 
     # For BCC's Online Accounts panel.
diff --git a/nixos/modules/services/x11/desktop-managers/deepin.nix b/nixos/modules/services/x11/desktop-managers/deepin.nix
index e6f221201013e..902e3a9317dd1 100644
--- a/nixos/modules/services/x11/desktop-managers/deepin.nix
+++ b/nixos/modules/services/x11/desktop-managers/deepin.nix
@@ -66,7 +66,7 @@ in
       services.upower.enable = mkDefault config.powerManagement.enable;
       networking.networkmanager.enable = mkDefault true;
       programs.dconf.enable = mkDefault true;
-      programs.gnupg.agent.pinentryPackage = pkgs.pinentry-qt;
+      programs.gnupg.agent.pinentryPackage = mkDefault pkgs.pinentry-qt;
 
       fonts.packages = with pkgs; [ noto-fonts ];
       xdg.mime.enable = true;
diff --git a/nixos/modules/services/x11/desktop-managers/default.nix b/nixos/modules/services/x11/desktop-managers/default.nix
index ecb8d1e91bde2..33d0a7b526436 100644
--- a/nixos/modules/services/x11/desktop-managers/default.nix
+++ b/nixos/modules/services/x11/desktop-managers/default.nix
@@ -18,7 +18,7 @@ in
   # determines the default: later modules (if enabled) are preferred.
   # E.g., if Plasma 5 is enabled, it supersedes xterm.
   imports = [
-    ./none.nix ./xterm.nix ./phosh.nix ./xfce.nix ./plasma5.nix ./plasma6.nix ./lumina.nix
+    ./none.nix ./xterm.nix ./phosh.nix ./xfce.nix ./plasma5.nix ../../desktop-managers/plasma6.nix ./lumina.nix
     ./lxqt.nix ./enlightenment.nix ./gnome.nix ./retroarch.nix ./kodi.nix
     ./mate.nix ./pantheon.nix ./surf-display.nix ./cde.nix
     ./cinnamon.nix ./budgie.nix ./deepin.nix
diff --git a/nixos/modules/services/x11/desktop-managers/lxqt.nix b/nixos/modules/services/x11/desktop-managers/lxqt.nix
index d3bdc4326a908..3d02deba6fc79 100644
--- a/nixos/modules/services/x11/desktop-managers/lxqt.nix
+++ b/nixos/modules/services/x11/desktop-managers/lxqt.nix
@@ -62,7 +62,7 @@ in
     # Link some extra directories in /run/current-system/software/share
     environment.pathsToLink = [ "/share" ];
 
-    programs.gnupg.agent.pinentryPackage = pkgs.pinentry-qt;
+    programs.gnupg.agent.pinentryPackage = mkDefault pkgs.pinentry-qt;
 
     # virtual file systems support for PCManFM-QT
     services.gvfs.enable = true;
diff --git a/nixos/modules/services/x11/desktop-managers/mate.nix b/nixos/modules/services/x11/desktop-managers/mate.nix
index f535a1d298b9f..957eac7848e7f 100644
--- a/nixos/modules/services/x11/desktop-managers/mate.nix
+++ b/nixos/modules/services/x11/desktop-managers/mate.nix
@@ -20,6 +20,22 @@ in
       };
 
       debug = mkEnableOption (lib.mdDoc "mate-session debug messages");
+
+      extraPanelApplets = mkOption {
+        default = [ ];
+        example = literalExpression "with pkgs.mate; [ mate-applets ]";
+        type = types.listOf types.package;
+        description = lib.mdDoc "Extra applets to add to mate-panel.";
+      };
+
+      extraCajaExtensions = mkOption {
+        default = [ ];
+        example = lib.literalExpression "with pkgs.mate; [ caja-extensions ]";
+        type = types.listOf types.package;
+        description = lib.mdDoc "Extra extensions to add to caja.";
+      };
+
+      enableWaylandSession = mkEnableOption (lib.mdDoc "MATE Wayland session");
     };
 
     environment.mate.excludePackages = mkOption {
@@ -31,55 +47,63 @@ in
 
   };
 
-  config = mkIf cfg.enable {
-
-    services.xserver.displayManager.sessionPackages = [
-      pkgs.mate.mate-session-manager
-    ];
-
-    # Let caja find extensions
-    environment.sessionVariables.CAJA_EXTENSION_DIRS = [ "${config.system.path}/lib/caja/extensions-2.0" ];
-
-    # Let mate-panel find applets
-    environment.sessionVariables."MATE_PANEL_APPLETS_DIR" = "${config.system.path}/share/mate-panel/applets";
-    environment.sessionVariables."MATE_PANEL_EXTRA_MODULES" = "${config.system.path}/lib/mate-panel/applets";
-
-    # Debugging
-    environment.sessionVariables.MATE_SESSION_DEBUG = mkIf cfg.debug "1";
-
-    environment.systemPackages = utils.removePackagesByName
-      (pkgs.mate.basePackages ++
-      pkgs.mate.extraPackages ++
-      [
-        pkgs.desktop-file-utils
-        pkgs.glib
-        pkgs.gtk3.out
-        pkgs.shared-mime-info
-        pkgs.xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
-        pkgs.yelp # for 'Contents' in 'Help' menus
-      ])
-      config.environment.mate.excludePackages;
-
-    programs.dconf.enable = true;
-    # Shell integration for VTE terminals
-    programs.bash.vteIntegration = mkDefault true;
-    programs.zsh.vteIntegration = mkDefault true;
-
-    # Mate uses this for printing
-    programs.system-config-printer.enable = (mkIf config.services.printing.enable (mkDefault true));
-
-    services.gnome.at-spi2-core.enable = true;
-    services.gnome.gnome-keyring.enable = true;
-    services.udev.packages = [ pkgs.mate.mate-settings-daemon ];
-    services.gvfs.enable = true;
-    services.upower.enable = config.powerManagement.enable;
-    services.xserver.libinput.enable = mkDefault true;
-
-    security.pam.services.mate-screensaver.unixAuth = true;
-
-    xdg.portal.configPackages = mkDefault [ pkgs.mate.mate-desktop ];
-
-    environment.pathsToLink = [ "/share" ];
-  };
-
+  config = mkMerge [
+    (mkIf (cfg.enable || cfg.enableWaylandSession) {
+      services.xserver.displayManager.sessionPackages = [
+        pkgs.mate.mate-session-manager
+      ];
+
+      # Debugging
+      environment.sessionVariables.MATE_SESSION_DEBUG = mkIf cfg.debug "1";
+
+      environment.systemPackages = utils.removePackagesByName
+        (pkgs.mate.basePackages ++
+        pkgs.mate.extraPackages ++
+        [
+          (pkgs.mate.caja-with-extensions.override {
+            extensions = cfg.extraCajaExtensions;
+          })
+          (pkgs.mate.mate-panel-with-applets.override {
+            applets = cfg.extraPanelApplets;
+          })
+          pkgs.desktop-file-utils
+          pkgs.glib
+          pkgs.gtk3.out
+          pkgs.shared-mime-info
+          pkgs.xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
+          pkgs.yelp # for 'Contents' in 'Help' menus
+        ])
+        config.environment.mate.excludePackages;
+
+      programs.dconf.enable = true;
+      # Shell integration for VTE terminals
+      programs.bash.vteIntegration = mkDefault true;
+      programs.zsh.vteIntegration = mkDefault true;
+
+      # Mate uses this for printing
+      programs.system-config-printer.enable = (mkIf config.services.printing.enable (mkDefault true));
+
+      services.gnome.at-spi2-core.enable = true;
+      services.gnome.gnome-keyring.enable = true;
+      services.udev.packages = [ pkgs.mate.mate-settings-daemon ];
+      services.gvfs.enable = true;
+      services.upower.enable = config.powerManagement.enable;
+      services.xserver.libinput.enable = mkDefault true;
+
+      security.pam.services.mate-screensaver.unixAuth = true;
+
+      xdg.portal.configPackages = mkDefault [ pkgs.mate.mate-desktop ];
+
+      environment.pathsToLink = [ "/share" ];
+    })
+    (mkIf cfg.enableWaylandSession {
+      programs.wayfire.enable = true;
+      programs.wayfire.plugins = [ pkgs.wayfirePlugins.firedecor ];
+
+      environment.sessionVariables.NIX_GSETTINGS_OVERRIDES_DIR = "${pkgs.mate.mate-gsettings-overrides}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas";
+
+      environment.systemPackages = [ pkgs.mate.mate-wayland-session ];
+      services.xserver.displayManager.sessionPackages = [ pkgs.mate.mate-wayland-session ];
+    })
+  ];
 }
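
For illustration only (not part of this diff): the MATE options added above can be combined as in the sketch below; the applet and extension packages follow the examples given in the option declarations.

  { pkgs, ... }:
  {
    services.xserver.desktopManager.mate = {
      enable = true;
      extraPanelApplets = with pkgs.mate; [ mate-applets ];
      extraCajaExtensions = with pkgs.mate; [ caja-extensions ];
      enableWaylandSession = true; # also enables wayfire with the firedecor plugin
    };
  }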
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index c884b4487e240..f516a29fb5db3 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -336,7 +336,7 @@ in
         serif = [ "Noto Serif" ];
       };
 
-      programs.gnupg.agent.pinentryPackage = pkgs.pinentry-qt;
+      programs.gnupg.agent.pinentryPackage = mkDefault pkgs.pinentry-qt;
       programs.ssh.askPassword = mkDefault "${pkgs.plasma5Packages.ksshaskpass.out}/bin/ksshaskpass";
 
       # Enable helpful DBus services.
diff --git a/nixos/modules/services/x11/desktop-managers/xfce.nix b/nixos/modules/services/x11/desktop-managers/xfce.nix
index 6bc964f4c6ed7..3ba27b2015075 100644
--- a/nixos/modules/services/x11/desktop-managers/xfce.nix
+++ b/nixos/modules/services/x11/desktop-managers/xfce.nix
@@ -131,7 +131,7 @@ in
         xfdesktop
       ] ++ optional cfg.enableScreensaver xfce4-screensaver) excludePackages;
 
-    programs.gnupg.agent.pinentryPackage = pkgs.pinentry-gtk2;
+    programs.gnupg.agent.pinentryPackage = mkDefault pkgs.pinentry-gtk2;
     programs.xfconf.enable = true;
     programs.thunar.enable = true;
 
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index 5b7f4bc58d80c..a315a3ebf3224 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -1,19 +1,24 @@
 { config, lib, pkgs, ... }:
 
-with lib;
 let
   xcfg = config.services.xserver;
   dmcfg = xcfg.displayManager;
   cfg = dmcfg.sddm;
   xEnv = config.systemd.services.display-manager.environment;
 
-  sddm = cfg.package.override(old: {
+  sddm = cfg.package.override (old: {
     withWayland = cfg.wayland.enable;
-    extraPackages = old.extraPackages or [] ++ cfg.extraPackages;
+    extraPackages = old.extraPackages or [ ] ++ cfg.extraPackages;
   });
 
   iniFmt = pkgs.formats.ini { };
 
+  inherit (lib)
+    concatMapStrings concatStringsSep getExe
+    attrNames getAttr optionalAttrs optionalString
+    mkRemovedOptionModule mkRenamedOptionModule mkIf mkEnableOption mkOption mkPackageOption types
+    ;
+
   xserverWrapper = pkgs.writeShellScript "xserver-wrapper" ''
     ${concatMapStrings (n: "export ${n}=\"${getAttr n xEnv}\"\n") (attrNames xEnv)}
     exec systemd-cat -t xserver-wrapper ${dmcfg.xserverBin} ${toString dmcfg.xserverArgs} "$@"
@@ -38,12 +43,21 @@ let
       DefaultSession = optionalString (dmcfg.defaultSession != null) "${dmcfg.defaultSession}.desktop";
 
       DisplayServer = if cfg.wayland.enable then "wayland" else "x11";
+    } // optionalAttrs (cfg.wayland.compositor == "kwin") {
+      GreeterEnvironment = concatStringsSep " " [
+        "LANG=C.UTF-8"
+        "QT_WAYLAND_SHELL_INTEGRATION=layer-shell"
+      ];
+      InputMethod = ""; # needed if we are using --inputmethod with kwin
     };
 
     Theme = {
       Current = cfg.theme;
       ThemeDir = "/run/current-system/sw/share/sddm/themes";
       FacesDir = "/run/current-system/sw/share/sddm/faces";
+    } // optionalAttrs (cfg.theme == "breeze") {
+      CursorTheme = "breeze_cursors";
+      CursorSize = 24;
     };
 
     Users = {
@@ -69,7 +83,7 @@ let
       SessionDir = "${dmcfg.sessionData.desktops}/share/wayland-sessions";
       CompositorCommand = lib.optionalString cfg.wayland.enable cfg.wayland.compositorCommand;
     };
-  } // lib.optionalAttrs dmcfg.autoLogin.enable {
+  } // optionalAttrs dmcfg.autoLogin.enable {
     Autologin = {
       User = dmcfg.autoLogin.user;
       Session = autoLoginSessionName;
@@ -83,6 +97,34 @@ let
   autoLoginSessionName =
     "${dmcfg.sessionData.autologinSession}.desktop";
 
+  compositorCmds = {
+    kwin = concatStringsSep " " [
+      "${lib.getBin pkgs.kdePackages.kwin}/bin/kwin_wayland"
+      "--no-global-shortcuts"
+      "--no-kactivities"
+      "--no-lockscreen"
+      "--locale1"
+    ];
+    # This is basically the upstream default, but with Weston referenced by full path
+    # and the configuration generated from NixOS options.
+    weston =
+      let
+        westonIni = (pkgs.formats.ini { }).generate "weston.ini" {
+          libinput = {
+            enable-tap = xcfg.libinput.mouse.tapping;
+            left-handed = xcfg.libinput.mouse.leftHanded;
+          };
+          keyboard = {
+            keymap_model = xcfg.xkb.model;
+            keymap_layout = xcfg.xkb.layout;
+            keymap_variant = xcfg.xkb.variant;
+            keymap_options = xcfg.xkb.options;
+          };
+        };
+      in
+      "${getExe pkgs.weston} --shell=kiosk -c ${westonIni}";
+  };
+
 in
 {
   imports = [
@@ -111,7 +153,7 @@ in
         '';
       };
 
-      package = mkPackageOption pkgs [ "plasma5Packages" "sddm" ] {};
+      package = mkPackageOption pkgs [ "plasma5Packages" "sddm" ] { };
 
       enableHidpi = mkOption {
         type = types.bool;
@@ -145,7 +187,7 @@ in
 
       extraPackages = mkOption {
         type = types.listOf types.package;
-        default = [];
+        default = [ ];
         defaultText = "[]";
         description = lib.mdDoc ''
           Extra Qt plugins / QML libraries to add to the environment.
@@ -206,24 +248,16 @@ in
       wayland = {
         enable = mkEnableOption "experimental Wayland support";
 
+        compositor = mkOption {
+          description = lib.mdDoc "The compositor to use: ${lib.concatStringsSep ", " (builtins.attrNames compositorCmds)}";
+          type = types.enum (builtins.attrNames compositorCmds);
+          default = "weston";
+        };
+
         compositorCommand = mkOption {
           type = types.str;
           internal = true;
-
-          # This is basically the upstream default, but with Weston referenced by full path
-          # and the configuration generated from NixOS options.
-          default = let westonIni = (pkgs.formats.ini {}).generate "weston.ini" {
-              libinput = {
-                enable-tap = xcfg.libinput.mouse.tapping;
-                left-handed = xcfg.libinput.mouse.leftHanded;
-              };
-              keyboard = {
-                keymap_model = xcfg.xkb.model;
-                keymap_layout = xcfg.xkb.layout;
-                keymap_variant = xcfg.xkb.variant;
-                keymap_options = xcfg.xkb.options;
-              };
-            }; in "${pkgs.weston}/bin/weston --shell=kiosk -c ${westonIni}";
+          default = compositorCmds.${cfg.wayland.compositor};
           description = lib.mdDoc "Command used to start the selected compositor";
         };
       };
@@ -247,8 +281,6 @@ in
       }
     ];
 
-    services.xserver.displayManager.job.execCmd = "exec /run/current-system/sw/bin/sddm";
-
     security.pam.services = {
       sddm.text = ''
         auth      substack      login
@@ -293,30 +325,41 @@ in
       uid = config.ids.uids.sddm;
     };
 
-    environment.etc."sddm.conf".source = cfgFile;
-    environment.pathsToLink = [
-      "/share/sddm"
-    ];
+    environment = {
+      etc."sddm.conf".source = cfgFile;
+      pathsToLink = [
+        "/share/sddm"
+      ];
+      systemPackages = [ sddm ];
+    };
 
     users.groups.sddm.gid = config.ids.gids.sddm;
 
-    environment.systemPackages = [ sddm ];
-    services.dbus.packages = [ sddm ];
-    systemd.tmpfiles.packages = [ sddm ];
-
-    # We're not using the upstream unit, so copy these: https://github.com/sddm/sddm/blob/develop/services/sddm.service.in
-    systemd.services.display-manager.after = [
-      "systemd-user-sessions.service"
-      "getty@tty7.service"
-      "plymouth-quit.service"
-      "systemd-logind.service"
-    ];
-    systemd.services.display-manager.conflicts = [
-      "getty@tty7.service"
-    ];
+    services = {
+      dbus.packages = [ sddm ];
+      xserver = {
+        displayManager.job.execCmd = "exec /run/current-system/sw/bin/sddm";
+        # To enable user switching, allow sddm to allocate TTYs/displays dynamically.
+        tty = null;
+        display = null;
+      };
+    };
 
-    # To enable user switching, allow sddm to allocate TTYs/displays dynamically.
-    services.xserver.tty = null;
-    services.xserver.display = null;
+    systemd = {
+      tmpfiles.packages = [ sddm ];
+
+      # We're not using the upstream unit, so copy these: https://github.com/sddm/sddm/blob/develop/services/sddm.service.in
+      services.display-manager = {
+        after = [
+          "systemd-user-sessions.service"
+          "getty@tty7.service"
+          "plymouth-quit.service"
+          "systemd-logind.service"
+        ];
+        conflicts = [
+          "getty@tty7.service"
+        ];
+      };
+    };
   };
 }
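A hedged sketch of opting into the new compositor selection for the SDDM greeter; `kwin` and `weston` are the two attribute names of `compositorCmds`, with `weston` remaining the default:

    { ... }:
    {
      services.xserver.enable = true;
      services.xserver.displayManager.sddm = {
        enable = true;
        wayland.enable = true;
        # Runs the greeter on kwin_wayland with the layer-shell GreeterEnvironment shown above.
        wayland.compositor = "kwin";
      };
    }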
diff --git a/nixos/modules/services/x11/window-managers/nimdow.nix b/nixos/modules/services/x11/window-managers/nimdow.nix
index de3192876024b..9cee4bb271a5a 100644
--- a/nixos/modules/services/x11/window-managers/nimdow.nix
+++ b/nixos/modules/services/x11/window-managers/nimdow.nix
@@ -8,16 +8,23 @@ in
 {
   options = {
     services.xserver.windowManager.nimdow.enable = mkEnableOption (lib.mdDoc "nimdow");
+    services.xserver.windowManager.nimdow.package = mkOption {
+      type = types.package;
+      default = pkgs.nimdow;
+      defaultText = "pkgs.nimdow";
+      description = lib.mdDoc "nimdow package to use";
+    };
   };
 
+
   config = mkIf cfg.enable {
     services.xserver.windowManager.session = singleton {
       name = "nimdow";
       start = ''
-        ${pkgs.nimdow}/bin/nimdow &
+        ${cfg.package}/bin/nimdow &
         waitPID=$!
       '';
     };
-    environment.systemPackages = [ pkgs.nimdow ];
+    environment.systemPackages = [ cfg.package pkgs.st ];
   };
 }
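A short sketch of the new `package` option; any derivation providing `bin/nimdow` would work, and `pkgs.nimdow` stays the default:

    { pkgs, ... }:
    {
      services.xserver.enable = true;
      services.xserver.windowManager.nimdow = {
        enable = true;
        package = pkgs.nimdow;  # swap in a pinned or patched build here if needed
      };
    }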
diff --git a/nixos/modules/services/x11/window-managers/xmonad.nix b/nixos/modules/services/x11/window-managers/xmonad.nix
index c35446bf405b8..2962f2851fa91 100644
--- a/nixos/modules/services/x11/window-managers/xmonad.nix
+++ b/nixos/modules/services/x11/window-managers/xmonad.nix
@@ -37,7 +37,7 @@ let
 
   xmonad = if (cfg.config != null) then xmonad-config else xmonad-vanilla;
 in {
-  meta.maintainers = with maintainers; [ lassulus xaverdh ivanbrennan ];
+  meta.maintainers = with maintainers; [ lassulus xaverdh ivanbrennan slotThe ];
 
   options = {
     services.xserver.windowManager.xmonad = {
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 3d7474e18263d..453f414e2a862 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -303,7 +303,7 @@ in
         type = types.listOf types.str;
         default = [ "modesetting" "fbdev" ];
         example = [
-          "nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304"
+          "nvidia"
           "amdgpu-pro"
         ];
         # TODO(@oxij): think how to easily add the rest, like those nvidia things
@@ -749,7 +749,7 @@ in
     boot.kernel.sysctl."fs.inotify.max_user_instances" = mkDefault 524288;
     boot.kernel.sysctl."fs.inotify.max_user_watches" = mkDefault 524288;
 
-    programs.gnupg.agent.pinentryPackage = lib.mkDefault pkgs.pinentry-gnome3;
+    programs.gnupg.agent.pinentryPackage = lib.mkOverride 1100 pkgs.pinentry-gnome3;
 
     systemd.defaultUnit = mkIf cfg.autorun "graphical.target";
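The switch from `mkDefault` (priority 1000) to `mkOverride 1100` turns the X server's GNOME pinentry into a weaker fallback: desktop modules that set the option with `mkDefault` (as Plasma and Xfce do above) now win, while the option's built-in default (priority 1500) still loses. A sketch of a user override, which at the normal assignment priority of 100 beats all of them:

    { pkgs, ... }:
    {
      # A plain assignment has priority 100, so it overrides both the desktop
      # managers' mkDefault (1000) and the xserver fallback (mkOverride 1100).
      programs.gnupg.agent.pinentryPackage = pkgs.pinentry-curses;
    }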
 
diff --git a/nixos/modules/system/boot/binfmt.nix b/nixos/modules/system/boot/binfmt.nix
index 08e3dce708447..2242c9da62d08 100644
--- a/nixos/modules/system/boot/binfmt.nix
+++ b/nixos/modules/system/boot/binfmt.nix
@@ -331,6 +331,7 @@ in {
           "proc-sys-fs-binfmt_misc.mount"
           "systemd-binfmt.service"
         ];
+        services.systemd-binfmt.after = [ "systemd-tmpfiles-setup.service" ];
         services.systemd-binfmt.restartTriggers = [ (builtins.toJSON config.boot.binfmt.registrations) ];
       })
     ];
diff --git a/nixos/modules/system/boot/kernel.nix b/nixos/modules/system/boot/kernel.nix
index b0ac857feb4b8..950cff386d025 100644
--- a/nixos/modules/system/boot/kernel.nix
+++ b/nixos/modules/system/boot/kernel.nix
@@ -233,7 +233,9 @@ in
         symlinks because modprobe only supports one directory.
       '';
       # Convert the list of path to only one path.
-      apply = pkgs.aggregateModules;
+      apply = let
+        kernel-name = config.boot.kernelPackages.kernel.name or "kernel";
+      in modules: (pkgs.aggregateModules modules).override { name = kernel-name + "-modules"; };
     };
 
     system.requiredKernelConfig = mkOption {
@@ -299,6 +301,7 @@ in
             "usbhid"
             "hid_generic" "hid_lenovo" "hid_apple" "hid_roccat"
             "hid_logitech_hidpp" "hid_logitech_dj" "hid_microsoft" "hid_cherry"
+            "hid_corsair"
 
           ] ++ optionals pkgs.stdenv.hostPlatform.isx86 [
             # Misc. x86 keyboard stuff.
diff --git a/nixos/modules/system/boot/luksroot.nix b/nixos/modules/system/boot/luksroot.nix
index 86a3875e2c67c..3020734783e7a 100644
--- a/nixos/modules/system/boot/luksroot.nix
+++ b/nixos/modules/system/boot/luksroot.nix
@@ -982,8 +982,10 @@ in
         }
         { assertion = config.boot.initrd.systemd.enable -> !luks.fido2Support;
           message = ''
-            systemd stage 1 does not support configuring FIDO2 unlocking through `boot.initrd.luks.devices.<name>.fido2`.
-            Use systemd-cryptenroll(1) to configure FIDO2 support.
+            systemd stage 1 does not support configuring FIDO2 unlocking through `boot.initrd.luks.fido2Support`.
+            Use systemd-cryptenroll(1) to configure FIDO2 support, and set
+            `boot.initrd.luks.devices.''${DEVICE}.crypttabExtraOpts` as appropriate per crypttab(5)
+            (e.g. `fido2-device=auto`).
           '';
         }
         # TODO
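Following the pointer in the reworded assertion message, a hedged sketch of FIDO2 unlocking under systemd stage 1 (device name and UUID are placeholders; the token is enrolled beforehand with `systemd-cryptenroll --fido2-device=auto <device>`):

    { ... }:
    {
      boot.initrd.systemd.enable = true;
      boot.initrd.luks.devices."cryptroot" = {
        device = "/dev/disk/by-uuid/XXXX-XXXX";       # placeholder
        crypttabExtraOpts = [ "fido2-device=auto" ];  # per crypttab(5), as the message suggests
      };
    }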
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index b6b0f64b94c81..9b0d750d12ce2 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -729,8 +729,8 @@ let
         (assertInt "FirewallMark")
         (assertRange "FirewallMark" 1 4294967295)
         (assertInt "Priority")
-        (assertPort "SourcePort")
-        (assertPort "DestinationPort")
+        (assertPortOrPortRange "SourcePort")
+        (assertPortOrPortRange "DestinationPort")
         (assertValueOneOf "InvertRule" boolValues)
         (assertValueOneOf "Family" ["ipv4" "ipv6" "both"])
         (assertInt "SuppressPrefixLength")
diff --git a/nixos/modules/system/boot/plymouth.nix b/nixos/modules/system/boot/plymouth.nix
index 16bca40993aeb..85f0fd4622df1 100644
--- a/nixos/modules/system/boot/plymouth.nix
+++ b/nixos/modules/system/boot/plymouth.nix
@@ -4,7 +4,6 @@ with lib;
 
 let
 
-  inherit (pkgs) nixos-icons;
   plymouth = pkgs.plymouth.override {
     systemd = config.boot.initrd.systemd.package;
   };
@@ -97,8 +96,8 @@ in
       logo = mkOption {
         type = types.path;
         # Dimensions are 48x48 to match GDM logo
-        default = "${nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png";
-        defaultText = literalExpression ''"''${nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png"'';
+        default = "${pkgs.nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png";
+        defaultText = literalExpression ''"''${pkgs.nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png"'';
         example = literalExpression ''
           pkgs.fetchurl {
             url = "https://nixos.org/logo/nixos-hires.png";
@@ -107,6 +106,7 @@ in
         '';
         description = lib.mdDoc ''
           Logo which is displayed on the splash screen.
+          Currently supports PNG file format only.
         '';
       };
 
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index 90a74c0ac5788..02a3f5113cc0a 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -13,15 +13,11 @@ let
 
   kernel-name = config.boot.kernelPackages.kernel.name or "kernel";
 
-  modulesTree = config.system.modulesTree.override { name = kernel-name + "-modules"; };
-  firmware = config.hardware.firmware;
-
-
   # Determine the set of modules that we need to mount the root FS.
   modulesClosure = pkgs.makeModulesClosure {
     rootModules = config.boot.initrd.availableKernelModules ++ config.boot.initrd.kernelModules;
-    kernel = modulesTree;
-    firmware = firmware;
+    kernel = config.system.modulesTree;
+    firmware = config.hardware.firmware;
     allowMissing = false;
   };
 
@@ -688,7 +684,7 @@ in
 
   config = mkIf config.boot.initrd.enable {
     assertions = [
-      { assertion = any (fs: fs.mountPoint == "/") fileSystems;
+      { assertion = !config.boot.initrd.systemd.enable -> any (fs: fs.mountPoint == "/") fileSystems;
         message = "The ‘fileSystems’ option does not specify your root file system.";
       }
       { assertion = let inherit (config.boot) resumeDevice; in
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 49090423e078c..a8885aee78f2b 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -97,7 +97,7 @@ let
 
       # Maintaining state across reboots.
       "systemd-random-seed.service"
-      "systemd-boot-random-seed.service"
+      ] ++ (optional cfg.package.withBootloader "systemd-boot-random-seed.service") ++ [
       "systemd-backlight@.service"
       "systemd-rfkill.service"
       "systemd-rfkill.socket"
diff --git a/nixos/modules/system/boot/systemd/initrd.nix b/nixos/modules/system/boot/systemd/initrd.nix
index f83837fbc6d41..06359f273846a 100644
--- a/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixos/modules/system/boot/systemd/initrd.nix
@@ -91,13 +91,11 @@ let
   };
 
   kernel-name = config.boot.kernelPackages.kernel.name or "kernel";
-  modulesTree = config.system.modulesTree.override { name = kernel-name + "-modules"; };
-  firmware = config.hardware.firmware;
   # Determine the set of modules that we need to mount the root FS.
   modulesClosure = pkgs.makeModulesClosure {
     rootModules = config.boot.initrd.availableKernelModules ++ config.boot.initrd.kernelModules;
-    kernel = modulesTree;
-    firmware = firmware;
+    kernel = config.system.modulesTree;
+    firmware = config.hardware.firmware;
     allowMissing = false;
   };
 
@@ -212,6 +210,19 @@ in {
       default = [];
     };
 
+    root = lib.mkOption {
+      type = lib.types.enum [ "fstab" "gpt-auto" ];
+      default = "fstab";
+      example = "gpt-auto";
+      description = ''
+        Controls how systemd interprets the root file system in the initrd. See
+        {manpage}`kernel-command-line(7)`. NixOS currently does not allow
+        specifying the root file system itself this way; instead, the `fstab`
+        value makes systemd mount the root file system declared via the
+        `fileSystems` option.
+      '';
+    };
+
     emergencyAccess = mkOption {
       type = with types; oneOf [ bool (nullOr (passwdEntry str)) ];
       description = lib.mdDoc ''
@@ -342,7 +353,12 @@ in {
   };
 
   config = mkIf (config.boot.initrd.enable && cfg.enable) {
-    assertions = map (name: {
+    assertions = [
+      {
+        assertion = cfg.root == "fstab" -> any (fs: fs.mountPoint == "/") (builtins.attrValues config.fileSystems);
+        message = "The ‘fileSystems’ option does not specify your root file system.";
+      }
+    ] ++ map (name: {
       assertion = lib.attrByPath name (throw "impossible") config.boot.initrd == "";
       message = ''
         systemd stage 1 does not support 'boot.initrd.${lib.concatStringsSep "." name}'. Please
@@ -371,7 +387,15 @@ in {
       "autofs"
       # systemd-cryptenroll
     ] ++ lib.optional cfg.enableTpm2 "tpm-tis"
-    ++ lib.optional (cfg.enableTpm2 && !(pkgs.stdenv.hostPlatform.isRiscV64 || pkgs.stdenv.hostPlatform.isArmv7)) "tpm-crb";
+    ++ lib.optional (cfg.enableTpm2 && !(pkgs.stdenv.hostPlatform.isRiscV64 || pkgs.stdenv.hostPlatform.isArmv7)) "tpm-crb"
+    ++ lib.optional cfg.package.withEfi "efivarfs";
+
+    boot.kernelParams = [
+      "root=${config.boot.initrd.systemd.root}"
+    ] ++ lib.optional (config.boot.resumeDevice != "") "resume=${config.boot.resumeDevice}"
+      # `systemd` mounts root in initrd as read-only unless "rw" is on the kernel command line.
+      # For NixOS activation to succeed, we need to have root writable in initrd.
+      ++ lib.optional (config.boot.initrd.systemd.root == "gpt-auto") "rw";
 
     boot.initrd.systemd = {
       initrdBin = [pkgs.bash pkgs.coreutils cfg.package.kmod cfg.package];
@@ -495,7 +519,7 @@ in {
               case $o in
                   init=*)
                       IFS== read -r -a initParam <<< "$o"
-                      closure="$(dirname "''${initParam[1]}")"
+                      closure="''${initParam[1]}"
                       ;;
               esac
           done
@@ -506,6 +530,13 @@ in {
             exit 1
           fi
 
+          # Resolve symlinks in the init parameter. We need this for some boot loaders
+          # (e.g. boot.loader.generationsDir).
+          closure="$(chroot /sysroot ${pkgs.coreutils}/bin/realpath "$closure")"
+
+          # Assume the directory containing the init script is the closure.
+          closure="$(dirname "$closure")"
+
           # If we are not booting a NixOS closure (e.g. init=/bin/sh),
           # we don't know what root to prepare so we don't do anything
           if ! [ -x "/sysroot$(readlink "/sysroot$closure/prepare-root" || echo "$closure/prepare-root")" ]; then
@@ -554,7 +585,5 @@ in {
         serviceConfig.Type = "oneshot";
       };
     };
-
-    boot.kernelParams = lib.mkIf (config.boot.resumeDevice != "") [ "resume=${config.boot.resumeDevice}" ];
   };
 }
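To make the new option concrete, a sketch of switching the systemd initrd to GPT auto-discovery; as wired up above, this puts `root=gpt-auto` plus `rw` on the kernel command line instead of relying on the fstab-derived root:

    { ... }:
    {
      boot.initrd.systemd.enable = true;
      boot.initrd.systemd.root = "gpt-auto";  # default is "fstab"
    }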
diff --git a/nixos/modules/system/boot/timesyncd.nix b/nixos/modules/system/boot/timesyncd.nix
index 2666e4cd6b284..ef17c1481abb4 100644
--- a/nixos/modules/system/boot/timesyncd.nix
+++ b/nixos/modules/system/boot/timesyncd.nix
@@ -21,6 +21,9 @@ with lib;
         type = types.listOf types.str;
         description = lib.mdDoc ''
           The set of NTP servers from which to synchronise.
+          Note that if this is set to an empty list, the defaults systemd itself
+          is compiled with ({0..4}.nixos.pool.ntp.org) apply.
+          In case you want to disable timesyncd altogether, use the `enable` option.
         '';
       };
       extraConfig = mkOption {
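A sketch distinguishing the cases described above:

    { ... }:
    {
      # Explicit servers replace the {0..4}.nixos.pool.ntp.org defaults.
      services.timesyncd.servers = [ "time.cloudflare.com" ];  # example server, pick your own

      # An empty list ([ ]) falls back to systemd's compiled-in defaults;
      # to turn the daemon off entirely, set services.timesyncd.enable = false instead.
    }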
diff --git a/nixos/modules/system/boot/uki.nix b/nixos/modules/system/boot/uki.nix
index ce00ac8e63978..c8d3c2f6605f5 100644
--- a/nixos/modules/system/boot/uki.nix
+++ b/nixos/modules/system/boot/uki.nix
@@ -7,8 +7,6 @@ let
   inherit (pkgs.stdenv.hostPlatform) efiArch;
 
   format = pkgs.formats.ini { };
-  ukifyConfig = format.generate "ukify.conf" cfg.settings;
-
 in
 
 {
@@ -48,6 +46,15 @@ in
           contains and how it is built.
         '';
       };
+
+      configFile = lib.mkOption {
+        type = lib.types.path;
+        description = lib.mdDoc ''
+          The configuration file passed to {manpage}`ukify(1)` to create the UKI.
+
+          By default this configuration file is created from {option}`boot.uki.settings`.
+        '';
+      };
     };
 
     system.boot.loader.ukiFile = lib.mkOption {
@@ -75,9 +82,13 @@ in
         OSRelease = lib.mkOptionDefault "@${config.system.build.etc}/etc/os-release";
         # This is needed for cross compiling.
         EFIArch = lib.mkOptionDefault efiArch;
+      } // lib.optionalAttrs (config.hardware.deviceTree.enable && config.hardware.deviceTree.name != null) {
+        DeviceTree = lib.mkOptionDefault "${config.hardware.deviceTree.package}/${config.hardware.deviceTree.name}";
       };
     };
 
+    boot.uki.configFile = lib.mkOptionDefault (format.generate "ukify.conf" cfg.settings);
+
     system.boot.loader.ukiFile =
       let
         name = config.boot.uki.name;
@@ -90,7 +101,7 @@ in
     system.build.uki = pkgs.runCommand config.system.boot.loader.ukiFile { } ''
       mkdir -p $out
       ${pkgs.buildPackages.systemdUkify}/lib/systemd/ukify build \
-        --config=${ukifyConfig} \
+        --config=${cfg.configFile} \
         --output="$out/${config.system.boot.loader.ukiFile}"
     '';
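A sketch of the new escape hatch: by default `boot.uki.configFile` is generated from `boot.uki.settings`, but a hand-written ukify configuration can be substituted (the path below is a placeholder):

    { ... }:
    {
      boot.uki.configFile = ./my-ukify.conf;  # placeholder path; bypasses the generated ukify.conf
    }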
 
diff --git a/nixos/modules/tasks/filesystems/envfs.nix b/nixos/modules/tasks/filesystems/envfs.nix
index 365cb46ff2fe3..6719a03610d10 100644
--- a/nixos/modules/tasks/filesystems/envfs.nix
+++ b/nixos/modules/tasks/filesystems/envfs.nix
@@ -7,6 +7,7 @@ let
       device = "none";
       fsType = "envfs";
       options = [
+        "bind-mount=/bin"
         "fallback-path=${pkgs.runCommand "fallback-path" {} (''
           mkdir -p $out
           ln -s ${config.environment.usrbinenv} $out/env
@@ -15,6 +16,9 @@ let
         "nofail"
       ];
     };
+    # We need to bind-mount /bin to /usr/bin, because otherwise upgrading from
+    # envfs < 1.0.5 would leave the old envfs mount in place without the /bin bind mount.
+    # Systemd is smart enough not to mount /bin if it is already mounted.
     "/bin" = {
       device = "/usr/bin";
       fsType = "none";
diff --git a/nixos/modules/testing/test-instrumentation.nix b/nixos/modules/testing/test-instrumentation.nix
index 6aa718c1975d7..50a54a006415d 100644
--- a/nixos/modules/testing/test-instrumentation.nix
+++ b/nixos/modules/testing/test-instrumentation.nix
@@ -42,8 +42,10 @@ let
         # Otherwise we get errors on the terminal because bash tries to
         # setup things like job control.
         # Note: calling bash explicitly here instead of sh makes sure that
-        # we can also run non-NixOS guests during tests.
-        PS1= exec /usr/bin/env bash --norc /dev/hvc0
+        # we can also run non-NixOS guests during tests. This, however, is
+        # mostly futureproofing as the test instrumentation is still very
+        # tightly coupled to NixOS.
+        PS1= exec ${pkgs.coreutils}/bin/env bash --norc /dev/hvc0
       '';
       serviceConfig.KillSignal = "SIGHUP";
   };
@@ -121,7 +123,9 @@ in
           }
         ];
 
-        contents."/usr/bin/env".source = "${pkgs.coreutils}/bin/env";
+        storePaths = [
+          "${pkgs.coreutils}/bin/env"
+        ];
       })
     ];
 
diff --git a/nixos/modules/virtualisation/amazon-ec2-amis.nix b/nixos/modules/virtualisation/amazon-ec2-amis.nix
index ff88f02e5d334..97636f4dcb2a5 100644
--- a/nixos/modules/virtualisation/amazon-ec2-amis.nix
+++ b/nixos/modules/virtualisation/amazon-ec2-amis.nix
@@ -1,3 +1,6 @@
+# NOTE: this file will stop being updated.
+# Please use https://nixos.github.io/amis/ and https://nixos.github.io/amis/images.json
+# instead.
 let self = {
   "14.04".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-71c6f470";
   "14.04".ap-northeast-1.x86_64-linux.pv-ebs = "ami-4dcbf84c";
@@ -584,5 +587,68 @@ let self = {
   "23.05".us-west-1.aarch64-linux.hvm-ebs = "ami-0e75c8f3deb1f842b";
   "23.05".us-west-2.aarch64-linux.hvm-ebs = "ami-0d0979d889078d036";
 
-  latest = self."23.05";
+
+  # 23.11.4976.79baff8812a0
+
+  "23.11".eu-north-1.x86_64-linux.hvm-ebs = "ami-00e64007071666b9e";
+  "23.11".ap-south-2.x86_64-linux.hvm-ebs = "ami-0bbb8e5663f912455";
+  "23.11".ap-south-1.x86_64-linux.hvm-ebs = "ami-057ec482a7bc9cb87";
+  "23.11".eu-south-1.x86_64-linux.hvm-ebs = "ami-083f9740e86ab36b9";
+  "23.11".eu-south-2.x86_64-linux.hvm-ebs = "ami-0686a9ef630e6eeef";
+  "23.11".me-central-1.x86_64-linux.hvm-ebs = "ami-0475d5925ff0390f8";
+  "23.11".il-central-1.x86_64-linux.hvm-ebs = "ami-0997e21a353de1226";
+  "23.11".ca-central-1.x86_64-linux.hvm-ebs = "ami-0f9fc310496ea54ec";
+  "23.11".eu-central-1.x86_64-linux.hvm-ebs = "ami-0923e79b6f9b198fa";
+  "23.11".eu-central-2.x86_64-linux.hvm-ebs = "ami-0b02e6421cde609ff";
+  "23.11".us-west-1.x86_64-linux.hvm-ebs = "ami-0e94c086e49480566";
+  "23.11".us-west-2.x86_64-linux.hvm-ebs = "ami-0a2f8942a90eb233a";
+  "23.11".af-south-1.x86_64-linux.hvm-ebs = "ami-0c9ff564e4a503c56";
+  "23.11".eu-west-3.x86_64-linux.hvm-ebs = "ami-0955d4d89c446d5ff";
+  "23.11".eu-west-2.x86_64-linux.hvm-ebs = "ami-0c5834d32e6dce6c8";
+  "23.11".eu-west-1.x86_64-linux.hvm-ebs = "ami-0e7d1823ac80520e6";
+  "23.11".ap-northeast-3.x86_64-linux.hvm-ebs = "ami-06b3692ef87ef308a";
+  "23.11".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0fb1e23007bdc6083";
+  "23.11".me-south-1.x86_64-linux.hvm-ebs = "ami-0a4beeb2fc744c149";
+  "23.11".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-0f60ab2288ac784c3";
+  "23.11".sa-east-1.x86_64-linux.hvm-ebs = "ami-058779e1d5c1cc6ae";
+  "23.11".ap-east-1.x86_64-linux.hvm-ebs = "ami-039879f1b367e167f";
+  "23.11".ca-west-1.x86_64-linux.hvm-ebs = "ami-07dc9bc5645a981f1";
+  "23.11".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0ee92af1fc4c4e5f3";
+  "23.11".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-0814092ef52275887";
+  "23.11".ap-southeast-3.x86_64-linux.hvm-ebs = "ami-0d20b4b6529b6214c";
+  "23.11".ap-southeast-4.x86_64-linux.hvm-ebs = "ami-0aa7b8aa04e5ec741";
+  "23.11".us-east-1.x86_64-linux.hvm-ebs = "ami-0c2e37dc938ed9405";
+  "23.11".us-east-2.x86_64-linux.hvm-ebs = "ami-05c218c4a08c58296";
+
+  "23.11".eu-north-1.aarch64-linux.hvm-ebs = "ami-05e68ac90786e5ac0";
+  "23.11".ap-south-2.aarch64-linux.hvm-ebs = "ami-0a53356d3176a82af";
+  "23.11".ap-south-1.aarch64-linux.hvm-ebs = "ami-0cc335be202b53954";
+  "23.11".eu-south-1.aarch64-linux.hvm-ebs = "ami-05d387849385390c0";
+  "23.11".eu-south-2.aarch64-linux.hvm-ebs = "ami-0193deb9aa4b398bb";
+  "23.11".me-central-1.aarch64-linux.hvm-ebs = "ami-05bd96550451e131d";
+  "23.11".il-central-1.aarch64-linux.hvm-ebs = "ami-0264204c502a16f49";
+  "23.11".ca-central-1.aarch64-linux.hvm-ebs = "ami-08345537f80791b3c";
+  "23.11".eu-central-1.aarch64-linux.hvm-ebs = "ami-03b257130b2b01000";
+  "23.11".eu-central-2.aarch64-linux.hvm-ebs = "ami-0b4412d8a4d3fb7ae";
+  "23.11".us-west-1.aarch64-linux.hvm-ebs = "ami-0a2d70cd7a3b03e24";
+  "23.11".us-west-2.aarch64-linux.hvm-ebs = "ami-01b8e062c74311de0";
+  "23.11".af-south-1.aarch64-linux.hvm-ebs = "ami-011c296fee301596b";
+  "23.11".eu-west-3.aarch64-linux.hvm-ebs = "ami-024531ee8546d7ec7";
+  "23.11".eu-west-2.aarch64-linux.hvm-ebs = "ami-0a5dda5cf7e00b39b";
+  "23.11".eu-west-1.aarch64-linux.hvm-ebs = "ami-013f7a51b602dec7d";
+  "23.11".ap-northeast-3.aarch64-linux.hvm-ebs = "ami-0220fc0e06979c6ff";
+  "23.11".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-0978048539bbad573";
+  "23.11".me-south-1.aarch64-linux.hvm-ebs = "ami-02057e1a9c901b506";
+  "23.11".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-05616e07e227f9982";
+  "23.11".sa-east-1.aarch64-linux.hvm-ebs = "ami-0bf5255283cec803b";
+  "23.11".ap-east-1.aarch64-linux.hvm-ebs = "ami-00432470e52956e49";
+  "23.11".ca-west-1.aarch64-linux.hvm-ebs = "ami-0675649785473e1ac";
+  "23.11".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-006494f0fa381ba53";
+  "23.11".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-0d8c15d68266d6881";
+  "23.11".ap-southeast-3.aarch64-linux.hvm-ebs = "ami-0db38da4ed78d2a10";
+  "23.11".ap-southeast-4.aarch64-linux.hvm-ebs = "ami-098b3b942486b5e71";
+  "23.11".us-east-1.aarch64-linux.hvm-ebs = "ami-02d1d66bda50b9036";
+  "23.11".us-east-2.aarch64-linux.hvm-ebs = "ami-0aa62457617706386";
+
+  latest = self."23.11";
 }; in self
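For anyone still consuming this table directly (it is being superseded by the AMIs listed at https://nixos.github.io/amis/), an evaluation sketch based on the entries added here:

    # Evaluates to "ami-0c2e37dc938ed9405" with the 23.11 entries added in this commit.
    (import ./nixos/modules/virtualisation/amazon-ec2-amis.nix)
      .latest.us-east-1.x86_64-linux.hvm-ebs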
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index c7fe1bed51592..77730178422c3 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -79,6 +79,10 @@ in
       serviceConfig.StandardOutput = "journal+console";
     };
 
+    # Amazon-issued AMIs include the SSM Agent by default, so we do the same.
+    # https://docs.aws.amazon.com/systems-manager/latest/userguide/ami-preinstalled-agent.html
+    services.amazon-ssm-agent.enable = true;
+
     # Allow root logins only using the SSH key that the user specified
     # at instance creation time.
     services.openssh.enable = true;
diff --git a/nixos/modules/virtualisation/incus.nix b/nixos/modules/virtualisation/incus.nix
index a561c5682ae58..1ceaa40cca9dc 100644
--- a/nixos/modules/virtualisation/incus.nix
+++ b/nixos/modules/virtualisation/incus.nix
@@ -1,8 +1,80 @@
-{ config, lib, pkgs, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
 
 let
   cfg = config.virtualisation.incus;
   preseedFormat = pkgs.formats.yaml { };
+
+  serverBinPath = ''${pkgs.qemu_kvm}/libexec:${
+    lib.makeBinPath (
+      with pkgs;
+      [
+        cfg.package
+
+        acl
+        attr
+        bash
+        btrfs-progs
+        cdrkit
+        coreutils
+        criu
+        dnsmasq
+        e2fsprogs
+        findutils
+        getent
+        gnugrep
+        gnused
+        gnutar
+        gptfdisk
+        gzip
+        iproute2
+        iptables
+        kmod
+        lvm2
+        minio
+        nftables
+        qemu_kvm
+        qemu-utils
+        rsync
+        squashfsTools
+        systemd
+        thin-provisioning-tools
+        util-linux
+        virtiofsd
+        xz
+
+        (writeShellScriptBin "apparmor_parser" ''
+          exec '${apparmor-parser}/bin/apparmor_parser' -I '${apparmor-profiles}/etc/apparmor.d' "$@"
+        '')
+      ]
+      ++ lib.optionals config.boot.zfs.enabled [
+        config.boot.zfs.package
+        "${config.boot.zfs.package}/lib/udev"
+      ]
+      ++ lib.optionals config.virtualisation.vswitch.enable [ config.virtualisation.vswitch.package ]
+    )
+  }'';
+
+  # https://github.com/lxc/incus/blob/cff35a29ee3d7a2af1f937cbb6cf23776941854b/internal/server/instance/drivers/driver_qemu.go#L123
+  ovmf-prefix = if pkgs.stdenv.hostPlatform.isAarch64 then "AAVMF" else "OVMF";
+  ovmf = pkgs.linkFarm "incus-ovmf" [
+    {
+      name = "OVMF_CODE.4MB.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_CODE.fd";
+    }
+    {
+      name = "OVMF_VARS.4MB.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_VARS.fd";
+    }
+    {
+      name = "OVMF_VARS.4MB.ms.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_VARS.fd";
+    }
+  ];
 in
 {
   meta = {
@@ -11,26 +83,29 @@ in
 
   options = {
     virtualisation.incus = {
-      enable = lib.mkEnableOption (lib.mdDoc ''
+      enable = lib.mkEnableOption ''
         incusd, a daemon that manages containers and virtual machines.
 
         Users in the "incus-admin" group can interact with
         the daemon (e.g. to start or stop containers) using the
         {command}`incus` command line tool, among others.
-      '');
+      '';
 
       package = lib.mkPackageOption pkgs "incus" { };
 
       lxcPackage = lib.mkPackageOption pkgs "lxc" { };
 
+      clientPackage = lib.mkPackageOption pkgs [
+        "incus"
+        "client"
+      ] { };
+
       preseed = lib.mkOption {
-        type = lib.types.nullOr (
-          lib.types.submodule { freeformType = preseedFormat.type; }
-        );
+        type = lib.types.nullOr (lib.types.submodule { freeformType = preseedFormat.type; });
 
         default = null;
 
-        description = lib.mdDoc ''
+        description = ''
           Configuration for Incus preseed, see
           <https://linuxcontainers.org/incus/docs/main/howto/initialize/#non-interactive-configuration>
           for supported values.
@@ -80,18 +155,16 @@ in
         };
       };
 
-      socketActivation = lib.mkEnableOption (
-        lib.mdDoc ''
-          socket-activation for starting incus.service. Enabling this option
-          will stop incus.service from starting automatically on boot.
-        ''
-      );
+      socketActivation = lib.mkEnableOption (''
+        socket-activation for starting incus.service. Enabling this option
+        will stop incus.service from starting automatically on boot.
+      '');
 
       startTimeout = lib.mkOption {
         type = lib.types.ints.unsigned;
         default = 600;
         apply = toString;
-        description = lib.mdDoc ''
+        description = ''
           Time to wait (in seconds) for incusd to become ready to process requests.
           If incusd does not reply within the configured time, `incus.service` will be
           considered failed and systemd will attempt to restart it.
@@ -99,9 +172,12 @@ in
       };
 
       ui = {
-        enable = lib.mkEnableOption (lib.mdDoc "(experimental) Incus UI");
+        enable = lib.mkEnableOption "(experimental) Incus UI";
 
-        package = lib.mkPackageOption pkgs [ "incus" "ui" ] { };
+        package = lib.mkPackageOption pkgs [
+          "incus"
+          "ui"
+        ] { };
       };
     };
   };
@@ -109,7 +185,12 @@ in
   config = lib.mkIf cfg.enable {
     assertions = [
       {
-        assertion = !(config.networking.firewall.enable && !config.networking.nftables.enable && config.virtualisation.incus.enable);
+        assertion =
+          !(
+            config.networking.firewall.enable
+            && !config.networking.nftables.enable
+            && config.virtualisation.incus.enable
+          );
         message = "Incus on NixOS is unsupported using iptables. Set `networking.nftables.enable = true;`";
       }
     ];
@@ -137,7 +218,12 @@ in
       "vhost_vsock"
     ] ++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
 
-    environment.systemPackages = [ cfg.package ];
+    environment.systemPackages = [
+      cfg.clientPackage
+
+      # gui console support
+      pkgs.spice-gtk
+    ];
 
     # Note: the following options are also declared in virtualisation.lxc, but
     # the latter can't be simply enabled to reuse the formers, because it
@@ -164,26 +250,24 @@ in
         "network-online.target"
         "lxcfs.service"
         "incus.socket"
-      ];
+      ] ++ lib.optionals config.virtualisation.vswitch.enable [ "ovs-vswitchd.service" ];
+
       requires = [
         "lxcfs.service"
         "incus.socket"
-      ];
-      wants = [
-        "network-online.target"
-      ];
+      ] ++ lib.optionals config.virtualisation.vswitch.enable [ "ovs-vswitchd.service" ];
 
-      path = lib.mkIf config.boot.zfs.enabled [
-        config.boot.zfs.package
-        "${config.boot.zfs.package}/lib/udev"
-      ];
+      wants = [ "network-online.target" ];
 
-      environment = lib.mkMerge [ {
-        # Override Path to the LXC template configuration directory
-        INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
-      } (lib.mkIf (cfg.ui.enable) {
-        "INCUS_UI" = cfg.ui.package;
-      }) ];
+      environment = lib.mkMerge [
+        {
+          INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
+          INCUS_OVMF_PATH = ovmf;
+          INCUS_USBIDS_PATH = "${pkgs.hwdata}/share/hwdata/usb.ids";
+          PATH = lib.mkForce serverBinPath;
+        }
+        (lib.mkIf (cfg.ui.enable) { "INCUS_UI" = cfg.ui.package; })
+      ];
 
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/incusd --group incus-admin";
@@ -217,15 +301,13 @@ in
     systemd.services.incus-preseed = lib.mkIf (cfg.preseed != null) {
       description = "Incus initialization with preseed file";
 
-      wantedBy = ["incus.service"];
-      after = ["incus.service"];
-      bindsTo = ["incus.service"];
-      partOf = ["incus.service"];
+      wantedBy = [ "incus.service" ];
+      after = [ "incus.service" ];
+      bindsTo = [ "incus.service" ];
+      partOf = [ "incus.service" ];
 
       script = ''
-        ${cfg.package}/bin/incus admin init --preseed <${
-          preseedFormat.generate "incus-preseed.yaml" cfg.preseed
-        }
+        ${cfg.package}/bin/incus admin init --preseed <${preseedFormat.generate "incus-preseed.yaml" cfg.preseed}
       '';
 
       serviceConfig = {
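Pulling the pieces above together, a minimal sketch of an Incus host: nftables is required by the assertion, the CLI now comes from `clientPackage`, and the experimental UI is exposed through `INCUS_UI`:

    { ... }:
    {
      networking.nftables.enable = true;   # iptables is rejected by the assertion above
      virtualisation.incus = {
        enable = true;
        ui.enable = true;                  # serves the web UI via the INCUS_UI environment variable
      };
    }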
diff --git a/nixos/modules/virtualisation/nixos-containers.nix b/nixos/modules/virtualisation/nixos-containers.nix
index d4fa707b2dd5f..5db3a336f85d5 100644
--- a/nixos/modules/virtualisation/nixos-containers.nix
+++ b/nixos/modules/virtualisation/nixos-containers.nix
@@ -509,6 +509,12 @@ in
                                 for details).
                               '';
                             }
+                            {
+                              assertion = !lib.strings.hasInfix "_" name;
+                              message = ''
+                                Names containing underscores are not allowed in nixos-containers. Please rename the container '${name}'
+                              '';
+                            }
                           ];
                         };
                       };
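A sketch of what the new assertion enforces (container names are illustrative):

    { ... }:
    {
      # containers.web_app = { ... };  # rejected: underscores are not allowed in container names
      containers.webapp = {            # accepted
        config = { ... }: { services.nginx.enable = true; };
      };
    }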
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index 459700514ffc5..96b24feeb0631 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -169,11 +169,6 @@ in rec {
         (onFullSupported "nixpkgs.jdk")
         (onSystems ["x86_64-linux"] "nixpkgs.mesa_i686") # i686 sanity check + useful
         ["nixpkgs.tarball"]
-
-        # Ensure that nixpkgs-check-by-name is available in nixos-unstable,
-        # so that a pre-built version can be used in CI for PR's
-        # See ../pkgs/test/nixpkgs-check-by-name/README.md
-        (onSystems ["x86_64-linux"] "nixpkgs.tests.nixpkgs-check-by-name")
       ];
     };
 }
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 7376cd40b910a..9cff268ae1d15 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -290,8 +290,8 @@ in {
   activation-etc-overlay-mutable = runTest ./activation/etc-overlay-mutable.nix;
   activation-etc-overlay-immutable = runTest ./activation/etc-overlay-immutable.nix;
   activation-perlless = runTest ./activation/perlless.nix;
-  etcd = handleTestOn ["x86_64-linux"] ./etcd.nix {};
-  etcd-cluster = handleTestOn ["x86_64-linux"] ./etcd-cluster.nix {};
+  etcd = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./etcd/etcd.nix {};
+  etcd-cluster = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./etcd/etcd-cluster.nix {};
   etebase-server = handleTest ./etebase-server.nix {};
   etesync-dav = handleTest ./etesync-dav.nix {};
   evcc = handleTest ./evcc.nix {};
@@ -309,6 +309,7 @@ in {
   firefox-devedition = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-devedition; };
   firefox-esr    = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr; }; # used in `tested` job
   firefox-esr-115 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-115; };
+  firefoxpwa = handleTest ./firefoxpwa.nix {};
   firejail = handleTest ./firejail.nix {};
   firewall = handleTest ./firewall.nix { nftables = false; };
   firewall-nftables = handleTest ./firewall.nix { nftables = true; };
@@ -464,7 +465,7 @@ in {
   keymap = handleTest ./keymap.nix {};
   knot = handleTest ./knot.nix {};
   komga = handleTest ./komga.nix {};
-  krb5 = discoverTests (import ./krb5 {});
+  krb5 = discoverTests (import ./krb5);
   ksm = handleTest ./ksm.nix {};
   kthxbye = handleTest ./kthxbye.nix {};
   kubernetes = handleTestOn ["x86_64-linux"] ./kubernetes {};
@@ -513,6 +514,7 @@ in {
   mastodon = discoverTests (import ./web-apps/mastodon { inherit handleTestOn; });
   pixelfed = discoverTests (import ./web-apps/pixelfed { inherit handleTestOn; });
   mate = handleTest ./mate.nix {};
+  mate-wayland = handleTest ./mate-wayland.nix {};
   matter-server = handleTest ./matter-server.nix {};
   matomo = handleTest ./matomo.nix {};
   matrix-appservice-irc = handleTest ./matrix/appservice-irc.nix {};
@@ -528,6 +530,7 @@ in {
   memcached = handleTest ./memcached.nix {};
   merecat = handleTest ./merecat.nix {};
   metabase = handleTest ./metabase.nix {};
+  mihomo = handleTest ./mihomo.nix {};
   mindustry = handleTest ./mindustry.nix {};
   minecraft = handleTest ./minecraft.nix {};
   minecraft-server = handleTest ./minecraft-server.nix {};
@@ -541,6 +544,7 @@ in {
   mod_perl = handleTest ./mod_perl.nix {};
   molly-brown = handleTest ./molly-brown.nix {};
   monado = handleTest ./monado.nix {};
+  monetdb = handleTest ./monetdb.nix {};
   monica = handleTest ./web-apps/monica.nix {};
   mongodb = handleTest ./mongodb.nix {};
   moodle = handleTest ./moodle.nix {};
@@ -560,6 +564,7 @@ in {
   munin = handleTest ./munin.nix {};
   mutableUsers = handleTest ./mutable-users.nix {};
   mxisd = handleTest ./mxisd.nix {};
+  mycelium = handleTest ./mycelium {};
   mympd = handleTest ./mympd.nix {};
   mysql = handleTest ./mysql/mysql.nix {};
   mysql-autobackup = handleTest ./mysql/mysql-autobackup.nix {};
@@ -579,6 +584,7 @@ in {
   ndppd = handleTest ./ndppd.nix {};
   nebula = handleTest ./nebula.nix {};
   netbird = handleTest ./netbird.nix {};
+  nimdow = handleTest ./nimdow.nix {};
   neo4j = handleTest ./neo4j.nix {};
   netdata = handleTest ./netdata.nix {};
   networking.networkd = handleTest ./networking.nix { networkd = true; };
@@ -613,6 +619,7 @@ in {
   nginx-variants = handleTest ./nginx-variants.nix {};
   nifi = handleTestOn ["x86_64-linux"] ./web-apps/nifi.nix {};
   nitter = handleTest ./nitter.nix {};
+  nix-config = handleTest ./nix-config.nix {};
   nix-ld = handleTest ./nix-ld.nix {};
   nix-serve = handleTest ./nix-serve.nix {};
   nix-serve-ssh = handleTest ./nix-serve-ssh.nix {};
@@ -640,6 +647,7 @@ in {
   nzbget = handleTest ./nzbget.nix {};
   nzbhydra2 = handleTest ./nzbhydra2.nix {};
   oh-my-zsh = handleTest ./oh-my-zsh.nix {};
+  ollama = handleTest ./ollama.nix {};
   ombi = handleTest ./ombi.nix {};
   openarena = handleTest ./openarena.nix {};
   openldap = handleTest ./openldap.nix {};
@@ -682,11 +690,14 @@ in {
   peering-manager = handleTest ./web-apps/peering-manager.nix {};
   peertube = handleTestOn ["x86_64-linux"] ./web-apps/peertube.nix {};
   peroxide = handleTest ./peroxide.nix {};
+  pg_anonymizer = handleTest ./pg_anonymizer.nix {};
   pgadmin4 = handleTest ./pgadmin4.nix {};
   pgbouncer = handleTest ./pgbouncer.nix {};
   pgjwt = handleTest ./pgjwt.nix {};
   pgmanage = handleTest ./pgmanage.nix {};
+  pgvecto-rs = handleTest ./pgvecto-rs.nix {};
   phosh = handleTest ./phosh.nix {};
+  photonvision = handleTest ./photonvision.nix {};
   photoprism = handleTest ./photoprism.nix {};
   php = handleTest ./php {};
   php81 = handleTest ./php { php = pkgs.php81; };
@@ -725,6 +736,7 @@ in {
   pppd = handleTest ./pppd.nix {};
   predictable-interface-names = handleTest ./predictable-interface-names.nix {};
   pretalx = runTest ./web-apps/pretalx.nix;
+  pretix = runTest ./web-apps/pretix.nix;
   printing-socket = handleTest ./printing.nix { socket = true; };
   printing-service = handleTest ./printing.nix { socket = false; };
   privoxy = handleTest ./privoxy.nix {};
@@ -779,6 +791,7 @@ in {
   sanoid = handleTest ./sanoid.nix {};
   scaphandre = handleTest ./scaphandre.nix {};
   schleuder = handleTest ./schleuder.nix {};
+  scion-freestanding-deployment = handleTest ./scion/freestanding-deployment {};
   scrutiny = handleTest ./scrutiny.nix {};
   sddm = handleTest ./sddm.nix {};
   seafile = handleTest ./seafile.nix {};
@@ -890,6 +903,7 @@ in {
   systemd-sysusers-immutable = runTest ./systemd-sysusers-immutable.nix;
   systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
   systemd-timesyncd-nscd-dnssec = handleTest ./systemd-timesyncd-nscd-dnssec.nix {};
+  systemd-user-linger = handleTest ./systemd-user-linger.nix {};
   systemd-user-tmpfiles-rules = handleTest ./systemd-user-tmpfiles-rules.nix {};
   systemd-misc = handleTest ./systemd-misc.nix {};
   systemd-userdbd = handleTest ./systemd-userdbd.nix {};
@@ -950,6 +964,7 @@ in {
   user-activation-scripts = handleTest ./user-activation-scripts.nix {};
   user-expiry = runTest ./user-expiry.nix;
   user-home-mode = handleTest ./user-home-mode.nix {};
+  ustreamer = handleTest ./ustreamer.nix {};
   uwsgi = handleTest ./uwsgi.nix {};
   v2ray = handleTest ./v2ray.nix {};
   varnish60 = handleTest ./varnish.nix { package = pkgs.varnish60; };
diff --git a/nixos/tests/armagetronad.nix b/nixos/tests/armagetronad.nix
index ff2841dedd218..d59827354b771 100644
--- a/nixos/tests/armagetronad.nix
+++ b/nixos/tests/armagetronad.nix
@@ -1,4 +1,9 @@
-import ./make-test-python.nix ({ pkgs, ...} :
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
 
 let
   user = "alice";
@@ -16,7 +21,8 @@ let
       test-support.displayManager.auto.user = user;
     };
 
-in {
+in
+makeTest {
   name = "armagetronad";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ numinit ];
@@ -269,4 +275,4 @@ in {
         srv.node.wait_until_fails(f"ss --numeric --udp --listening | grep -q {srv.port}")
     '';
 
-})
+}
diff --git a/nixos/tests/budgie.nix b/nixos/tests/budgie.nix
index fe0ed2cf80edf..5228e869b056a 100644
--- a/nixos/tests/budgie.nix
+++ b/nixos/tests/budgie.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "budgie";
 
-  meta.maintainers = [ lib.maintainers.federicoschonborn ];
+  meta.maintainers = lib.teams.budgie.members;
 
   nodes.machine = { ... }: {
     imports = [
@@ -29,6 +29,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
   testScript = { nodes, ... }:
     let
       user = nodes.machine.users.users.alice;
+      env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0";
+      su = command: "su - ${user.name} -c '${env} ${command}'";
     in
     ''
       with subtest("Wait for login"):
@@ -47,21 +49,46 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
 
       with subtest("Check if Budgie session components actually start"):
-          machine.wait_until_succeeds("pgrep budgie-daemon")
+          for i in ["budgie-daemon", "budgie-panel", "budgie-wm", "budgie-desktop-view", "gsd-media-keys"]:
+              machine.wait_until_succeeds(f"pgrep -f {i}")
+          # We don't check xwininfo for budgie-wm.
+          # See https://github.com/NixOS/nixpkgs/pull/216737#discussion_r1155312754
           machine.wait_for_window("budgie-daemon")
-          machine.wait_until_succeeds("pgrep budgie-panel")
           machine.wait_for_window("budgie-panel")
-          # We don't check xwininfo for this one.
-          # See https://github.com/NixOS/nixpkgs/pull/216737#discussion_r1155312754
-          machine.wait_until_succeeds("pgrep budgie-wm")
 
-      with subtest("Open MATE terminal"):
-          machine.succeed("su - ${user.name} -c 'DISPLAY=:0 mate-terminal >&2 &'")
-          machine.wait_for_window("Terminal")
+      with subtest("Check if various environment variables are set"):
+          cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf /run/current-system/sw/bin/budgie-wm)/environ"
+          machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Budgie:GNOME'")
+          machine.succeed(f"{cmd} | grep 'BUDGIE_PLUGIN_DATADIR' | grep '${pkgs.budgie.budgie-desktop-with-plugins.pname}'")
+
+      with subtest("Open run dialog"):
+          machine.send_key("alt-f2")
+          machine.wait_for_window("budgie-run-dialog")
+          machine.sleep(2)
+          machine.screenshot("run_dialog")
+          machine.send_key("esc")
+
+      with subtest("Open Budgie Control Center"):
+          machine.succeed("${su "budgie-control-center >&2 &"}")
+          machine.wait_for_window("Budgie Control Center")
+
+      with subtest("Lock the screen"):
+          machine.succeed("${su "budgie-screensaver-command -l >&2 &"}")
+          machine.wait_until_succeeds("${su "budgie-screensaver-command -q"} | grep 'The screensaver is active'")
+          machine.sleep(2)
+          machine.send_chars("${user.password}", delay=0.5)
+          machine.screenshot("budgie_screensaver")
+          machine.send_chars("\n")
+          machine.wait_until_succeeds("${su "budgie-screensaver-command -q"} | grep 'The screensaver is inactive'")
+          machine.sleep(2)
+
+      with subtest("Open GNOME terminal"):
+          machine.succeed("${su "gnome-terminal"}")
+          machine.wait_for_window("${user.name}@machine: ~")
 
-      with subtest("Check if budgie-wm has ever coredumped"):
-          machine.fail("coredumpctl --json=short | grep budgie-wm")
-          machine.sleep(20)
+      with subtest("Check if Budgie has ever coredumped"):
+          machine.fail("coredumpctl --json=short | grep budgie")
+          machine.sleep(10)
           machine.screenshot("screen")
     '';
 })
diff --git a/nixos/tests/drawterm.nix b/nixos/tests/drawterm.nix
index 1d444bb55433b..3594343853c01 100644
--- a/nixos/tests/drawterm.nix
+++ b/nixos/tests/drawterm.nix
@@ -38,11 +38,24 @@ let
         def drawterm_running():
             machine.succeed("pgrep drawterm")
 
+        # cage is a bit wonky here.
+        # It seems to lag behind drawing and somehow needs a single
+        # input character before the first prompt shows up.
+        # As far as I know this does not happen with any other compositor,
+        # and after spending a couple of hours with the upstream source
+        # trying to work out a proper fix, I figured just polling is OK.
+        @polling_condition
+        def cpu_shown_up():
+            machine.send_chars(".")
+            machine.wait_for_text("cpu", 1)
+
         start_all()
 
         machine.wait_for_unit("graphical.target")
         drawterm_running.wait() # type: ignore[union-attr]
-        machine.wait_for_text("cpu")
+        cpu_shown_up.wait() # type: ignore[union-attr]
         machine.send_chars("cpu\n")
         machine.wait_for_text("auth")
         machine.send_chars("cpu\n")
diff --git a/nixos/tests/etcd-cluster.nix b/nixos/tests/etcd/etcd-cluster.nix
index c77c0dd73c252..734d56dbc2233 100644
--- a/nixos/tests/etcd-cluster.nix
+++ b/nixos/tests/etcd/etcd-cluster.nix
@@ -1,6 +1,6 @@
 # This test runs simple etcd cluster
 
-import ./make-test-python.nix ({ pkgs, ... } : let
+import ../make-test-python.nix ({ pkgs, ... } : let
 
   runWithOpenSSL = file: cmd: pkgs.runCommand file {
     buildInputs = [ pkgs.openssl ];
diff --git a/nixos/tests/etcd.nix b/nixos/tests/etcd/etcd.nix
index 79857778ae1b5..a32d0f9a55d17 100644
--- a/nixos/tests/etcd.nix
+++ b/nixos/tests/etcd/etcd.nix
@@ -1,6 +1,6 @@
 # This test runs simple etcd node
 
-import ./make-test-python.nix ({ pkgs, ... } : {
+import ../make-test-python.nix ({ pkgs, ... } : {
   name = "etcd";
 
   meta = with pkgs.lib.maintainers; {
diff --git a/nixos/tests/firefoxpwa.nix b/nixos/tests/firefoxpwa.nix
new file mode 100644
index 0000000000000..374d67b01ac60
--- /dev/null
+++ b/nixos/tests/firefoxpwa.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "firefoxpwa";
+  meta.maintainers = with lib.maintainers; [ camillemndn ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+      environment.systemPackages = with pkgs; [ firefoxpwa jq ];
+
+      programs.firefox = {
+        enable = true;
+        nativeMessagingHosts.packages = [ pkgs.firefoxpwa ];
+      };
+
+      services.jellyfin.enable = true;
+    };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.start()
+
+    with subtest("Install a progressive web app"):
+        machine.wait_for_unit("jellyfin.service")
+        machine.wait_for_open_port(8096)
+        machine.succeed("firefoxpwa site install http://localhost:8096/web/manifest.json >&2")
+
+    with subtest("Launch the progressive web app"):
+        machine.succeed("firefoxpwa site launch $(jq -r < ~/.local/share/firefoxpwa/config.json '.sites | keys[0]') >&2")
+        machine.wait_for_window("Jellyfin")
+        machine.wait_for_text("Jellyfin")
+  '';
+})
diff --git a/nixos/tests/freetube.nix b/nixos/tests/freetube.nix
index faa5349382270..10f0773cb884c 100644
--- a/nixos/tests/freetube.nix
+++ b/nixos/tests/freetube.nix
@@ -40,4 +40,4 @@ let
       '';
     });
 in
-builtins.mapAttrs (k: v: mkTest k v { }) tests
+builtins.mapAttrs (k: v: mkTest k v) tests
diff --git a/nixos/tests/goss.nix b/nixos/tests/goss.nix
index 6b772d19215e3..2e77b2734464f 100644
--- a/nixos/tests/goss.nix
+++ b/nixos/tests/goss.nix
@@ -28,10 +28,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         };
         group.root.exists = true;
         kernel-param."kernel.ostype".value = "Linux";
-        service.goss = {
-          enabled = true;
-          running = true;
-        };
         user.root.exists = true;
       };
     };
@@ -46,8 +42,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     with subtest("returns health status"):
       result = json.loads(machine.succeed("curl -sS http://localhost:8080/healthz"))
 
-      assert len(result["results"]) == 10, f".results should be an array of 10 items, was {result['results']!r}"
+      assert len(result["results"]) == 8, f".results should be an array of 8 items, was {result['results']!r}"
       assert result["summary"]["failed-count"] == 0, f".summary.failed-count should be zero, was {result['summary']['failed-count']}"
-      assert result["summary"]["test-count"] == 10, f".summary.test-count should be 10, was {result['summary']['test-count']}"
+      assert result["summary"]["test-count"] == 8, f".summary.test-count should be 8, was {result['summary']['test-count']}"
     '';
 })
diff --git a/nixos/tests/hibernate.nix b/nixos/tests/hibernate.nix
index 296aa9ba68b92..6de287f63e081 100644
--- a/nixos/tests/hibernate.nix
+++ b/nixos/tests/hibernate.nix
@@ -24,8 +24,8 @@ makeTest {
       virtualisation.useNixStoreImage = true;
 
       swapDevices = lib.mkOverride 0 [ { device = "/dev/vdc"; options = [ "x-systemd.makefs" ]; } ];
-      boot.resumeDevice = "/dev/vdc";
       boot.initrd.systemd.enable = systemdStage1;
+      virtualisation.useEFIBoot = true;
     };
   };
 
diff --git a/nixos/tests/incus/container.nix b/nixos/tests/incus/container.nix
index 9260f70da98c2..a71c5355046a5 100644
--- a/nixos/tests/incus/container.nix
+++ b/nixos/tests/incus/container.nix
@@ -1,20 +1,21 @@
-import ../make-test-python.nix ({ pkgs, lib, extra ? {}, ... } :
+import ../make-test-python.nix ({ pkgs, lib, extra ? {}, name ? "incus-container", ... } :
 
 let
   releases = import ../../release.nix {
-    configuration = {
-      # Building documentation makes the test unnecessarily take a longer time:
-      documentation.enable = lib.mkForce false;
+    configuration = lib.recursiveUpdate {
+        # Building documentation makes the test take unnecessarily long:
+        documentation.enable = lib.mkForce false;
 
-      boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
-    } // extra;
+        boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
+    }
+    extra;
   };
 
   container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
   container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
 in
 {
-  name = "incus-container";
+  inherit name;
 
   meta = {
     maintainers = lib.teams.lxc.members;
diff --git a/nixos/tests/incus/default.nix b/nixos/tests/incus/default.nix
index ff36fe9d67308..b850c4fba018d 100644
--- a/nixos/tests/incus/default.nix
+++ b/nixos/tests/incus/default.nix
@@ -5,14 +5,22 @@
   handleTestOn,
 }:
 {
-  container-old-init = import ./container.nix { inherit system pkgs; };
-  container-new-init = import ./container.nix { inherit system pkgs; extra = {
-    # Enable new systemd init
-    boot.initrd.systemd.enable = true;
-  }; };
+  container-legacy-init = import ./container.nix {
+    name = "container-legacy-init";
+    inherit system pkgs;
+  };
+  container-systemd-init = import ./container.nix {
+    name = "container-systemd-init";
+    inherit system pkgs;
+    extra = {
+      boot.initrd.systemd.enable = true;
+    };
+  };
   lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
+  openvswitch = import ./openvswitch.nix { inherit system pkgs; };
   preseed = import ./preseed.nix { inherit system pkgs; };
   socket-activated = import ./socket-activated.nix { inherit system pkgs; };
-  ui = import ./ui.nix {inherit system pkgs;};
+  storage = import ./storage.nix { inherit system pkgs; };
+  ui = import ./ui.nix { inherit system pkgs; };
   virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix { inherit system pkgs; };
 }
diff --git a/nixos/tests/incus/lxd-to-incus.nix b/nixos/tests/incus/lxd-to-incus.nix
index 262f63c0f26fb..e93b76591eca4 100644
--- a/nixos/tests/incus/lxd-to-incus.nix
+++ b/nixos/tests/incus/lxd-to-incus.nix
@@ -95,7 +95,7 @@ import ../make-test-python.nix (
       machine.wait_for_unit("incus.service")
 
       with machine.nested("run migration"):
-          machine.succeed("lxd-to-incus --yes")
+          machine.succeed("${pkgs.incus}/bin/lxd-to-incus --yes")
 
       with machine.nested("verify resources migrated to incus"):
           machine.succeed("incus config show container")
diff --git a/nixos/tests/incus/openvswitch.nix b/nixos/tests/incus/openvswitch.nix
new file mode 100644
index 0000000000000..5d4aef031ad0a
--- /dev/null
+++ b/nixos/tests/incus/openvswitch.nix
@@ -0,0 +1,65 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } :
+
+{
+  name = "incus-openvswitch";
+
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      incus.enable = true;
+      vswitch.enable = true;
+      incus.preseed = {
+        networks = [
+          {
+            name = "nixostestbr0";
+            type = "bridge";
+            config = {
+              "bridge.driver" = "openvswitch";
+              "ipv4.address" = "10.0.100.1/24";
+              "ipv4.nat" = "true";
+            };
+          }
+        ];
+        profiles = [
+          {
+            name = "nixostest_default";
+            devices = {
+              eth0 = {
+                name = "eth0";
+                network = "nixostestbr0";
+                type = "nic";
+              };
+              root = {
+                path = "/";
+                pool = "default";
+                size = "35GiB";
+                type = "disk";
+              };
+            };
+          }
+        ];
+        storage_pools = [
+          {
+            name = "nixostest_pool";
+            driver = "dir";
+          }
+        ];
+      };
+    };
+    networking.nftables.enable = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("incus.service")
+    machine.wait_for_unit("incus-preseed.service")
+
+    with subtest("Verify openvswitch bridge"):
+      machine.succeed("incus network info nixostestbr0")
+
+    with subtest("Verify the bridge exists in openvswitch"):
+      machine.succeed("ovs-vsctl br-exists nixostestbr0")
+  '';
+})
diff --git a/nixos/tests/incus/storage.nix b/nixos/tests/incus/storage.nix
new file mode 100644
index 0000000000000..190f4f7451c20
--- /dev/null
+++ b/nixos/tests/incus/storage.nix
@@ -0,0 +1,46 @@
+import ../make-test-python.nix (
+  { pkgs, lib, ... }:
+
+  {
+    name = "incus-storage";
+
+    meta = {
+      maintainers = lib.teams.lxc.members;
+    };
+
+    nodes.machine =
+      { lib, ... }:
+      {
+        boot.supportedFilesystems = [ "zfs" ];
+        boot.zfs.forceImportRoot = false;
+        environment.systemPackages = [ pkgs.parted ];
+        networking.hostId = "01234567";
+        networking.nftables.enable = true;
+
+        virtualisation = {
+          emptyDiskImages = [ 2048 ];
+          incus.enable = true;
+        };
+      };
+
+    testScript = ''
+      machine.wait_for_unit("incus.service")
+
+      with subtest("Verify zfs pool created and usable"):
+        machine.succeed(
+            "zpool status",
+            "parted --script /dev/vdb mklabel gpt",
+            "zpool create zfs_pool /dev/vdb",
+        )
+
+        machine.succeed("incus storage create zfs_pool zfs source=zfs_pool/incus")
+        machine.succeed("zfs list zfs_pool/incus")
+        machine.succeed("incus storage volume create zfs_pool test_fs --type filesystem")
+        machine.succeed("incus storage volume create zfs_pool test_vol --type block")
+        machine.succeed("incus storage show zfs_pool")
+        machine.succeed("incus storage volume list zfs_pool")
+        machine.succeed("incus storage volume show zfs_pool test_fs")
+        machine.succeed("incus storage volume show zfs_pool test_vol")
+    '';
+  }
+)
diff --git a/nixos/tests/installer-systemd-stage-1.nix b/nixos/tests/installer-systemd-stage-1.nix
index 662017935412c..d10256d91d7fa 100644
--- a/nixos/tests/installer-systemd-stage-1.nix
+++ b/nixos/tests/installer-systemd-stage-1.nix
@@ -37,6 +37,7 @@
     clevisLuksFallback
     clevisZfs
     clevisZfsFallback
+    gptAutoRoot
     ;
 
 }
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index 97bb7f8def595..1de886d6a0d19 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -82,6 +82,7 @@ let
   testScriptFun = { bootLoader, createPartitions, grubDevice, grubUseEfi, grubIdentifier
                   , postInstallCommands, preBootCommands, postBootCommands, extraConfig
                   , testSpecialisationConfig, testFlakeSwitch, clevisTest, clevisFallbackTest
+                  , disableFileSystems
                   }:
     let
       qemu-common = import ../lib/qemu-common.nix { inherit (pkgs) lib pkgs; };
@@ -163,7 +164,7 @@ let
       ${createPartitions}
 
       with subtest("Create the NixOS configuration"):
-          machine.succeed("nixos-generate-config --root /mnt")
+          machine.succeed("nixos-generate-config ${optionalString disableFileSystems "--no-filesystems"} --root /mnt")
           machine.succeed("cat /mnt/etc/nixos/hardware-configuration.nix >&2")
           machine.copy_from_host(
               "${ makeConfig {
@@ -433,6 +434,7 @@ let
     , testFlakeSwitch ? false
     , clevisTest ? false
     , clevisFallbackTest ? false
+    , disableFileSystems ? false
     }:
     makeTest {
       inherit enableOCR;
@@ -541,7 +543,8 @@ let
       testScript = testScriptFun {
         inherit bootLoader createPartitions postInstallCommands preBootCommands postBootCommands
                 grubDevice grubIdentifier grubUseEfi extraConfig
-                testSpecialisationConfig testFlakeSwitch clevisTest clevisFallbackTest;
+                testSpecialisationConfig testFlakeSwitch clevisTest clevisFallbackTest
+                disableFileSystems;
       };
     };
 
@@ -1414,4 +1417,39 @@ in {
       };
     };
   };
+
+  gptAutoRoot = let
+    rootPartType = {
+      ia32 = "44479540-F297-41B2-9AF7-D131D5F0458A";
+      x64 = "4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709";
+      arm = "69DAD710-2CE4-4E3C-B16C-21A1D49ABED3";
+      aa64 = "B921B045-1DF0-41C3-AF44-4C6F280D3FAE";
+    }.${pkgs.stdenv.hostPlatform.efiArch};
+  in makeInstallerTest "gptAutoRoot" {
+    disableFileSystems = true;
+    createPartitions = ''
+      machine.succeed(
+        "sgdisk --zap-all /dev/vda",
+        "sgdisk --new=1:0:+100M --typecode=0:ef00 /dev/vda", # /boot
+        "sgdisk --new=2:0:+1G --typecode=0:8200 /dev/vda", # swap
+        "sgdisk --new=3:0:+5G --typecode=0:${rootPartType} /dev/vda", # /
+        "udevadm settle",
+
+        "mkfs.vfat /dev/vda1",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "mkfs.ext4 -L root /dev/vda3",
+        "udevadm settle",
+
+        "mount /dev/vda3 /mnt",
+        "mkdir -p /mnt/boot",
+        "mount /dev/vda1 /mnt/boot"
+      )
+    '';
+    bootLoader = "systemd-boot";
+    extraConfig = ''
+      boot.initrd.systemd.root = "gpt-auto";
+      boot.initrd.supportedFilesystems = ["ext4"];
+    '';
+  };
 }
diff --git a/nixos/tests/k3s/multi-node.nix b/nixos/tests/k3s/multi-node.nix
index 932b4639b39c8..20279f3ca4b93 100644
--- a/nixos/tests/k3s/multi-node.nix
+++ b/nixos/tests/k3s/multi-node.nix
@@ -128,9 +128,7 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
       };
     };
 
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ euank ];
-    };
+    meta.maintainers = k3s.meta.maintainers;
 
     testScript = ''
       machines = [server, server2, agent]
diff --git a/nixos/tests/k3s/single-node.nix b/nixos/tests/k3s/single-node.nix
index e059603b9c9d7..fd64a050e61ef 100644
--- a/nixos/tests/k3s/single-node.nix
+++ b/nixos/tests/k3s/single-node.nix
@@ -25,9 +25,7 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
   in
   {
     name = "${k3s.name}-single-node";
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ euank ];
-    };
+    meta.maintainers = k3s.meta.maintainers;
 
     nodes.machine = { pkgs, ... }: {
       environment.systemPackages = with pkgs; [ k3s gzip ];
diff --git a/nixos/tests/kavita.nix b/nixos/tests/kavita.nix
index f27b3fffbcf64..bb55e1fb29d43 100644
--- a/nixos/tests/kavita.nix
+++ b/nixos/tests/kavita.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ... }: {
   name = "kavita";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ misterio77 ];
@@ -8,29 +8,35 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     kavita = { config, pkgs, ... }: {
       services.kavita = {
         enable = true;
-        port = 5000;
-        tokenKeyFile = builtins.toFile "kavita.key" "QfpjFvjT83BLtZ74GE3U3Q==";
+        tokenKeyFile = builtins.toFile "kavita.key" "d26ba694b455271a8872415830fb7b5c58f8da98f9ef7f58b2ca4c34bd406512";
       };
     };
   };
 
-  testScript = let
-    regUrl = "http://kavita:5000/api/Account/register";
-    payload = builtins.toFile "payload.json" (builtins.toJSON {
-      username = "foo";
-      password = "correcthorsebatterystaple";
-      email = "foo@bar";
-    });
-  in ''
-    kavita.start
-    kavita.wait_for_unit("kavita.service")
+  testScript =
+    let
+      regUrl = "http://kavita:5000/api/Account/register";
+      loginUrl = "http://kavita:5000/api/Account/login";
+      localeUrl = "http://kavita:5000/api/locale";
+    in
+    ''
+      import json
 
-    # Check that static assets are working
-    kavita.wait_until_succeeds("curl http://kavita:5000/site.webmanifest | grep Kavita")
+      kavita.start
+      kavita.wait_for_unit("kavita.service")
 
-    # Check that registration is working
-    kavita.succeed("curl -fX POST ${regUrl} --json @${payload}")
-    # But only for the first one
-    kavita.fail("curl -fX POST ${regUrl} --json @${payload}")
-  '';
+      # Check that static assets are working
+      kavita.wait_until_succeeds("curl http://kavita:5000/site.webmanifest | grep Kavita")
+
+      # Check that registration is working
+      kavita.succeed("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""")
+      # But only for the first one
+      kavita.fail("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""")
+
+      # Log in and retrieve token
+      session = json.loads(kavita.succeed("""curl -fX POST ${loginUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'"""))
+      # Check list of locales
+      locales = json.loads(kavita.succeed(f"curl -fX GET ${localeUrl} -H 'Authorization: Bearer {session['token']}'"))
+      assert len(locales) > 0, "expected a list of locales"
+    '';
 })
diff --git a/nixos/tests/kea.nix b/nixos/tests/kea.nix
index c8ecf771fa13a..98a8e93a07609 100644
--- a/nixos/tests/kea.nix
+++ b/nixos/tests/kea.nix
@@ -44,6 +44,11 @@ import ./make-test-python.nix ({ pkgs, lib, ...}: {
             name = "/var/lib/kea/dhcp4.leases";
           };
 
+          control-socket = {
+            socket-type = "unix";
+            socket-name = "/run/kea/dhcp4.sock";
+          };
+
           interfaces-config = {
             dhcp-socket-type = "raw";
             interfaces = [
@@ -89,6 +94,25 @@ import ./make-test-python.nix ({ pkgs, lib, ...}: {
           };
         };
       };
+
+      services.kea.ctrl-agent = {
+        enable = true;
+        settings = {
+          http-host = "127.0.0.1";
+          http-port = 8000;
+          control-sockets.dhcp4 = {
+            socket-type = "unix";
+            socket-name = "/run/kea/dhcp4.sock";
+          };
+        };
+      };
+
+      services.prometheus.exporters.kea = {
+        enable = true;
+        controlSocketPaths = [
+          "http://127.0.0.1:8000"
+        ];
+      };
     };
 
     nameserver = { config, pkgs, ... }: {
@@ -182,5 +206,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...}: {
     client.wait_until_succeeds("ping -c 5 10.0.0.1")
     router.wait_until_succeeds("ping -c 5 10.0.0.3")
     nameserver.wait_until_succeeds("kdig +short client.lan.nixos.test @10.0.0.2 | grep -q 10.0.0.3")
+    router.log(router.execute("curl 127.0.0.1:9547")[1])
+    router.succeed("curl --no-buffer 127.0.0.1:9547 | grep -qE '^kea_dhcp4_addresses_assigned_total.*1.0$'")
   '';
 })
diff --git a/nixos/tests/keycloak.nix b/nixos/tests/keycloak.nix
index 228e57d1cdd6f..67b412c80961d 100644
--- a/nixos/tests/keycloak.nix
+++ b/nixos/tests/keycloak.nix
@@ -6,8 +6,8 @@ let
   certs = import ./common/acme/server/snakeoil-certs.nix;
   frontendUrl = "https://${certs.domain}";
 
-  keycloakTest = import ./make-test-python.nix (
-    { pkgs, databaseType, ... }:
+  keycloakTest = databaseType: import ./make-test-python.nix (
+    { pkgs, ... }:
     let
       initialAdminPassword = "h4Iho\"JFn't2>iQIR9";
       adminPasswordFile = pkgs.writeText "admin-password" "${initialAdminPassword}";
@@ -76,16 +76,18 @@ let
             enabled = true;
             realm = "test-realm";
             clients = [ client ];
-            users = [(
-              user // {
-                enabled = true;
-                credentials = [{
-                  type = "password";
-                  temporary = false;
-                  value = password;
-                }];
-              }
-            )];
+            users = [
+              (
+                user // {
+                  enabled = true;
+                  credentials = [{
+                    type = "password";
+                    temporary = false;
+                    value = password;
+                  }];
+                }
+              )
+            ];
           };
 
           realmDataJson = pkgs.writeText "realm-data.json" (builtins.toJSON realm);
@@ -177,7 +179,7 @@ let
   );
 in
 {
-  postgres = keycloakTest { databaseType = "postgresql"; };
-  mariadb = keycloakTest { databaseType = "mariadb"; };
-  mysql = keycloakTest { databaseType = "mysql"; };
+  postgres = keycloakTest "postgresql";
+  mariadb = keycloakTest "mariadb";
+  mysql = keycloakTest "mysql";
 }
diff --git a/nixos/tests/krb5/default.nix b/nixos/tests/krb5/default.nix
index ede085632c634..274ad580cebc9 100644
--- a/nixos/tests/krb5/default.nix
+++ b/nixos/tests/krb5/default.nix
@@ -1,4 +1,3 @@
-{ system ? builtins.currentSystem }:
 {
-  example-config = import ./example-config.nix { inherit system; };
+  example-config = import ./example-config.nix;
 }
diff --git a/nixos/tests/ladybird.nix b/nixos/tests/ladybird.nix
index 4e9ab9a36d137..8ed0f47887c7d 100644
--- a/nixos/tests/ladybird.nix
+++ b/nixos/tests/ladybird.nix
@@ -21,7 +21,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     ''
       machine.wait_for_x()
       machine.succeed("echo '<!DOCTYPE html><html><body><h1>Hello world</h1></body></html>' > page.html")
-      machine.execute("ladybird file://$(pwd)/page.html >&2 &")
+      machine.execute("Ladybird file://$(pwd)/page.html >&2 &")
       machine.wait_for_window("Ladybird")
       machine.sleep(5)
       machine.wait_for_text("Hello world")
diff --git a/nixos/tests/make-test-python.nix b/nixos/tests/make-test-python.nix
index 28569f1d2955a..32531fffd2bf3 100644
--- a/nixos/tests/make-test-python.nix
+++ b/nixos/tests/make-test-python.nix
@@ -1,5 +1,5 @@
 f: {
-  system ? builtins.currentSystem,
+  system,
   pkgs ? import ../.. { inherit system; config = {}; overlays = []; },
   ...
 } @ args:
diff --git a/nixos/tests/mate-wayland.nix b/nixos/tests/mate-wayland.nix
new file mode 100644
index 0000000000000..df39ead286e15
--- /dev/null
+++ b/nixos/tests/mate-wayland.nix
@@ -0,0 +1,63 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "mate-wayland";
+
+  meta.maintainers = lib.teams.mate.members;
+
+  nodes.machine = { ... }: {
+    imports = [
+      ./common/user-account.nix
+    ];
+
+    services.xserver.enable = true;
+    services.xserver.displayManager = {
+      sddm.enable = true; # https://github.com/canonical/lightdm/issues/63
+      sddm.wayland.enable = true;
+      defaultSession = "MATE";
+      autoLogin = {
+        enable = true;
+        user = "alice";
+      };
+    };
+    services.xserver.desktopManager.mate.enableWaylandSession = true;
+
+    hardware.pulseaudio.enable = true;
+
+    # Need to switch to a different GPU driver than the default one (-vga std) so that wayfire can launch:
+    virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.users.users.alice;
+    in
+    ''
+      machine.wait_for_unit("display-manager.service")
+
+      with subtest("Wait for Wayland server"):
+          machine.wait_for_file("/run/user/${toString user.uid}/wayland-1")
+
+      with subtest("Check if MATE session components actually start"):
+          for i in ["wayfire", "mate-panel", "mate-wayland.sh", "mate-wayland-components.sh"]:
+              machine.wait_until_succeeds(f"pgrep -f {i}")
+          machine.wait_for_text('(Applications|Places|System)')
+          # It is expected that this applet doesn't work in Wayland
+          machine.wait_for_text('WorkspaceSwitcherApplet')
+
+      with subtest("Check if various environment variables are set"):
+          cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf mate-panel)/environ"
+          machine.succeed(f"{cmd} | grep 'XDG_SESSION_TYPE' | grep 'wayland'")
+          machine.succeed(f"{cmd} | grep 'XDG_SESSION_DESKTOP' | grep 'MATE'")
+          machine.succeed(f"{cmd} | grep 'MATE_PANEL_APPLETS_DIR' | grep '${pkgs.mate.mate-panel-with-applets.pname}'")
+
+      with subtest("Check if Wayfire config is properly configured"):
+          for i in ["button_style = mate", "firedecor", "mate-wayland-components.sh"]:
+              machine.wait_until_succeeds(f"cat /home/${user.name}/.config/mate/wayfire.ini | grep '{i}'")
+
+      with subtest("Check if Wayfire has ever coredumped"):
+          machine.fail("coredumpctl --json=short | grep wayfire")
+          machine.sleep(10)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixos/tests/mate.nix b/nixos/tests/mate.nix
index 48582e18d520c..1252ec43cf3d5 100644
--- a/nixos/tests/mate.nix
+++ b/nixos/tests/mate.nix
@@ -54,6 +54,15 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           machine.wait_for_text('(Applications|Places|System)')
           machine.wait_for_text('(Computer|Home|Trash)')
 
+      with subtest("Check if various environment variables are set"):
+          machine.succeed("xargs --null --max-args=1 echo < /proc/$(pgrep -xf marco)/environ | grep 'XDG_CURRENT_DESKTOP' | grep 'MATE'")
+          # From mate-panel-with-applets packaging
+          machine.succeed("xargs --null --max-args=1 echo < /proc/$(pgrep -xf mate-panel)/environ | grep 'MATE_PANEL_APPLETS_DIR' | grep '${pkgs.mate.mate-panel-with-applets.pname}'")
+
+      with subtest("Check if applets are built with in-process support"):
+          # This is needed for Wayland support
+          machine.fail("pgrep -fa clock-applet")
+
       with subtest("Lock the screen"):
           machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is inactive'")
           machine.succeed("su - ${user.name} -c '${env} mate-screensaver-command -l >&2 &'")
diff --git a/nixos/tests/mihomo.nix b/nixos/tests/mihomo.nix
new file mode 100644
index 0000000000000..472d10050f7fb
--- /dev/null
+++ b/nixos/tests/mihomo.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "mihomo";
+  meta.maintainers = with pkgs.lib.maintainers; [ Guanran928 ];
+
+  nodes.machine = {
+    environment.systemPackages = [ pkgs.curl ];
+
+    services.nginx = {
+      enable = true;
+      statusPage = true;
+    };
+
+    services.mihomo = {
+      enable = true;
+      configFile = pkgs.writeTextFile {
+        name = "config.yaml";
+        text = ''
+          mixed-port: 7890
+          external-controller: 127.0.0.1:9090
+          authentication:
+          - "user:supersecret"
+        '';
+      };
+    };
+  };
+
+  testScript = ''
+    # Wait until it starts
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_unit("mihomo.service")
+    machine.wait_for_open_port(80)
+    machine.wait_for_open_port(7890)
+    machine.wait_for_open_port(9090)
+
+    # Proxy
+    machine.succeed("curl --fail --max-time 10 --proxy http://user:supersecret@localhost:7890 http://localhost")
+    machine.succeed("curl --fail --max-time 10 --proxy socks5://user:supersecret@localhost:7890 http://localhost")
+    machine.fail("curl --fail --max-time 10 --proxy http://user:supervillain@localhost:7890 http://localhost")
+    machine.fail("curl --fail --max-time 10 --proxy socks5://user:supervillain@localhost:7890 http://localhost")
+
+    # Web UI
+    assert '"hello":"clash"' in machine.succeed("curl --fail http://localhost:9090")
+  '';
+})
diff --git a/nixos/tests/miriway.nix b/nixos/tests/miriway.nix
index a0987d9fc41b6..24e6ec6367cdb 100644
--- a/nixos/tests/miriway.nix
+++ b/nixos/tests/miriway.nix
@@ -100,7 +100,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     # Test Wayland
     # We let Miriway start the first terminal, as we might get stuck if it's not ready to process the first keybind
     # machine.send_key("ctrl-alt-t")
-    machine.wait_for_text("alice@machine")
+    machine.wait_for_text(r"(alice|machine)")
     machine.send_chars("test-wayland\n")
     machine.wait_for_file("/tmp/test-wayland-exit-ok")
     machine.copy_from_vm("/tmp/test-wayland.out")
@@ -112,7 +112,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
 
     # Test XWayland
     machine.send_key("ctrl-alt-a")
-    machine.wait_for_text("alice@machine")
+    machine.wait_for_text(r"(alice|machine)")
     machine.send_chars("test-x11\n")
     machine.wait_for_file("/tmp/test-x11-exit-ok")
     machine.copy_from_vm("/tmp/test-x11.out")
diff --git a/nixos/tests/monetdb.nix b/nixos/tests/monetdb.nix
new file mode 100644
index 0000000000000..acbf01f769756
--- /dev/null
+++ b/nixos/tests/monetdb.nix
@@ -0,0 +1,77 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+  let creds = pkgs.writeText ".monetdb" ''
+        user=monetdb
+        password=monetdb
+      '';
+      createUser = pkgs.writeText "createUser.sql" ''
+        CREATE USER "voc" WITH PASSWORD 'voc' NAME 'VOC Explorer' SCHEMA "sys";
+        CREATE SCHEMA "voc" AUTHORIZATION "voc";
+        ALTER USER "voc" SET SCHEMA "voc";
+      '';
+      credsVoc = pkgs.writeText ".monetdb" ''
+        user=voc
+        password=voc
+      '';
+      transaction = pkgs.writeText "transaction" ''
+        START TRANSACTION;
+        CREATE TABLE test (id int, data varchar(30));
+        ROLLBACK;
+      '';
+      vocData = pkgs.fetchzip {
+        url = "https://dev.monetdb.org/Assets/VOC/voc_dump.zip";
+        hash = "sha256-sQ5acTsSAiXQfOgt2PhN7X7Z9TZGZtLrPPxgQT2pCGQ=";
+      };
+      onboardPeople = pkgs.writeText "onboardPeople" ''
+        CREATE VIEW onboard_people AS
+        SELECT * FROM (
+        SELECT 'craftsmen' AS type, craftsmen.* FROM craftsmen
+        UNION ALL
+        SELECT 'impotenten' AS type, impotenten.* FROM impotenten
+        UNION ALL
+        SELECT 'passengers' AS type, passengers.* FROM passengers
+        UNION ALL
+        SELECT 'seafarers' AS type, seafarers.* FROM seafarers
+        UNION ALL
+        SELECT 'soldiers' AS type, soldiers.* FROM soldiers
+        UNION ALL
+        SELECT 'total' AS type, total.* FROM total
+        ) AS onboard_people_table;
+        SELECT type, COUNT(*) AS total
+        FROM onboard_people GROUP BY type ORDER BY type;
+      '';
+      onboardExpected = pkgs.lib.strings.replaceStrings ["\n"] ["\\n"] ''
+        +------------+-------+
+        | type       | total |
+        +============+=======+
+        | craftsmen  |  2349 |
+        | impotenten |   938 |
+        | passengers |  2813 |
+        | seafarers  |  4468 |
+        | soldiers   |  4177 |
+        | total      |  2467 |
+        +------------+-------+
+      '';
+  in {
+    name = "monetdb";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ StillerHarpo ];
+    };
+    nodes.machine.services.monetdb.enable = true;
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("monetdb")
+      machine.succeed("monetdbd create mydbfarm")
+      machine.succeed("monetdbd start mydbfarm")
+      machine.succeed("monetdb create voc")
+      machine.succeed("monetdb release voc")
+      machine.succeed("cp ${creds} ./.monetdb")
+      assert "hello world" in machine.succeed("mclient -d voc -s \"SELECT 'hello world'\"")
+      machine.succeed("mclient -d voc ${createUser}")
+      machine.succeed("cp ${credsVoc} ./.monetdb")
+      machine.succeed("mclient -d voc ${transaction}")
+      machine.succeed("mclient -d voc ${vocData}/voc_dump.sql")
+      assert "8131" in machine.succeed("mclient -d voc -s \"SELECT count(*) FROM voyages\"")
+      assert "${onboardExpected}" in machine.succeed("mclient -d voc ${onboardPeople}")
+
+  '';
+  })
diff --git a/nixos/tests/mycelium/default.nix b/nixos/tests/mycelium/default.nix
new file mode 100644
index 0000000000000..f0d72436843c2
--- /dev/null
+++ b/nixos/tests/mycelium/default.nix
@@ -0,0 +1,57 @@
+import ../make-test-python.nix ({ lib, ... }: let
+  peer1-ip = "531:c350:28c1:dfde:ea6d:77d1:a60b:7209";
+  peer2-ip = "49f:3942:3a55:d100:4c78:c558:c4f:695b";
+in
+  {
+    name = "mycelium";
+    meta.maintainers = with lib.maintainers; [ lassulus ];
+
+    nodes = {
+
+      peer1 = { config, pkgs, ... }: {
+        virtualisation.vlans = [ 1 ];
+        networking.interfaces.eth1.ipv4.addresses = [{
+          address = "192.168.1.11";
+          prefixLength = 24;
+        }];
+
+        services.mycelium = {
+          enable = true;
+          addHostedPublicNodes = false;
+          openFirewall = true;
+          keyFile = ./peer1.key;
+          peers = [
+            "quic://192.168.1.12:9651"
+            "tcp://192.168.1.12:9651"
+          ];
+        };
+      };
+
+      peer2 = { config, pkgs, ... }: {
+        virtualisation.vlans = [ 1 ];
+        networking.interfaces.eth1.ipv4.addresses = [{
+          address = "192.168.1.12";
+          prefixLength = 24;
+        }];
+
+        services.mycelium = {
+          enable = true;
+          addHostedPublicNodes = false;
+          openFirewall = true;
+          keyFile = ./peer2.key;
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      peer1.wait_for_unit("network-online.target")
+      peer2.wait_for_unit("network-online.target")
+      peer1.wait_for_unit("mycelium.service")
+      peer2.wait_for_unit("mycelium.service")
+
+      peer1.succeed("ping -c5 ${peer2-ip}")
+      peer2.succeed("ping -c5 ${peer1-ip}")
+    '';
+  })
diff --git a/nixos/tests/mycelium/peer1.key b/nixos/tests/mycelium/peer1.key
new file mode 100644
index 0000000000000..db1cf9e72fe4b
--- /dev/null
+++ b/nixos/tests/mycelium/peer1.key
@@ -0,0 +1 @@
+sì	B0㔟ŽdûRæÈôÌH¶5œu?à»í…^
\ No newline at end of file
diff --git a/nixos/tests/mycelium/peer2.key b/nixos/tests/mycelium/peer2.key
new file mode 100644
index 0000000000000..7e757de48efbe
--- /dev/null
+++ b/nixos/tests/mycelium/peer2.key
@@ -0,0 +1 @@
+ÏXÿ1®yGÏÕ…ŸSAM»eÈÀ«¾‡œÝ7]
\ No newline at end of file
diff --git a/nixos/tests/nebula.nix b/nixos/tests/nebula.nix
index 89b91d89fcb3f..6c468153d5b27 100644
--- a/nixos/tests/nebula.nix
+++ b/nixos/tests/nebula.nix
@@ -10,6 +10,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
       environment.systemPackages = [ pkgs.nebula ];
       users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
       services.openssh.enable = true;
+      networking.firewall.enable = true; # Implicitly true, but let's make sure.
       networking.interfaces.eth1.useDHCP = false;
 
       services.nebula.networks.smoke = {
@@ -17,7 +18,10 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
         ca = "/etc/nebula/ca.crt";
         cert = "/etc/nebula/${name}.crt";
         key = "/etc/nebula/${name}.key";
-        listen = { host = "0.0.0.0"; port = 4242; };
+        listen = {
+          host = "0.0.0.0";
+          port = if (config.services.nebula.networks.smoke.isLighthouse || config.services.nebula.networks.smoke.isRelay) then 4242 else 0;
+        };
       };
     }
     extraConfig
diff --git a/nixos/tests/nimdow.nix b/nixos/tests/nimdow.nix
new file mode 100644
index 0000000000000..cefe46edc5fb2
--- /dev/null
+++ b/nixos/tests/nimdow.nix
@@ -0,0 +1,25 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "nimdow";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ marcusramberg ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = lib.mkForce "none+nimdow";
+    services.xserver.windowManager.nimdow.enable = true;
+  };
+
+  testScript = { ... }: ''
+    with subtest("ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("/home/alice/.Xauthority")
+        machine.succeed("xauth merge ~alice/.Xauthority")
+
+    with subtest("ensure we can open a new terminal"):
+        machine.send_key("meta_l-ret")
+        machine.wait_for_window(r"alice.*?machine")
+        machine.screenshot("terminal")
+  '';
+})
diff --git a/nixos/tests/nix-config.nix b/nixos/tests/nix-config.nix
new file mode 100644
index 0000000000000..907e886def351
--- /dev/null
+++ b/nixos/tests/nix-config.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "nix-config";
+  nodes.machine = { pkgs, ... }: {
+    nix.settings = {
+      nix-path = [ "nonextra=/etc/value.nix" ];
+      extra-nix-path = [ "extra=/etc/value.nix" ];
+    };
+    environment.etc."value.nix".text = "42";
+  };
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("nix-daemon.socket")
+    # regression test for the workaround for https://github.com/NixOS/nix/issues/9487
+    print(machine.succeed("nix-instantiate --find-file extra"))
+    print(machine.succeed("nix-instantiate --find-file nonextra"))
+  '';
+})
diff --git a/nixos/tests/nix-ld.nix b/nixos/tests/nix-ld.nix
index 8733f5b0c3978..9b851f88617a0 100644
--- a/nixos/tests/nix-ld.nix
+++ b/nixos/tests/nix-ld.nix
@@ -1,17 +1,39 @@
-import ./make-test-python.nix ({ lib, pkgs, ...} :
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  shared =
+    { config, pkgs, ... }:
+    {
+      programs.nix-ld.enable = true;
+      environment.systemPackages = [
+        (pkgs.runCommand "patched-hello" { } ''
+          install -D -m755 ${pkgs.hello}/bin/hello $out/bin/hello
+          patchelf $out/bin/hello --set-interpreter $(cat ${config.programs.nix-ld.package}/nix-support/ldpath)
+        '')
+      ];
+    };
+in
 {
-  name = "nix-ld";
-  nodes.machine = { pkgs, ... }: {
-    programs.nix-ld.enable = true;
-    environment.systemPackages = [
-      (pkgs.runCommand "patched-hello" {} ''
-        install -D -m755 ${pkgs.hello}/bin/hello $out/bin/hello
-        patchelf $out/bin/hello --set-interpreter $(cat ${pkgs.nix-ld}/nix-support/ldpath)
-      '')
-    ];
+  nix-ld = makeTest {
+    name = "nix-ld";
+    nodes.machine = shared;
+    testScript = ''
+      start_all()
+      machine.succeed("hello")
+    '';
   };
-  testScript = ''
-    start_all()
-    machine.succeed("hello")
- '';
-})
+  nix-ld-rs = makeTest {
+    name = "nix-ld-rs";
+    nodes.machine = {
+      imports = [ shared ];
+      programs.nix-ld.package = pkgs.nix-ld-rs;
+    };
+    testScript = ''
+      start_all()
+      machine.succeed("hello")
+    '';
+  };
+}
diff --git a/nixos/tests/nixops/default.nix b/nixos/tests/nixops/default.nix
index 8477e5059fcaf..6468b8c382249 100644
--- a/nixos/tests/nixops/default.nix
+++ b/nixos/tests/nixops/default.nix
@@ -93,23 +93,5 @@ let
 
   inherit (import ../ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
 
-  /*
-    Return a store path with a closure containing everything including
-    derivations and all build dependency outputs, all the way down.
-  */
-  allDrvOutputs = pkg:
-    let name = "allDrvOutputs-${pkg.pname or pkg.name or "unknown"}";
-    in
-    pkgs.runCommand name { refs = pkgs.writeReferencesToFile pkg.drvPath; } ''
-      touch $out
-      while read ref; do
-        case $ref in
-          *.drv)
-            cat $ref >>$out
-            ;;
-        esac
-      done <$refs
-    '';
-
 in
 tests
diff --git a/nixos/tests/ollama.nix b/nixos/tests/ollama.nix
new file mode 100644
index 0000000000000..4b21f445cdbd3
--- /dev/null
+++ b/nixos/tests/ollama.nix
@@ -0,0 +1,56 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  mainPort = "11434";
+  altPort = "11435";
+
+  curlRequest = port: request:
+    "curl http://127.0.0.1:${port}/api/generate -d '${builtins.toJSON request}'";
+
+  prompt = {
+    model = "tinydolphin";
+    prompt = "lorem ipsum";
+    options = {
+      seed = 69;
+      temperature = 0;
+    };
+  };
+in
+{
+  name = "ollama";
+  meta = with lib.maintainers; {
+    maintainers = [ abysssol ];
+  };
+
+  nodes = {
+    cpu = { ... }: {
+      services.ollama.enable = true;
+    };
+
+    rocm = { ... }: {
+      services.ollama.enable = true;
+      services.ollama.acceleration = "rocm";
+    };
+
+    cuda = { ... }: {
+      services.ollama.enable = true;
+      services.ollama.acceleration = "cuda";
+    };
+
+    altAddress = { ... }: {
+      services.ollama.enable = true;
+      services.ollama.listenAddress = "127.0.0.1:${altPort}";
+    };
+  };
+
+  testScript = ''
+    vms = [cpu, rocm, cuda, altAddress]
+
+    start_all()
+    for vm in vms:
+        vm.wait_for_unit("multi-user.target")
+
+    stdout = cpu.succeed("""${curlRequest mainPort prompt}""", timeout=100)
+
+    stdout = altAddress.succeed("""${curlRequest altPort prompt}""", timeout=100)
+  '';
+})
diff --git a/nixos/tests/opensearch.nix b/nixos/tests/opensearch.nix
index 2887ac9677656..7d37583464cb0 100644
--- a/nixos/tests/opensearch.nix
+++ b/nixos/tests/opensearch.nix
@@ -1,7 +1,7 @@
 let
-  opensearchTest =
+  opensearchTest = extraSettings:
     import ./make-test-python.nix (
-      { pkgs, lib, extraSettings ? {} }: {
+      { pkgs, lib, ... }: {
         name = "opensearch";
         meta.maintainers = with pkgs.lib.maintainers; [ shyim ];
 
@@ -27,20 +27,18 @@ in
 {
   opensearch = opensearchTest {};
   opensearchCustomPathAndUser = opensearchTest {
-    extraSettings = {
-      services.opensearch.dataDir = "/var/opensearch_test";
-      services.opensearch.user = "open_search";
-      services.opensearch.group = "open_search";
-      systemd.tmpfiles.rules = [
-        "d /var/opensearch_test 0700 open_search open_search -"
-      ];
-      users = {
-        groups.open_search = {};
-        users.open_search = {
-          description = "OpenSearch daemon user";
-          group = "open_search";
-          isSystemUser = true;
-        };
+    services.opensearch.dataDir = "/var/opensearch_test";
+    services.opensearch.user = "open_search";
+    services.opensearch.group = "open_search";
+    systemd.tmpfiles.rules = [
+      "d /var/opensearch_test 0700 open_search open_search -"
+    ];
+    users = {
+      groups.open_search = { };
+      users.open_search = {
+        description = "OpenSearch daemon user";
+        group = "open_search";
+        isSystemUser = true;
       };
     };
   };
diff --git a/nixos/tests/pg_anonymizer.nix b/nixos/tests/pg_anonymizer.nix
new file mode 100644
index 0000000000000..2960108e37c34
--- /dev/null
+++ b/nixos/tests/pg_anonymizer.nix
@@ -0,0 +1,94 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "pg_anonymizer";
+  meta.maintainers = lib.teams.flyingcircus.members;
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.pg-dump-anon ];
+    services.postgresql = {
+      enable = true;
+      extraPlugins = ps: [ ps.anonymizer ];
+      settings.shared_preload_libraries = "anon";
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("postgresql.service")
+
+    with subtest("Setup"):
+        machine.succeed("sudo -u postgres psql --command 'create database demo'")
+        machine.succeed(
+            "sudo -u postgres psql -d demo -f ${pkgs.writeText "init.sql" ''
+              create extension anon cascade;
+              select anon.init();
+              create table player(id serial, name text, points int);
+              insert into player(id,name,points) values (1,'Foo', 23);
+              insert into player(id,name,points) values (2,'Bar',42);
+              security label for anon on column player.name is 'MASKED WITH FUNCTION anon.fake_last_name();';
+              security label for anon on column player.points is 'MASKED WITH VALUE NULL';
+            ''}"
+        )
+
+    def get_player_table_contents():
+        return [
+            x.split(',') for x in machine.succeed("sudo -u postgres psql -d demo --csv --command 'select * from player'").splitlines()[1:]
+        ]
+
+    def check_anonymized_row(row, id, original_name):
+        assert row[0] == id, f"Expected row to have ID {id}, but got {row[0]}"
+        assert row[1] != original_name, f"Expected row to have a name other than {original_name}"
+        assert not bool(row[2]), "Expected points to be NULL in the anonymized row"
+
+    def find_xsv_in_dump(dump, sep=','):
+        """
+        Expecting to find a CSV (for pg_dump_anon) or TSV (for pg_dump) structure, looking like
+
+            COPY public.player ...
+            1,Shields,
+            2,Salazar,
+            \.
+
+        in the given dump (the commas are tabs in case of pg_dump).
+        Extract the CSV lines and split by `sep`.
+        """
+
+        try:
+            from itertools import dropwhile, takewhile
+            return [x.split(sep) for x in list(takewhile(
+                lambda x: x != "\\.",
+                dropwhile(
+                    lambda x: not x.startswith("COPY public.player"),
+                    dump.splitlines()
+                )
+            ))[1:]]
+        except:
+            print(f"Dump to process: {dump}")
+            raise
+
+    def check_original_data(output):
+        assert output[0] == ['1','Foo','23'], f"Expected first row from player table to be 1,Foo,23; got {output[0]}"
+        assert output[1] == ['2','Bar','42'], f"Expected second row from player table to be 2,Bar,42; got {output[1]}"
+
+    def check_anonymized_rows(output):
+        check_anonymized_row(output[0], '1', 'Foo')
+        check_anonymized_row(output[1], '2', 'Bar')
+
+    with subtest("Check initial state"):
+        check_original_data(get_player_table_contents())
+
+    with subtest("Anonymous dumps"):
+        check_original_data(find_xsv_in_dump(
+            machine.succeed("sudo -u postgres pg_dump demo"),
+            sep='\t'
+        ))
+        check_anonymized_rows(find_xsv_in_dump(
+            machine.succeed("sudo -u postgres pg_dump_anon -U postgres -h /run/postgresql -d demo"),
+            sep=','
+        ))
+
+    with subtest("Anonymize"):
+        machine.succeed("sudo -u postgres psql -d demo --command 'select anon.anonymize_database();'")
+        check_anonymized_rows(get_player_table_contents())
+  '';
+})
diff --git a/nixos/tests/pgvecto-rs.nix b/nixos/tests/pgvecto-rs.nix
new file mode 100644
index 0000000000000..cd871dab6a0f1
--- /dev/null
+++ b/nixos/tests/pgvecto-rs.nix
@@ -0,0 +1,76 @@
+# mostly copied from ./timescaledb.nix which was copied from ./postgresql.nix
+# as it seemed inappropriate to test additional extensions for postgresql there.
+
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  postgresql-versions = import ../../pkgs/servers/sql/postgresql pkgs;
+  # Test cases from https://docs.pgvecto.rs/use-cases/hybrid-search.html
+  test-sql = pkgs.writeText "postgresql-test" ''
+    CREATE EXTENSION vectors;
+
+    CREATE TABLE items (
+      id bigserial PRIMARY KEY,
+      content text NOT NULL,
+      embedding vectors.vector(3) NOT NULL -- 3 dimensions
+    );
+
+    INSERT INTO items (content, embedding) VALUES
+      ('a fat cat sat on a mat and ate a fat rat', '[1, 2, 3]'),
+      ('a fat dog sat on a mat and ate a fat rat', '[4, 5, 6]'),
+      ('a thin cat sat on a mat and ate a thin rat', '[7, 8, 9]'),
+      ('a thin dog sat on a mat and ate a thin rat', '[10, 11, 12]');
+  '';
+  make-postgresql-test = postgresql-name: postgresql-package: makeTest {
+    name = postgresql-name;
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ diogotcorreia ];
+    };
+
+    nodes.machine = { ... }:
+      {
+        services.postgresql = {
+          enable = true;
+          package = postgresql-package;
+          extraPlugins = ps: with ps; [
+            pgvecto-rs
+          ];
+          settings.shared_preload_libraries = "vectors";
+        };
+      };
+
+    testScript = ''
+      def check_count(statement, lines):
+          return 'test $(sudo -u postgres psql postgres -tAc "{}"|wc -l) -eq {}'.format(
+              statement, lines
+          )
+
+
+      machine.start()
+      machine.wait_for_unit("postgresql")
+
+      with subtest("Postgresql with extension vectors is available just after unit start"):
+          machine.succeed(check_count("SELECT * FROM pg_available_extensions WHERE name = 'vectors' AND default_version = '${postgresql-package.pkgs.pgvecto-rs.version}';", 1))
+
+      machine.succeed("sudo -u postgres psql -f ${test-sql}")
+
+      machine.succeed(check_count("SELECT content, embedding FROM items WHERE to_tsvector('english', content) @@ 'cat & rat'::tsquery;", 2))
+
+      machine.shutdown()
+    '';
+
+  };
+  applicablePostgresqlVersions = filterAttrs (_: value: versionAtLeast value.version "12") postgresql-versions;
+in
+mapAttrs'
+  (name: package: {
+    inherit name;
+    value = make-postgresql-test name package;
+  })
+  applicablePostgresqlVersions
diff --git a/nixos/tests/photonvision.nix b/nixos/tests/photonvision.nix
new file mode 100644
index 0000000000000..2cadaa4bc02e4
--- /dev/null
+++ b/nixos/tests/photonvision.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "photonvision";
+
+  nodes = {
+    machine = { pkgs, ... }: {
+      services.photonvision = {
+        enable = true;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("photonvision.service")
+    machine.wait_for_open_port(5800)
+  '';
+
+  meta.maintainers = with lib.maintainers; [ max-niederman ];
+})
+
diff --git a/nixos/tests/privoxy.nix b/nixos/tests/privoxy.nix
index 2d95c4522a01c..2a18d332c8778 100644
--- a/nixos/tests/privoxy.nix
+++ b/nixos/tests/privoxy.nix
@@ -77,6 +77,11 @@ in
     networking.proxy.httpsProxy = "http://localhost:8118";
   };
 
+  nodes.machine_socks4  = { ... }: { services.privoxy = { enable = true; settings.forward-socks4  = "/ 127.0.0.1:9050 ."; }; };
+  nodes.machine_socks4a = { ... }: { services.privoxy = { enable = true; settings.forward-socks4a = "/ 127.0.0.1:9050 ."; }; };
+  nodes.machine_socks5  = { ... }: { services.privoxy = { enable = true; settings.forward-socks5  = "/ 127.0.0.1:9050 ."; }; };
+  nodes.machine_socks5t = { ... }: { services.privoxy = { enable = true; settings.forward-socks5t = "/ 127.0.0.1:9050 ."; }; };
+
   testScript =
     ''
       with subtest("Privoxy is running"):
@@ -109,5 +114,13 @@ in
           machine.systemctl("start systemd-tmpfiles-clean")
           # ...and count again
           machine.succeed("test $(ls /run/privoxy/certs | wc -l) -eq 0")
+
+      with subtest("Privoxy supports socks upstream proxies"):
+          for m in [machine_socks4, machine_socks4a, machine_socks5, machine_socks5t]:
+              m.wait_for_unit("privoxy")
+              m.wait_for_open_port(8118)
+              # We expect a 503 error because the dummy upstream proxy is not reachable.
+              # In issue #265654, instead privoxy segfaulted causing curl to exit with "Empty reply from server".
+              m.succeed("http_proxy=http://localhost:8118 curl -v http://does-not-exist/ 2>&1 | grep 'HTTP/1.1 503'")
     '';
 })
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 632656ad57953..3dc368e320ff2 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -418,54 +418,6 @@ let
       '';
     };
 
-    kea = let
-      controlSocketPathV4 = "/run/kea/dhcp4.sock";
-      controlSocketPathV6 = "/run/kea/dhcp6.sock";
-    in
-    {
-      exporterConfig = {
-        enable = true;
-        controlSocketPaths = [
-          controlSocketPathV4
-          controlSocketPathV6
-        ];
-      };
-      metricProvider = {
-        services.kea = {
-          dhcp4 = {
-            enable = true;
-            settings = {
-              control-socket = {
-                socket-type = "unix";
-                socket-name = controlSocketPathV4;
-              };
-            };
-          };
-          dhcp6 = {
-            enable = true;
-            settings = {
-              control-socket = {
-                socket-type = "unix";
-                socket-name = controlSocketPathV6;
-              };
-            };
-          };
-        };
-      };
-
-      exporterTest = ''
-        wait_for_unit("kea-dhcp4-server.service")
-        wait_for_unit("kea-dhcp6-server.service")
-        wait_for_file("${controlSocketPathV4}")
-        wait_for_file("${controlSocketPathV6}")
-        wait_for_unit("prometheus-kea-exporter.service")
-        wait_for_open_port(9547)
-        succeed(
-            "curl --fail localhost:9547/metrics | grep 'packets_received_total'"
-        )
-      '';
-    };
-
     knot = {
       exporterConfig = {
         enable = true;
diff --git a/nixos/tests/redlib.nix b/nixos/tests/redlib.nix
new file mode 100644
index 0000000000000..e4bde25e30a63
--- /dev/null
+++ b/nixos/tests/redlib.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "redlib";
+  meta.maintainers = with lib.maintainers; [ soispha ];
+
+  nodes.machine = {
+    services.libreddit = {
+      package = pkgs.redlib;
+      enable = true;
+      # Test CAP_NET_BIND_SERVICE
+      port = 80;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("libreddit.service")
+    machine.wait_for_open_port(80)
+    # Query a page that does not require Internet access
+    machine.succeed("curl --fail http://localhost:80/settings")
+  '';
+})
diff --git a/nixos/tests/scion/freestanding-deployment/README.rst b/nixos/tests/scion/freestanding-deployment/README.rst
new file mode 100644
index 0000000000000..b2448a2dc9add
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/README.rst
@@ -0,0 +1,12 @@
+This NixOS VM test implements the network topology outlined in https://github.com/scionproto/scion/blob/27983125bccac6b84d1f96f406853aab0e460405/doc/tutorials/deploy.rst#sample-scion-demo-topology; below is an excerpt from that document.
+
+Sample SCION Demo Topology
+..........................
+
+The topology of the ISD includes the inter-AS connections to neighboring ASes, and defines the underlay IP/UDP addresses of services and routers running in this AS. This is specified in topology files - this guide later explains how to configure these files. A following graphic depicts the topology on a high level.
+
+.. figure:: https://github.com/scionproto/scion/raw/27983125bccac6b84d1f96f406853aab0e460405/doc/tutorials/deploy/SCION-deployment-guide.drawio.png
+   :width: 95 %
+   :figwidth: 100 %
+
+   *Figure 1 - Topology of the sample SCION demo environment. It consists of 1 ISD, 3 core ASes and 2 non-core ASes.*
diff --git a/nixos/tests/scion/freestanding-deployment/default.nix b/nixos/tests/scion/freestanding-deployment/default.nix
new file mode 100644
index 0000000000000..0c9686fbfbadf
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/default.nix
@@ -0,0 +1,172 @@
+# implements https://github.com/scionproto/scion/blob/27983125bccac6b84d1f96f406853aab0e460405/doc/tutorials/deploy.rst
+import ../../make-test-python.nix ({ pkgs, ... }:
+let
+  trust-root-configuration-keys = pkgs.runCommand "generate-trc-keys.sh" {
+    buildInputs = [
+      pkgs.scion
+    ];
+  } ''
+    set -euo pipefail
+
+    mkdir /tmp/tutorial-scion-certs && cd /tmp/tutorial-scion-certs
+    mkdir AS{1..5}
+
+    # Create voting and root keys and (self-signed) certificates for core ASes
+    pushd AS1
+    scion-pki certificate create --not-after=3650d --profile=sensitive-voting <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 sensitive voting cert"}') sensitive-voting.pem sensitive-voting.key
+    scion-pki certificate create --not-after=3650d --profile=regular-voting <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 regular voting cert"}') regular-voting.pem regular-voting.key
+    scion-pki certificate create --not-after=3650d --profile=cp-root <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 cp root cert"}') cp-root.pem cp-root.key
+    popd
+
+    pushd AS2
+    scion-pki certificate create --not-after=3650d --profile=cp-root <(echo '{"isd_as": "42-ffaa:1:2", "common_name": "42-ffaa:1:2 cp root cert"}') cp-root.pem cp-root.key
+    popd
+
+    pushd AS3
+    scion-pki certificate create --not-after=3650d --profile=sensitive-voting <(echo '{"isd_as": "42-ffaa:1:3", "common_name": "42-ffaa:1:3 sensitive voting cert"}') sensitive-voting.pem sensitive-voting.key
+    scion-pki certificate create --not-after=3650d --profile=regular-voting <(echo '{"isd_as": "42-ffaa:1:3", "common_name": "42-ffaa:1:3 regular voting cert"}') regular-voting.pem regular-voting.key
+    popd
+
+    # Create the TRC (Trust Root Configuration)
+    mkdir tmp
+    echo '
+    isd = 42
+    description = "Demo ISD 42"
+    serial_version = 1
+    base_version = 1
+    voting_quorum = 2
+
+    core_ases = ["ffaa:1:1", "ffaa:1:2", "ffaa:1:3"]
+    authoritative_ases = ["ffaa:1:1", "ffaa:1:2", "ffaa:1:3"]
+    cert_files = ["AS1/sensitive-voting.pem", "AS1/regular-voting.pem", "AS1/cp-root.pem", "AS2/cp-root.pem", "AS3/sensitive-voting.pem", "AS3/regular-voting.pem"]
+
+    [validity]
+    not_before = '$(date +%s)'
+    validity = "365d"' \
+    > trc-B1-S1-pld.tmpl
+
+    scion-pki trc payload --out=tmp/ISD42-B1-S1.pld.der --template trc-B1-S1-pld.tmpl
+    rm trc-B1-S1-pld.tmpl
+
+    # Sign and bundle the TRC
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS1/sensitive-voting.{pem,key} --out tmp/ISD42-B1-S1.AS1-sensitive.trc
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS1/regular-voting.{pem,key} --out tmp/ISD42-B1-S1.AS1-regular.trc
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS3/sensitive-voting.{pem,key} --out tmp/ISD42-B1-S1.AS3-sensitive.trc
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS3/regular-voting.{pem,key} --out tmp/ISD42-B1-S1.AS3-regular.trc
+
+    scion-pki trc combine tmp/ISD42-B1-S1.AS{1,3}-{sensitive,regular}.trc --payload tmp/ISD42-B1-S1.pld.der --out ISD42-B1-S1.trc
+    rm tmp -r
+
+    # Create CA key and certificate for issuing ASes
+    pushd AS1
+    scion-pki certificate create --profile=cp-ca <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 CA cert"}') cp-ca.pem cp-ca.key --ca cp-root.pem --ca-key cp-root.key
+    popd
+    pushd AS2
+    scion-pki certificate create --profile=cp-ca <(echo '{"isd_as": "42-ffaa:1:2", "common_name": "42-ffaa:1:2 CA cert"}') cp-ca.pem cp-ca.key --ca cp-root.pem --ca-key cp-root.key
+    popd
+
+    # Create AS key and certificate chains
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 AS cert"}') AS1/cp-as.pem AS1/cp-as.key --ca AS1/cp-ca.pem --ca-key AS1/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:2", "common_name": "42-ffaa:1:2 AS cert"}') AS2/cp-as.pem AS2/cp-as.key --ca AS2/cp-ca.pem --ca-key AS2/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:3", "common_name": "42-ffaa:1:3 AS cert"}') AS3/cp-as.pem AS3/cp-as.key --ca AS1/cp-ca.pem --ca-key AS1/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:4", "common_name": "42-ffaa:1:4 AS cert"}') AS4/cp-as.pem AS4/cp-as.key --ca AS1/cp-ca.pem --ca-key AS1/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:5", "common_name": "42-ffaa:1:5 AS cert"}') AS5/cp-as.pem AS5/cp-as.key --ca AS2/cp-ca.pem --ca-key AS2/cp-ca.key --bundle
+
+    for i in {1..5}
+    do
+      mkdir -p $out/AS$i
+      cp AS$i/cp-as.{key,pem} $out/AS$i
+    done
+
+    mv *.trc $out
+  '';
+  imports = hostId: [
+    ({
+      services.scion = {
+        enable = true;
+        bypassBootstrapWarning = true;
+      };
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+      };
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "192.168.1.${toString hostId}/24";
+      };
+      environment.etc = {
+        "scion/topology.json".source = ./topology${toString hostId}.json;
+        "scion/crypto/as".source = trust-root-configuration-keys + "/AS${toString hostId}";
+        "scion/certs/ISD42-B1-S1.trc".source = trust-root-configuration-keys + "/ISD42-B1-S1.trc";
+        "scion/keys/master0.key".text = "U${toString hostId}v4k23ZXjGDwDofg/Eevw==";
+        "scion/keys/master1.key".text = "dBMko${toString hostId}qMS8DfrN/zP2OUdA==";
+      };
+      environment.systemPackages = [
+        pkgs.scion
+      ];
+    })
+  ];
+in
+{
+  name = "scion-test";
+  nodes = {
+    scion01 = { ... }: {
+      imports = (imports 1);
+    };
+    scion02 = { ... }: {
+      imports = (imports 2);
+    };
+    scion03 = { ... }: {
+      imports = (imports 3);
+    };
+    scion04 = { ... }: {
+      imports = (imports 4);
+    };
+    scion05 = { ... }: {
+      imports = (imports 5);
+    };
+  };
+  testScript = let
+    pingAll = pkgs.writeShellScript "ping-all-scion.sh" ''
+      addresses="42-ffaa:1:1 42-ffaa:1:2 42-ffaa:1:3 42-ffaa:1:4 42-ffaa:1:5"
+      timeout=100
+      wait_for_all() {
+        for as in "$@"
+        do
+          scion showpaths $as --no-probe > /dev/null || return 1
+        done
+        return 0
+      }
+      ping_all() {
+        for as in "$@"
+        do
+          scion ping "$as,127.0.0.1" -c 3 || return 1
+        done
+        return 0
+      }
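+      # Wait until every AS resolves paths, then ping them all; give up after $timeout seconds.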
+      for i in $(seq 0 $timeout); do
+        wait_for_all $addresses && ping_all $addresses && exit 0
+        sleep 1
+      done
+      exit 1
+    '';
+  in
+  ''
+    # List of AS instances
+    machines = [scion01, scion02, scion03, scion04, scion05]
+
+    # Wait for scion-control.service on all instances
+    for i in machines:
+        i.wait_for_unit("scion-control.service")
+
+    # Execute pingAll command on all instances
+    for i in machines:
+        i.succeed("${pingAll} >&2")
+
+    # Restart scion-dispatcher and ping again to test robustness
+    for i in machines:
+        i.succeed("systemctl restart scion-dispatcher >&2")
+        i.succeed("${pingAll} >&2")
+  '';
+})
diff --git a/nixos/tests/scion/freestanding-deployment/topology1.json b/nixos/tests/scion/freestanding-deployment/topology1.json
new file mode 100644
index 0000000000000..de51515eebc2d
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology1.json
@@ -0,0 +1,51 @@
+{
+  "attributes": [
+    "core"
+  ],
+  "isd_as": "42-ffaa:1:1",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.1:50014",
+            "remote": "192.168.1.4:50014"
+          },
+          "isd_as": "42-ffaa:1:4",
+          "link_to": "child",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.1:50012",
+            "remote": "192.168.1.2:50012"
+          },
+          "isd_as": "42-ffaa:1:2",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "3": {
+          "underlay": {
+            "public": "192.168.1.1:50013",
+            "remote": "192.168.1.3:50013"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "core",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/scion/freestanding-deployment/topology2.json b/nixos/tests/scion/freestanding-deployment/topology2.json
new file mode 100644
index 0000000000000..f8e10d5d1f75d
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology2.json
@@ -0,0 +1,51 @@
+{
+  "attributes": [
+    "core"
+  ],
+  "isd_as": "42-ffaa:1:2",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.2:50012",
+            "remote": "192.168.1.1:50012"
+          },
+          "isd_as": "42-ffaa:1:1",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.2:50023",
+            "remote": "192.168.1.3:50023"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "3": {
+          "underlay": {
+            "public": "192.168.1.2:50025",
+            "remote": "192.168.1.5:50025"
+          },
+          "isd_as": "42-ffaa:1:5",
+          "link_to": "child",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/scion/freestanding-deployment/topology3.json b/nixos/tests/scion/freestanding-deployment/topology3.json
new file mode 100644
index 0000000000000..53cee431885b3
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology3.json
@@ -0,0 +1,60 @@
+{
+  "attributes": [
+    "core"
+  ],
+  "isd_as": "42-ffaa:1:3",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.3:50013",
+            "remote": "192.168.1.1:50013"
+          },
+          "isd_as": "42-ffaa:1:1",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.3:50023",
+            "remote": "192.168.1.2:50023"
+          },
+          "isd_as": "42-ffaa:1:2",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "3": {
+          "underlay": {
+            "public": "192.168.1.3:50034",
+            "remote": "192.168.1.4:50034"
+          },
+          "isd_as": "42-ffaa:1:4",
+          "link_to": "child",
+          "mtu": 1472
+        },
+        "4": {
+          "underlay": {
+            "public": "192.168.1.3:50035",
+            "remote": "192.168.1.5:50035"
+          },
+          "isd_as": "42-ffaa:1:5",
+          "link_to": "child",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/scion/freestanding-deployment/topology4.json b/nixos/tests/scion/freestanding-deployment/topology4.json
new file mode 100644
index 0000000000000..03c507a4daf58
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology4.json
@@ -0,0 +1,40 @@
+{
+  "attributes": [],
+  "isd_as": "42-ffaa:1:4",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.4:50014",
+            "remote": "192.168.1.1:50014"
+          },
+          "isd_as": "42-ffaa:1:1",
+          "link_to": "parent",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.4:50034",
+            "remote": "192.168.1.3:50034"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "parent",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/scion/freestanding-deployment/topology5.json b/nixos/tests/scion/freestanding-deployment/topology5.json
new file mode 100644
index 0000000000000..6114c1f73c2a7
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology5.json
@@ -0,0 +1,40 @@
+{
+  "attributes": [],
+  "isd_as": "42-ffaa:1:5",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.5:50025",
+            "remote": "192.168.1.2:50025"
+          },
+          "isd_as": "42-ffaa:1:2",
+          "link_to": "parent",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.5:50035",
+            "remote": "192.168.1.3:50035"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "parent",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/systemd-machinectl.nix b/nixos/tests/systemd-machinectl.nix
index b8ed0c33e8e4a..02b4d9c590b59 100644
--- a/nixos/tests/systemd-machinectl.nix
+++ b/nixos/tests/systemd-machinectl.nix
@@ -42,8 +42,18 @@ import ./make-test-python.nix ({ pkgs, ... }:
 
       virtualisation.additionalPaths = [ containerSystem ];
 
-      # not needed, but we want to test the nspawn file generation
-      systemd.nspawn.${containerName} = { };
+      systemd.tmpfiles.rules = [
+        "d /var/lib/machines/shared-decl 0755 root root - -"
+      ];
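+      # Declarative nspawn container that runs the prebuilt container system's
+      # init directly, with the host /nix/store bound read-only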
+      systemd.nspawn.shared-decl = {
+        execConfig = {
+          Boot = false;
+          Parameters = "${containerSystem}/init";
+        };
+        filesConfig = {
+          BindReadOnly = "/nix/store";
+        };
+      };
 
       systemd.services."systemd-nspawn@${containerName}" = {
         serviceConfig.Environment = [
@@ -52,14 +62,33 @@ import ./make-test-python.nix ({ pkgs, ... }:
         ];
         overrideStrategy = "asDropin";
       };
+
+      # Allow DHCP requests from the container's ve-* interface through the firewall
+      networking.firewall.extraCommands = ''
+        ${pkgs.iptables}/bin/iptables -A nixos-fw -i ve-+ -p udp -m udp --dport 67 -j nixos-fw-accept
+      '';
     };
 
     testScript = ''
       start_all()
       machine.wait_for_unit("default.target");
 
-      # Install container
+      # Test machinectl start/stop of the declarative shared-decl container
+      machine.succeed("machinectl start shared-decl");
+      machine.wait_until_succeeds("systemctl -M shared-decl is-active default.target");
+      machine.succeed("machinectl stop shared-decl");
+
+      # Create the container root directory
       machine.succeed("mkdir -p ${containerRoot}");
+
+      # Start a container with a shared Nix store, using the same arguments as systemd-nspawn@.service
+      machine.succeed("systemd-run systemd-nspawn --machine=${containerName} --network-veth -U --bind-ro=/nix/store ${containerSystem}/init")
+      machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target");
+
+      # Test machinectl stop
+      machine.succeed("machinectl stop ${containerName}");
+
+      # Install container
       # Workaround for nixos-install
       machine.succeed("chmod o+rx /var/lib/machines");
       machine.succeed("nixos-install --root ${containerRoot} --system ${containerSystem} --no-channel-copy --no-root-passwd");
@@ -77,6 +106,12 @@ import ./make-test-python.nix ({ pkgs, ... }:
       # Test nss_mymachines via nscd
       machine.succeed("getent hosts ${containerName}");
 
+      # Test systemd-nspawn network configuration to container
+      machine.succeed("networkctl --json=short status ve-${containerName} | ${pkgs.jq}/bin/jq -e '.OperationalState == \"routable\"'");
+
+      # Test systemd-nspawn network configuration to host
+      machine.succeed("machinectl shell ${containerName} /run/current-system/sw/bin/networkctl --json=short status host0 | ${pkgs.jq}/bin/jq -r '.OperationalState == \"routable\"'");
+
       # Test systemd-nspawn network configuration
       machine.succeed("ping -n -c 1 ${containerName}");
 
diff --git a/nixos/tests/systemd-user-linger.nix b/nixos/tests/systemd-user-linger.nix
new file mode 100644
index 0000000000000..2c3d71668979f
--- /dev/null
+++ b/nixos/tests/systemd-user-linger.nix
@@ -0,0 +1,39 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+  {
+    name = "systemd-user-linger";
+
+    nodes.machine =
+      { ... }:
+      {
+        users.users = {
+          alice = {
+            isNormalUser = true;
+            linger = true;
+            uid = 1000;
+          };
+
+          bob = {
+            isNormalUser = true;
+            linger = false;
+            uid = 1001;
+          };
+        };
+      };
+
+    testScript =
+      { ... }:
+      ''
+        machine.wait_for_file("/var/lib/systemd/linger/alice")
+        machine.succeed("systemctl status user-1000.slice")
+
+        machine.fail("test -e /var/lib/systemd/linger/bob")
+        machine.fail("systemctl status user-1001.slice")
+
+        with subtest("missing users have linger purged"):
+            machine.succeed("touch /var/lib/systemd/linger/missing")
+            machine.systemctl("restart linger-users")
+            machine.succeed("test ! -e /var/lib/systemd/linger/missing")
+      '';
+  }
+)
diff --git a/nixos/tests/tracee.nix b/nixos/tests/tracee.nix
index 3dadc0f9fdb33..1c241f3ec4983 100644
--- a/nixos/tests/tracee.nix
+++ b/nixos/tests/tracee.nix
@@ -1,7 +1,13 @@
-import ./make-test-python.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }: rec {
   name = "tracee-integration";
   meta.maintainers = pkgs.tracee.meta.maintainers;
 
+  passthru.hello-world-builder = pkgs: pkgs.dockerTools.buildImage {
+    name = "hello-world";
+    tag = "latest";
+    config.Cmd = [ "${pkgs.hello}/bin/hello" ];
+  };
+
   nodes = {
     machine = { config, pkgs, ... }: {
       # EventFilters/trace_only_events_from_new_containers and
@@ -12,57 +18,48 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       environment.systemPackages = with pkgs; [
         # required by Test_EventFilters/trace_events_from_ls_and_which_binary_in_separate_scopes
         which
-        # build the go integration tests as a binary
-        (tracee.overrideAttrs (oa: {
-          pname = oa.pname + "-integration";
-          postPatch = oa.postPatch or "" + ''
-            # prepare tester.sh (which will be embedded in the test binary)
-            patchShebangs tests/integration/tester.sh
-
-            # fix the test to look at nixos paths for running programs
-            substituteInPlace tests/integration/integration_test.go \
-              --replace "bin=/usr/bin/" "comm=" \
-              --replace "binary=/usr/bin/" "comm=" \
-              --replace "/usr/bin/dockerd" "dockerd" \
-              --replace "/usr/bin" "/run/current-system/sw/bin"
-          '';
-          nativeBuildInputs = oa.nativeBuildInputs or [ ] ++ [ makeWrapper ];
-          buildPhase = ''
-            runHook preBuild
-            # just build the static lib we need for the go test binary
-            make $makeFlags ''${enableParallelBuilding:+-j$NIX_BUILD_CORES} bpf-core ./dist/btfhub
-
-            # then compile the tests to be ran later
-            CGO_LDFLAGS="$(pkg-config --libs libbpf)" go test -tags core,ebpf,integration -p 1 -c -o $GOPATH/tracee-integration ./tests/integration/...
-            runHook postBuild
-          '';
-          doCheck = false;
-          outputs = [ "out" ];
-          installPhase = ''
-            mkdir -p $out/bin
-            mv $GOPATH/tracee-integration $out/bin/
-          '';
-          doInstallCheck = false;
-
-          meta = oa.meta // {
-            outputsToInstall = [];
-          };
-        }))
+        # the Go integration tests, prebuilt as a binary
+        tracee.passthru.tests.integration-test-cli
       ];
     };
   };
 
-  testScript = ''
-    machine.wait_for_unit("docker.service")
+  testScript =
+    let
+      skippedTests = [
+        # These comm tests do not resolve for some reason. The tests pass when
+        # the same policies are replicated and tracee is run manually, but they
+        # fail in the integration test, whether run automatically or with the
+        # commands issued by hand while the test is searching.
+        "Test_EventFilters/comm:_event:_args:_trace_event_set_in_a_specific_policy_with_args_from_ls_command"
+        "Test_EventFilters/comm:_event:_trace_events_set_in_two_specific_policies_from_ls_and_uname_commands"
+
+        # Worked at some point; seems to be flaky.
+        "Test_EventFilters/pid:_event:_args:_trace_event_sched_switch_with_args_from_pid_0"
+      ];
+    in
+    ''
+      with subtest("prepare for integration tests"):
+        machine.wait_for_unit("docker.service")
+        machine.succeed('which bash')
+
+        # EventFilters/trace_only_events_from_new_containers also requires a container called "hello-world"
+        machine.succeed('docker load < ${passthru.hello-world-builder pkgs}')
 
-    with subtest("run integration tests"):
-      # EventFilters/trace_only_events_from_new_containers also requires a container called "alpine"
-      machine.succeed('tar c -C ${pkgs.pkgsStatic.busybox} . | docker import - alpine --change "ENTRYPOINT [\"sleep\"]"')
+        # exec= needs fully resolved paths
+        machine.succeed(
+          'mkdir /tmp/testdir',
+          'cp $(which who) /tmp/testdir/who',
+          'cp $(which uname) /tmp/testdir/uname',
+        )
 
-      # Test_EventFilters/trace_event_set_in_a_specific_scope expects to be in a dir that includes "integration"
-      print(machine.succeed(
-        'mkdir /tmp/integration',
-        'cd /tmp/integration && tracee-integration -test.v'
-      ))
-  '';
+      with subtest("run integration tests"):
+        # Test_EventFilters/trace_event_set_in_a_specific_scope expects to be in a dir that includes "integration"
+        # tests must be run with a single process
+        print(machine.succeed(
+          'mkdir /tmp/integration',
+          'cd /tmp/integration && export PATH="/tmp/testdir:$PATH" && integration.test -test.v -test.parallel 1 -test.skip="^${builtins.concatStringsSep "$|^" skippedTests}$"'
+        ))
+    '';
 })
diff --git a/nixos/tests/ustreamer.nix b/nixos/tests/ustreamer.nix
new file mode 100644
index 0000000000000..1354eb03a3269
--- /dev/null
+++ b/nixos/tests/ustreamer.nix
@@ -0,0 +1,75 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "ustreamer-vmtest";
+  nodes = {
+    client = {...}: {
+      environment.systemPackages = [ pkgs.curl ];
+    };
+    camera = {config, ...}: let
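+      # akvcam pairs a virtual output device (/dev/video7) with a capture
+      # device (/dev/video9); ustreamer streams from the capture side.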
+      configFile = pkgs.writeText "akvcam-configFile" ''
+        [Cameras]
+        cameras/size = 2
+
+        cameras/1/type = output
+        cameras/1/mode = mmap, userptr, rw
+        cameras/1/description = Virtual Camera (output device)
+        cameras/1/formats = 2
+        cameras/1/videonr = 7
+
+        cameras/2/type = capture
+        cameras/2/mode = mmap, rw
+        cameras/2/description = Virtual Camera
+        cameras/2/formats = 1, 2
+        cameras/2/videonr = 9
+
+        [Connections]
+        connections/size = 1
+        connections/1/connection = 1:2
+
+        [Formats]
+        formats/size = 2
+
+        formats/1/format = YUY2
+        formats/1/width = 640
+        formats/1/height = 480
+        formats/1/fps = 30
+
+        formats/2/format = RGB24, YUY2
+        formats/2/width = 640
+        formats/2/height = 480
+        formats/2/fps = 20/1, 15/2
+      '';
+    in {
+      environment.systemPackages = [ pkgs.ustreamer ];
+      networking.firewall.enable = false;
+      systemd.services.ustreamer = {
+        description = "ustreamer service";
+        wantedBy = ["multi-user.target"];
+        serviceConfig = {
+          DynamicUser = true;
+          ExecStart = "${pkgs.ustreamer}/bin/ustreamer --host=0.0.0.0 --port 8000 --device /dev/video9 --device-timeout=8";
+          PrivateTmp = true;
+          BindReadOnlyPaths = "/dev/video9";
+          SupplementaryGroups = [
+            "video"
+          ];
+          Restart = "always";
+        };
+      };
+      boot.extraModulePackages = [config.boot.kernelPackages.akvcam];
+      boot.kernelModules = ["akvcam"];
+      boot.extraModprobeConfig = ''
+        options akvcam config_file=${configFile}
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    camera.wait_for_unit("ustreamer.service")
+    camera.wait_for_open_port(8000)
+
+    client.wait_for_unit("multi-user.target")
+    client.succeed("curl http://camera:8000")
+  '';
+})
diff --git a/nixos/tests/vscodium.nix b/nixos/tests/vscodium.nix
index d817ce927ff8d..76d5244b3ee34 100644
--- a/nixos/tests/vscodium.nix
+++ b/nixos/tests/vscodium.nix
@@ -76,4 +76,4 @@ let
     });
 
 in
-builtins.mapAttrs (k: v: mkTest k v { }) tests
+builtins.mapAttrs (k: v: mkTest k v) tests
diff --git a/nixos/tests/web-apps/gotosocial.nix b/nixos/tests/web-apps/gotosocial.nix
index 6d279ab63a799..8c4e76b14e3bf 100644
--- a/nixos/tests/web-apps/gotosocial.nix
+++ b/nixos/tests/web-apps/gotosocial.nix
@@ -1,7 +1,7 @@
 { lib, ... }:
 {
   name = "gotosocial";
-  meta.maintainers = with lib.maintainers; [ misuzu ];
+  meta.maintainers = with lib.maintainers; [ misuzu blakesmith ];
 
   nodes.machine = { pkgs, ... }: {
     environment.systemPackages = [ pkgs.jq ];
diff --git a/nixos/tests/web-apps/pretix.nix b/nixos/tests/web-apps/pretix.nix
new file mode 100644
index 0000000000000..559316f9b85cb
--- /dev/null
+++ b/nixos/tests/web-apps/pretix.nix
@@ -0,0 +1,47 @@
+{
+  lib,
+  pkgs,
+  ...
+}:
+
+{
+  name = "pretix";
+  meta.maintainers = with lib.maintainers; [ hexa ];
+
+  nodes = {
+    pretix = {
+      networking.extraHosts = ''
+        127.0.0.1 tickets.local
+      '';
+
+      services.pretix = {
+        enable = true;
+        nginx.domain = "tickets.local";
+        plugins = with pkgs.pretix.plugins; [
+          passbook
+          pages
+        ];
+        settings = {
+          pretix = {
+            instance_name = "NixOS Test";
+            url = "http://tickets.local";
+          };
+          mail.from = "hello@tickets.local";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    pretix.wait_for_unit("pretix-web.service")
+    pretix.wait_for_unit("pretix-worker.service")
+
+    pretix.wait_until_succeeds("curl -q --fail http://tickets.local")
+
+    pretix.succeed("pretix-manage --help")
+
+    pretix.log(pretix.succeed("systemd-analyze security pretix-web.service"))
+  '';
+}